Diffstat (limited to 'src/3rdparty/masm')
-rw-r--r-- src/3rdparty/masm/WeakRandom.h | 52
-rw-r--r-- src/3rdparty/masm/assembler/ARMAssembler.cpp | 444
-rw-r--r-- src/3rdparty/masm/assembler/ARMAssembler.h | 1129
-rw-r--r-- src/3rdparty/masm/assembler/ARMv7Assembler.cpp | 36
-rw-r--r-- src/3rdparty/masm/assembler/ARMv7Assembler.h | 2790
-rw-r--r-- src/3rdparty/masm/assembler/AbstractMacroAssembler.h | 842
-rw-r--r-- src/3rdparty/masm/assembler/AssemblerBuffer.h | 181
-rw-r--r-- src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h | 342
-rw-r--r-- src/3rdparty/masm/assembler/CodeLocation.h | 218
-rw-r--r-- src/3rdparty/masm/assembler/LinkBuffer.cpp | 230
-rw-r--r-- src/3rdparty/masm/assembler/LinkBuffer.h | 297
-rw-r--r-- src/3rdparty/masm/assembler/MIPSAssembler.h | 1107
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssembler.h | 1465
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerARM.cpp | 99
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerARM.h | 1386
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerARMv7.h | 1914
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h | 406
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerMIPS.h | 2751
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp | 52
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerSH4.h | 2293
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerX86.h | 314
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerX86Common.h | 1541
-rw-r--r-- src/3rdparty/masm/assembler/MacroAssemblerX86_64.h | 643
-rw-r--r-- src/3rdparty/masm/assembler/RepatchBuffer.h | 181
-rw-r--r-- src/3rdparty/masm/assembler/SH4Assembler.h | 2152
-rw-r--r-- src/3rdparty/masm/assembler/X86Assembler.h | 2540
-rw-r--r-- src/3rdparty/masm/config.h | 56
-rw-r--r-- src/3rdparty/masm/create_regex_tables | 121
-rw-r--r-- src/3rdparty/masm/disassembler/Disassembler.cpp | 43
-rw-r--r-- src/3rdparty/masm/disassembler/Disassembler.h | 52
-rw-r--r-- src/3rdparty/masm/disassembler/UDis86Disassembler.cpp | 63
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/differences.txt | 24
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/itab.py | 360
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/optable.xml | 8959
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/ud_opcode.py | 235
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/ud_optable.py | 103
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86.c | 182
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86.h | 33
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_decode.c | 1141
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_decode.h | 258
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_extern.h | 88
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_input.c | 262
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_input.h | 67
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c | 33
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c | 252
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c | 278
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_syn.c | 86
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_syn.h | 47
-rw-r--r-- src/3rdparty/masm/disassembler/udis86/udis86_types.h | 238
-rw-r--r-- src/3rdparty/masm/jit/JITCompilationEffort.h | 39
-rw-r--r-- src/3rdparty/masm/masm-defs.pri | 28
-rw-r--r-- src/3rdparty/masm/masm.pri | 86
-rw-r--r-- src/3rdparty/masm/runtime/MatchResult.h | 71
-rw-r--r-- src/3rdparty/masm/stubs/ExecutableAllocator.h | 120
-rw-r--r-- src/3rdparty/masm/stubs/JSGlobalData.h | 65
-rw-r--r-- src/3rdparty/masm/stubs/LLIntData.h | 0
-rw-r--r-- src/3rdparty/masm/stubs/Options.h | 53
-rw-r--r-- src/3rdparty/masm/stubs/WTFStubs.cpp | 131
-rw-r--r-- src/3rdparty/masm/stubs/WTFStubs.h | 50
-rw-r--r-- src/3rdparty/masm/stubs/wtf/FastAllocBase.h | 48
-rw-r--r-- src/3rdparty/masm/stubs/wtf/FastMalloc.h | 46
-rw-r--r-- src/3rdparty/masm/stubs/wtf/Noncopyable.h | 48
-rw-r--r-- src/3rdparty/masm/stubs/wtf/OwnPtr.h | 46
-rw-r--r-- src/3rdparty/masm/stubs/wtf/PassOwnPtr.h | 120
-rw-r--r-- src/3rdparty/masm/stubs/wtf/PassRefPtr.h | 101
-rw-r--r-- src/3rdparty/masm/stubs/wtf/RefCounted.h | 70
-rw-r--r-- src/3rdparty/masm/stubs/wtf/RefPtr.h | 93
-rw-r--r-- src/3rdparty/masm/stubs/wtf/TypeTraits.h | 58
-rw-r--r-- src/3rdparty/masm/stubs/wtf/UnusedParam.h | 48
-rw-r--r-- src/3rdparty/masm/stubs/wtf/Vector.h | 104
-rw-r--r-- src/3rdparty/masm/stubs/wtf/text/CString.h | 44
-rw-r--r-- src/3rdparty/masm/stubs/wtf/text/WTFString.h | 75
-rw-r--r-- src/3rdparty/masm/stubs/wtf/unicode/Unicode.h | 59
-rw-r--r-- src/3rdparty/masm/wtf/ASCIICType.h | 181
-rw-r--r-- src/3rdparty/masm/wtf/Assertions.h | 428
-rw-r--r-- src/3rdparty/masm/wtf/Atomics.h | 227
-rw-r--r-- src/3rdparty/masm/wtf/BumpPointerAllocator.h | 252
-rw-r--r-- src/3rdparty/masm/wtf/CheckedArithmetic.h | 721
-rw-r--r-- src/3rdparty/masm/wtf/Compiler.h | 302
-rw-r--r-- src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h | 45
-rw-r--r-- src/3rdparty/masm/wtf/DataLog.h | 128
-rw-r--r-- src/3rdparty/masm/wtf/DynamicAnnotations.h | 96
-rw-r--r-- src/3rdparty/masm/wtf/EnumClass.h | 134
-rw-r--r-- src/3rdparty/masm/wtf/FeatureDefines.h | 874
-rw-r--r-- src/3rdparty/masm/wtf/FilePrintStream.cpp | 64
-rw-r--r-- src/3rdparty/masm/wtf/FilePrintStream.h | 62
-rw-r--r-- src/3rdparty/masm/wtf/Locker.h | 48
-rw-r--r-- src/3rdparty/masm/wtf/MathExtras.h | 459
-rw-r--r-- src/3rdparty/masm/wtf/NotFound.h | 37
-rw-r--r-- src/3rdparty/masm/wtf/NullPtr.h | 56
-rw-r--r-- src/3rdparty/masm/wtf/OSAllocator.h | 115
-rw-r--r-- src/3rdparty/masm/wtf/OSAllocatorPosix.cpp | 193
-rw-r--r-- src/3rdparty/masm/wtf/OSAllocatorWin.cpp | 84
-rw-r--r-- src/3rdparty/masm/wtf/PageAllocation.h | 120
-rw-r--r-- src/3rdparty/masm/wtf/PageAllocationAligned.cpp | 85
-rw-r--r-- src/3rdparty/masm/wtf/PageAllocationAligned.h | 70
-rw-r--r-- src/3rdparty/masm/wtf/PageBlock.cpp | 78
-rw-r--r-- src/3rdparty/masm/wtf/PageBlock.h | 88
-rw-r--r-- src/3rdparty/masm/wtf/PageReservation.h | 149
-rw-r--r-- src/3rdparty/masm/wtf/Platform.h | 1019
-rw-r--r-- src/3rdparty/masm/wtf/PossiblyNull.h | 59
-rw-r--r-- src/3rdparty/masm/wtf/PrintStream.cpp | 114
-rw-r--r-- src/3rdparty/masm/wtf/PrintStream.h | 300
-rw-r--r-- src/3rdparty/masm/wtf/RawPointer.h | 58
-rw-r--r-- src/3rdparty/masm/wtf/StdLibExtras.h | 282
-rw-r--r-- src/3rdparty/masm/wtf/VMTags.h | 75
-rw-r--r-- src/3rdparty/masm/yarr/Yarr.h | 69
-rw-r--r-- src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp | 463
-rw-r--r-- src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h | 138
-rw-r--r-- src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js | 219
-rw-r--r-- src/3rdparty/masm/yarr/YarrInterpreter.cpp | 1959
-rw-r--r-- src/3rdparty/masm/yarr/YarrInterpreter.h | 380
-rw-r--r-- src/3rdparty/masm/yarr/YarrJIT.cpp | 2702
-rw-r--r-- src/3rdparty/masm/yarr/YarrJIT.h | 141
-rw-r--r-- src/3rdparty/masm/yarr/YarrParser.h | 880
-rw-r--r-- src/3rdparty/masm/yarr/YarrPattern.cpp | 880
-rw-r--r-- src/3rdparty/masm/yarr/YarrPattern.h | 401
-rw-r--r-- src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp | 59
-rw-r--r-- src/3rdparty/masm/yarr/YarrSyntaxChecker.h | 38
-rw-r--r-- src/3rdparty/masm/yarr/yarr.pri | 12
120 files changed, 55333 insertions, 0 deletions
diff --git a/src/3rdparty/masm/WeakRandom.h b/src/3rdparty/masm/WeakRandom.h
new file mode 100644
index 0000000000..325d1f6ac6
--- /dev/null
+++ b/src/3rdparty/masm/WeakRandom.h
@@ -0,0 +1,52 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef MASM_WEAKRANDOM_H
+#define MASM_WEAKRANDOM_H
+
+#include <stdint.h>
+
+struct WeakRandom {
+ WeakRandom(int) {}
+ uint32_t getUint32() { return 0; }
+};
+
+#endif // MASM_WEAKRANDOM_H
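
The WeakRandom above is intentionally a stub: the surrounding masm code only
needs the type to exist, and this port does not consume the randomness, so
getUint32() can simply return 0. For contrast, here is a minimal sketch of a
working weak (non-cryptographic) generator, using a xorshift32 step; the
algorithm choice is an assumption for illustration, not the upstream WebKit
implementation:

    #include <stdint.h>

    struct WeakRandomSketch {
        explicit WeakRandomSketch(uint32_t seed)
            : m_state(seed ? seed : 1) // xorshift state must be non-zero
        {
        }

        uint32_t getUint32()
        {
            // One xorshift32 step: fast and statistically weak, acceptable
            // for hash-seed scrambling, never for cryptography.
            m_state ^= m_state << 13;
            m_state ^= m_state >> 17;
            m_state ^= m_state << 5;
            return m_state;
        }

    private:
        uint32_t m_state;
    };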
diff --git a/src/3rdparty/masm/assembler/ARMAssembler.cpp b/src/3rdparty/masm/assembler/ARMAssembler.cpp
new file mode 100644
index 0000000000..6912d1ea39
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMAssembler.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return Op2Immediate | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return Op2Immediate | (imm >> 24) | (rol << 8);
+
+ return InvalidImmediate;
+}
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+ // Step 1: Search for a non-immediate part
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+ while(1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov(reg, imm1);
+ orr(reg, reg, imm2);
+ } else {
+ mvn(reg, imm1);
+ bic(reg, reg, imm2);
+ }
+
+ return 1;
+}
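+
+// Worked example (illustration only): 0x00ff00ff is not encodable as a
+// single operand-2 immediate, but it splits into two values that are,
+// 0x00ff0000 and 0x000000ff, so genInt() emits
+//     mov reg, #0x00ff0000
+//     orr reg, reg, #0x000000ff
+// and returns 1. Values needing more than two pieces make genInt() return 0,
+// and encodeComplexImm() then falls back to a constant-pool load.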
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+ // Try to do it with a single instruction
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ if (invert)
+ return tmp | Op2InvertedImmediate;
+ mvn(tmpReg, tmp);
+ return tmpReg;
+ }
+
+ return encodeComplexImm(imm, tmpReg);
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+ // Try to do it with a single instruction
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate) {
+ mov(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ mvn(dest, tmp);
+ return;
+ }
+
+ encodeComplexImm(imm, dest);
+}
+
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ ARMWord tmp = getImm16Op2(imm);
+ if (tmp != InvalidImmediate) {
+ movw(dest, tmp);
+ return dest;
+ }
+ movw(dest, getImm16Op2(imm & 0xffff));
+ movt(dest, getImm16Op2(imm >> 16));
+ return dest;
+#else
+ // Do it with two instructions
+ if (genInt(dest, imm, true))
+ return dest;
+ if (genInt(dest, ~imm, false))
+ return dest;
+
+ ldrImmediate(dest, imm);
+ return dest;
+#endif
+}
+
+// Memory load/store helpers
+
+void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtrUp(transferType, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
+ dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xfff)
+ dtrDown(transferType, srcDst, base, -offset);
+ else if (offset >= -0xfffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
+ dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
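+
+// Example (illustration only): a word load at offset 0x12345 does not fit
+// the 12-bit ldr/str offset field, so the middle branch above produces
+//     add s0, base, #0x12000   ; (offset >> 12) as a rotated immediate
+//     ldr rd, [s0, #0x345]     ; the remaining low 12 bits
+// where s0 is the scratch register ARMRegisters::S0 (aliased to r3).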
+
+void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ASSERT(scale >= 0 && scale <= 3);
+ ARMWord op2 = lsl(index, scale);
+
+ if (!offset) {
+ dtrUpRegister(transferType, srcDst, base, op2);
+ return;
+ }
+
+ if (offset <= 0xfffff && offset >= -0xfffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xff)
+ halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
+ else if (offset <= 0xffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
+ halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xff)
+ halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
+ else if (offset >= -0xffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
+ halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ if (!scale && !offset) {
+ halfDtrUpRegister(transferType, srcDst, base, index);
+ return;
+ }
+
+ ARMWord op2 = lsl(index, scale);
+
+ if (offset <= 0xffff && offset >= -0xffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+ // VFP cannot directly access memory that is not four-byte-aligned
+ if (!(offset & 0x3)) {
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrUp(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrDown(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, base);
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
+}
+
+void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ add(ARMRegisters::S1, base, lsl(index, scale));
+ dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
+}
+
+PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+{
+ // 64-bit alignment is required for the next constant pool and for the JIT code as well
+ m_buffer.flushWithoutBarrier(true);
+ if (!m_buffer.isAligned(8))
+ bkpt(0);
+
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+ char* data = reinterpret_cast<char*>(result->start());
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ // The low bit of the offset is set if the constant must be placed in the constant pool.
+ int pos = (iter->m_offset) & (~0x1);
+ ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
+ ARMWord* addr = getLdrImmAddress(ldrAddr);
+ if (*addr != InvalidBranchTarget) {
+ if (!(iter->m_offset & 1)) {
+ intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);
+
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
+ continue;
+ }
+ }
+ *addr = reinterpret_cast<ARMWord>(data + *addr);
+ }
+ }
+
+ return result;
+}
+
+#if OS(LINUX) && COMPILER(RVCT)
+
+__asm void ARMAssembler::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
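
The immediate helpers in the file above all revolve around the ARM operand-2
format: an 8-bit value rotated right by twice the 4-bit field stored in bits
11-8 of the instruction, with getOp2() returning InvalidImmediate when no
such encoding exists. A self-contained sketch of the inverse direction, handy
for checking encodings by hand (decodeOp2Immediate is a hypothetical helper
written for this note, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    // Recover the 32-bit value from the low 12 bits of an operand-2
    // immediate: bits [7:0] are the base value, bits [11:8] half the
    // right-rotation amount. Flag bits such as Op2Immediate (bit 25)
    // are ignored by the masking.
    static uint32_t decodeOp2Immediate(uint32_t op2)
    {
        uint32_t imm8 = op2 & 0xff;
        uint32_t rotate = ((op2 >> 8) & 0xf) * 2;
        // Rotate right; the "& 31" keeps the shift defined when rotate is 0.
        return (imm8 >> rotate) | (imm8 << ((32 - rotate) & 31));
    }

    int main()
    {
        // 0xff000000 encodes as value 0xff with rotation field 4 (ROR 8).
        assert(decodeOp2Immediate(0x4ff) == 0xff000000);
        // 0x3fc (0xff << 2) encodes as value 0xff with rotation field 15.
        assert(decodeOp2Immediate(0xfff) == 0x3fc);
        return 0;
    }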
diff --git a/src/3rdparty/masm/assembler/ARMAssembler.h b/src/3rdparty/masm/assembler/ARMAssembler.h
new file mode 100644
index 0000000000..3888226b21
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMAssembler.h
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+ typedef uint32_t ARMWord;
+
+ namespace ARMRegisters {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3, S0 = r3, /* Same as thumb assembler. */
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12, S1 = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15
+ } RegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7, SD0 = d7, /* Same as thumb assembler. */
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31
+ } FPRegisterID;
+
+ } // namespace ARMRegisters
+
+ class ARMAssembler {
+ public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ ARMAssembler()
+ : m_indexOfTailOfLastWatchpoint(1)
+ {
+ }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ VMOV_F64 = 0x0eb00b40,
+ VADD_F64 = 0x0e300b00,
+ VDIV_F64 = 0x0e800b00,
+ VSUB_F64 = 0x0e300b40,
+ VMUL_F64 = 0x0e200b00,
+ VCMP_F64 = 0x0eb40b40,
+ VSQRT_F64 = 0x0eb10bc0,
+ VABS_F64 = 0x0eb00bc0,
+ VNEG_F64 = 0x0eb10b40,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+ BX = 0x012fff10,
+ VMOV_VFP64 = 0x0c400a10,
+ VMOV_ARM64 = 0x0c500a10,
+ VMOV_VFP32 = 0x0e000a10,
+ VMOV_ARM32 = 0x0e100a10,
+ VCVT_F64_S32 = 0x0eb80bc0,
+ VCVT_S32_F64 = 0x0ebd0b40,
+ VCVT_U32_F64 = 0x0ebc0b40,
+ VCVT_F32_F64 = 0x0eb70bc0,
+ VCVT_F64_F32 = 0x0eb70ac0,
+ VMRS_APSR = 0x0ef1fa10,
+ CLZ = 0x016f0f10,
+ BKPT = 0xe1200070,
+ BLX = 0x012fff30,
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ MOVW = 0x03000000,
+ MOVT = 0x03400000,
+#endif
+ NOP = 0xe1a00000,
+ };
+
+ enum {
+ Op2Immediate = (1 << 25),
+ ImmediateForHalfWordTransfer = (1 << 22),
+ Op2InvertedImmediate = (1 << 26),
+ SetConditionalCodes = (1 << 20),
+ Op2IsRegisterArgument = (1 << 25),
+ // Data transfer flags.
+ DataTransferUp = (1 << 23),
+ DataTransferWriteBack = (1 << 21),
+ DataTransferPostUpdate = (1 << 24),
+ DataTransferLoad = (1 << 20),
+ ByteDataTransfer = (1 << 22),
+ };
+
+ enum DataTransferTypeA {
+ LoadUint32 = 0x05000000 | DataTransferLoad,
+ LoadUint8 = 0x05400000 | DataTransferLoad,
+ StoreUint32 = 0x05000000,
+ StoreUint8 = 0x05400000,
+ };
+
+ enum DataTransferTypeB {
+ LoadUint16 = 0x010000b0 | DataTransferLoad,
+ LoadInt16 = 0x010000f0 | DataTransferLoad,
+ LoadInt8 = 0x010000d0 | DataTransferLoad,
+ StoreUint16 = 0x010000b0,
+ };
+
+ enum DataTransferTypeFloat {
+ LoadFloat = 0x0d000a00 | DataTransferLoad,
+ LoadDouble = 0x0d000b00 | DataTransferLoad,
+ StoreFloat = 0x0d000a00,
+ StoreDouble = 0x0d000b00,
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BranchOffsetMask = 0x00ffffff,
+ ConditionalFieldMask = 0xf0000000,
+ DataTransferOffsetMask = 0xfff,
+ };
+
+ enum {
+ MinimumBranchOffsetDistance = -0x00800000,
+ MaximumBranchOffsetDistance = 0x007fffff,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
+ };
+
+ static const ARMWord InvalidImmediate = 0xf0000000;
+ static const ARMWord InvalidBranchTarget = 0xffffffff;
+ static const int DefaultPrefetchOffset = 2;
+
+ static const ARMWord BlxInstructionMask = 0x012fff30;
+ static const ARMWord LdrOrAddInstructionMask = 0x0ff00000;
+ static const ARMWord LdrPcImmediateInstructionMask = 0x0f7f0000;
+
+ static const ARMWord AddImmediateInstruction = 0x02800000;
+ static const ARMWord BlxInstruction = 0x012fff30;
+ static const ARMWord LdrImmediateInstruction = 0x05900000;
+ static const ARMWord LdrPcImmediateInstruction = 0x051f0000;
+
+ // Instruction formatting
+
+ void emitInstruction(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+ ASSERT(((op2 & ~Op2Immediate) <= 0xfff) || (((op2 & ~ImmediateForHalfWordTransfer) <= 0xfff)));
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void emitDoublePrecisionInstruction(ARMWord op, int dd, int dn, int dm)
+ {
+ ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
+ m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
+ | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
+ | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
+ }
+
+ void emitSinglePrecisionInstruction(ARMWord op, int sd, int sn, int sm)
+ {
+ ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
+ m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
+ | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
+ | (sm >> 1) | ((sm & 0x1) << 5));
+ }
+
+ void bitAnd(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND, rd, rn, op2);
+ }
+
+ void bitAnds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void eor(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sub(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsb(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void add(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void adc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sbc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void tst(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TST | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void teq(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TEQ | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmp(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMP | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmn(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMN | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void orr(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mov(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV, rd, ARMRegisters::r0, op2);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ void movw(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVW | RD(rd) | op2);
+ }
+
+ void movt(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVT | RD(rd) | op2);
+ }
+#endif
+
+ void movs(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void bic(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mvn(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN, rd, ARMRegisters::r0, op2);
+ }
+
+ void mvns(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void mul(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | SetConditionalCodes | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+ void vmov_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
+ }
+
+ void vadd_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VADD_F64, dd, dn, dm);
+ }
+
+ void vdiv_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VDIV_F64, dd, dn, dm);
+ }
+
+ void vsub_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSUB_F64, dd, dn, dm);
+ }
+
+ void vmul_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMUL_F64, dd, dn, dm);
+ }
+
+ void vcmp_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCMP_F64, dd, 0, dm);
+ }
+
+ void vsqrt_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSQRT_F64, dd, 0, dm);
+ }
+
+ void vabs_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VABS_F64, dd, 0, dm);
+ }
+
+ void vneg_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VNEG_F64, dd, 0, dm);
+ }
+
+ void ldrImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm, true);
+ }
+
+ void ldrUniqueImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm);
+ }
+
+ void dtrUp(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void dtrUpRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void dtrDown(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void dtrDownRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void halfDtrUp(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void halfDtrUpRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rn, rm);
+ }
+
+ void halfDtrDown(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void halfDtrDownRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rn, rm);
+ }
+
+ void doubleDtrUp(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | DataTransferUp | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void doubleDtrDown(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void push(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | StoreUint32 | DataTransferWriteBack | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ void pop(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | (LoadUint32 ^ DataTransferPostUpdate) | DataTransferUp | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke(int reg, Condition cc = AL)
+ {
+ dtrDown(StoreUint32, ARMRegisters::sp, 0, reg, cc);
+ }
+
+ inline void peek(int reg, Condition cc = AL)
+ {
+ dtrUp(LoadUint32, reg, ARMRegisters::sp, 0, cc);
+ }
+
+ void vmov_vfp64(int sm, int rt, int rt2, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_arm64(int rt, int rt2, int sm, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_vfp32(int sn, int rt, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_VFP32, rt << 1, sn, 0);
+ }
+
+ void vmov_arm32(int rt, int sn, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_ARM32, rt << 1, sn, 0);
+ }
+
+ void vcvt_f64_s32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(!(sm & 0x1)); // sm must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
+ }
+
+ void vcvt_s32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_u32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_f64_f32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_F32, dd, 0, sm);
+ }
+
+ void vcvt_f32_f64(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F32_F64, dd, 0, sm);
+ }
+
+ void vmrs_apsr(Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | VMRS_APSR);
+ }
+
+ void clz(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | CLZ | RD(rd) | RM(rm));
+ }
+
+ void bkpt(ARMWord value)
+ {
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+ }
+
+ void nop()
+ {
+ m_buffer.putInt(NOP);
+ }
+
+ void bx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
+ }
+
+ AssemblerLabel blx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BLX, 0, 0, RM(rm));
+ return m_buffer.label();
+ }
+
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lslRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
+
+ // General helpers
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
+ AssemblerLabel result = m_buffer.label();
+ if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
+ result = label();
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = labelIgnoringWatchpoints();
+ while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
+ nop();
+ // The required instruction space is ensured by labelForWatchpoint.
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov(ARMRegisters::r0, ARMRegisters::r0);
+
+ return label();
+ }
+
+ AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
+ {
+ ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+ m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
+ ldrUniqueImmediate(rd, InvalidBranchTarget, cc);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
+ {
+ return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID, JITCompilationEffort);
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // DFG assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+ vmov_arm64(rd1, rd2, rn);
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ vmov_vfp64(rd, rn1, rn2);
+ }
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn)
+ {
+ // Check for call
+ if ((*insn & LdrPcImmediateInstructionMask) != LdrPcImmediateInstruction) {
+ // Must be BLX
+ ASSERT((*insn & BlxInstructionMask) == BlxInstruction);
+ insn--;
+ }
+
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetchOffset * sizeof(ARMWord);
+ if (*insn & DataTransferUp)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & DataTransferOffsetMask));
+ return reinterpret_cast<ARMWord*>(addr - (*insn & DataTransferOffsetMask));
+ }
+
+ static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+ {
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ if (*insn & 0x1)
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & DataTransferOffsetMask) >> 1));
+ return getLdrImmAddress(insn);
+ }
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~DataTransferOffsetMask));
+ return (load & ~DataTransferOffsetMask) | value;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Read pointers
+ static void* readPointer(void* from)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
+ ARMWord* address = getLdrImmAddress(instruction);
+ return *reinterpret_cast<void**>(address);
+ }
+
+ // Patch pointers
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* where, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
+ ASSERT((*instruction & 0x0f700000) == LoadUint32);
+ if (value >= 0)
+ *instruction = (*instruction & 0xff7ff000) | DataTransferUp | value;
+ else
+ *instruction = (*instruction & 0xff7ff000) | -value;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ // Linkers
+ static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
+ {
+ return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
+ ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+ *addr = toARMWord(to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetchOffset * sizeof(ARMWord));
+
+ if (!(difference & 1)) {
+ difference >>= 2;
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ // Direct branch.
+ instruction[0] = B | AL | (difference & BranchOffsetMask);
+ cacheFlush(instruction, sizeof(ARMWord));
+ return;
+ }
+ }
+
+ // Load target.
+ instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
+ instruction[1] = reinterpret_cast<ARMWord>(to);
+ cacheFlush(instruction, sizeof(ARMWord) * 2);
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(ARMWord) * 2;
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | LdrImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | AddImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void revertBranchPtrWithPatch(void* instructionStart, RegisterID rn, ARMWord imm)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+
+ ASSERT((instruction[2] & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+ instruction[0] = toARMWord(AL) | ((instruction[2] & 0x0fff0fff) + sizeof(ARMWord)) | RD(ARMRegisters::S1);
+ *getLdrImmAddress(instruction) = imm;
+ instruction[1] = toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(ARMRegisters::S1);
+ cacheFlush(instruction, 2 * sizeof(ARMWord));
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ return call.m_offset;
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2(ARMWord imm);
+
+ // Fast case if imm is known to be between 0 and 0xff
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return Op2Immediate | imm;
+ }
+
+ static ARMWord getOp2Half(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return ImmediateForHalfWordTransfer | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ static ARMWord getImm16Op2(ARMWord imm)
+ {
+ if (imm <= 0xffff)
+ return (imm & 0xf000) << 4 | (imm & 0xfff);
+ return InvalidImmediate;
+ }
+#endif
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+ ARMWord encodeComplexImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+
+ // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= MaximumBranchOffsetDistance && offset >= MinimumBranchOffsetDistance));
+ return AL | B | (offset & BranchOffsetMask);
+ }
+
+#if OS(LINUX) && COMPILER(GCC)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
+#endif
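+
+ // In the inline assembly above, r7 = 0xf0000 + 2 selects the private ARM
+ // __ARM_NR_cacheflush syscall (__ARM_NR_BASE + 2); r0 and r1 pass the
+ // [begin, end) address range and r2 carries the flags, which must be 0.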
+
+#if OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t);
+#else
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(LINUX) && COMPILER(GCC)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+#elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#elif OS(QNX)
+ msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+#endif
+
+ private:
+ static ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg;
+ }
+
+ static ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 8;
+ }
+
+ static ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 12;
+ }
+
+ static ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 16;
+ }
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & ConditionalFieldMask;
+ }
+
+ static ARMWord toARMWord(Condition cc)
+ {
+ return static_cast<ARMWord>(cc);
+ }
+
+ static ARMWord toARMWord(uint32_t u)
+ {
+ return static_cast<ARMWord>(u);
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ uint32_t m_indexOfTailOfLastWatchpoint;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // ARMAssembler_h
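
Putting the interface above together, a short hypothetical usage sketch
(driver code written for this note, not part of the patch; it presupposes a
build with ENABLE(ASSEMBLER) and CPU(ARM_TRADITIONAL)):

    // Emits:
    //   add r0, r1, #100        ; operand-2 immediate via getOp2Byte()
    //   add r0, r0, r2, lsl #2  ; shifted-register operand 2 via lsl()
    void emitExample(JSC::ARMAssembler& masm)
    {
        using namespace JSC;
        masm.add(ARMRegisters::r0, ARMRegisters::r1,
                 ARMAssembler::getOp2Byte(100));
        masm.add(ARMRegisters::r0, ARMRegisters::r0,
                 ARMAssembler::lsl(ARMRegisters::r2, 2));
    }

Both calls funnel through emitInstruction(), which ORs the condition field,
the opcode, the RN/RD register fields and operand 2 into a single 32-bit word
in m_buffer.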
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.cpp b/src/3rdparty/masm/assembler/ARMv7Assembler.cpp
new file mode 100644
index 0000000000..faca66421b
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
+}
+
+#endif
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.h b/src/3rdparty/masm/assembler/ARMv7Assembler.h
new file mode 100644
index 0000000000..7dcf656921
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.h
@@ -0,0 +1,2790 @@
+/*
+ * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+ typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, wr = r7, // thumb work register
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11, fp = r11, // frame pointer
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
+ } RegisterID;
+
+ typedef enum {
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ } FPSingleRegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ } FPDoubleRegisterID;
+
+ typedef enum {
+ q0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
+ q16,
+ q17,
+ q18,
+ q19,
+ q20,
+ q21,
+ q22,
+ q23,
+ q24,
+ q25,
+ q26,
+ q27,
+ q28,
+ q29,
+ q30,
+ q31,
+ } FPQuadRegisterID;
+
+ inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
+ {
+ ASSERT(reg < d16);
+ return (FPSingleRegisterID)(reg << 1);
+ }
+
+ inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
+ {
+ ASSERT(!(reg & 1));
+ return (FPDoubleRegisterID)(reg >> 1);
+ }
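+
+    // Illustrative aliasing (not in the original source): the VFP single and
+    // double banks overlap, so asSingle(d1) == s2 (d1 occupies s2:s3) and
+    // asDouble(s4) == d2. Only d0-d15 alias the single-precision bank.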
+}
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+    // byte0 contains the least significant byte; not using an array keeps client code endian agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
+ ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+ {
+        if (value & ~((1 << N) - 1)) /* check whether any of the top N bits (of 2N bits) are set */
+ value >>= N; /* if any were set, lose the bottom N */
+ else /* if none of the top N bits are set, */
+ zeros += N; /* then we have identified N leading zeros */
+ }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
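+
+    // Worked example (illustrative): countLeadingZeros(0x00010000) shifts at
+    // the 16-bit step (the top half is non-empty), then accumulates
+    // 8 + 4 + 2 + 1 at the remaining steps, returning 15.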
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+ // Make sure this constructor is only reached with type TypeUInt16;
+ // this extra parameter makes the code a little clearer by making it
+ // explicit at call sites which type is being constructed
+ ASSERT_UNUSED(type, type == TypeUInt16);
+
+ m_value.asInt = value;
+ }
+
+public:
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+            // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+ encoding.immediate = bytes.byte1;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
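+
+    // Illustrative inputs (not from the original source): 0x000002A8 encodes
+    // as an 8-bit value with a rotation; 0x00AB00AB matches pattern 1,
+    // 0xAB00AB00 pattern 2, and 0xABABABAB pattern 3. A value such as
+    // 0x00123456 has no encoding and yields !isValid().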
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+        // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+    // The 'make' methods, above, return a !isValid() value if the argument
+    // cannot be represented as the requested type. This method always
+    // succeeds, since any uint16_t argument can be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ uint16_t asUInt16() const { return m_value.asInt; }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
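+
+// Typical usage (illustrative): makeUInt12OrEncodedImm(0xfff) succeeds as a
+// plain 12-bit value, while makeUInt12OrEncodedImm(0x1000) fails the 12-bit
+// test but still succeeds as an encoded (shifted 8-bit) immediate.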
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 6;
+ };
+ } m_u;
+};
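+
+// Worked example (illustrative): ShiftTypeAndAmount(SRType_LSL, 5) stores
+// type = 0 and amount = 5 in the overlapping bit fields, so lo4() == 0b0100
+// (the low two amount bits above the two type bits) and hi4() == 0b0001
+// (the remaining amount bits), ready for the split Thumb-2 shift fields.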
+
+class ARMv7Assembler {
+public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
+ typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
+ typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS, ConditionCS = ConditionHS,
+ ConditionLO, ConditionCC = ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+ ConditionInvalid
+ } Condition;
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
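+    // The jump enums pack a maximum byte size into the upper bits and a
+    // distinguishing index into the low three bits; for example,
+    // JUMP_ENUM_SIZE(JumpCondition) recovers 12 bytes, the six halfwords
+    // reserved for a conditional jump.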
+ enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+ JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
+ JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
+ JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
+ JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
+ };
+ enum JumpLinkType {
+ LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+ LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
+ LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
+ LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
+ LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
+ LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
+ LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
+ LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
+ };
+
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ }
+ void operator=(const LinkRecord& other)
+ {
+ data.copyTypes.content[0] = other.data.copyTypes.content[0];
+ data.copyTypes.content[1] = other.data.copyTypes.content[1];
+ data.copyTypes.content[2] = other.data.copyTypes.content[2];
+ }
+ intptr_t from() const { return data.realTypes.m_from; }
+ void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+ intptr_t to() const { return data.realTypes.m_to; }
+ JumpType type() const { return data.realTypes.m_type; }
+ JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+ Condition condition() const { return data.realTypes.m_condition; }
+ private:
+ union {
+ struct RealTypes {
+ intptr_t m_from : 31;
+ intptr_t m_to : 31;
+ JumpType m_type : 8;
+ JumpLinkType m_linkType : 8;
+ Condition m_condition : 16;
+ } realTypes;
+ struct CopyTypes {
+ uint32_t content[3];
+ } copyTypes;
+ COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+ } data;
+ };
+
+ ARMv7Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ static bool BadReg(RegisterID reg)
+ {
+ return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+ }
+
+ uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
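+
+    // Illustrative split (not from the original source): d17 is register
+    // number 17 (binary 10001), so the low nibble 0001 lands at lowBitsShift
+    // and the top bit sets the single bit at highBitShift, matching the
+    // split D:Vd register fields in VFP encodings.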
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_RSB_imm_T1 = 0x4240,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_STR_reg_T1 = 0x5000,
+ OP_STRH_reg_T1 = 0x5200,
+ OP_STRB_reg_T1 = 0x5400,
+ OP_LDRSB_reg_T1 = 0x5600,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_LDRB_reg_T1 = 0x5C00,
+ OP_LDRSH_reg_T1 = 0x5E00,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_STRB_imm_T1 = 0x7000,
+ OP_LDRB_imm_T1 = 0x7800,
+ OP_STRH_imm_T1 = 0x8000,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ OP_NOP_T1 = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_B_T1 = 0xD000,
+ OP_B_T2 = 0xE000,
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ORR_S_reg_T2 = 0xEA50,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_VMOV_CtoD = 0xEC00,
+ OP_VMOV_DtoC = 0xEC10,
+ OP_FSTS = 0xED00,
+ OP_VSTR = 0xED00,
+ OP_FLDS = 0xED10,
+ OP_VLDR = 0xED10,
+ OP_VMOV_CtoS = 0xEE00,
+ OP_VMOV_StoC = 0xEE10,
+ OP_VMUL_T2 = 0xEE20,
+ OP_VADD_T2 = 0xEE30,
+ OP_VSUB_T2 = 0xEE30,
+ OP_VDIV = 0xEE80,
+ OP_VABS_T2 = 0xEEB0,
+ OP_VCMP = 0xEEB0,
+ OP_VCVT_FPIVFP = 0xEEB0,
+ OP_VMOV_T2 = 0xEEB0,
+ OP_VMOV_IMM_T2 = 0xEEB0,
+ OP_VMRS = 0xEEB0,
+ OP_VNEG_T2 = 0xEEB0,
+ OP_VSQRT_T1 = 0xEEB0,
+ OP_VCVTSD_T1 = 0xEEB0,
+ OP_VCVTDS_T1 = 0xEEB0,
+ OP_B_T3a = 0xF000,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_ADC_imm = 0xF140,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_RSB_imm_T2 = 0xF1C0,
+ OP_RSB_S_imm_T2 = 0xF1D0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_UBFX_T1 = 0xF3C0,
+ OP_NOP_T2a = 0xF3AF,
+ OP_STRB_imm_T3 = 0xF800,
+ OP_STRB_reg_T2 = 0xF800,
+ OP_LDRB_imm_T3 = 0xF810,
+ OP_LDRB_reg_T2 = 0xF810,
+ OP_STRH_imm_T3 = 0xF820,
+ OP_STRH_reg_T2 = 0xF820,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_STRB_imm_T2 = 0xF880,
+ OP_LDRB_imm_T2 = 0xF890,
+ OP_STRH_imm_T2 = 0xF8A0,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LDRSB_reg_T2 = 0xF910,
+ OP_LDRSH_reg_T2 = 0xF930,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_CLZ = 0xFAB0,
+ OP_SMULL_T1 = 0xFB80,
+#if CPU(APPLE_ARMV7S)
+ OP_SDIV_T1 = 0xFB90,
+ OP_UDIV_T1 = 0xFBB0,
+#endif
+ } OpcodeID1;
+
+ typedef enum {
+ OP_VADD_T2b = 0x0A00,
+ OP_VDIVb = 0x0A00,
+ OP_FLDSb = 0x0A00,
+ OP_VLDRb = 0x0A00,
+ OP_VMOV_IMM_T2b = 0x0A00,
+ OP_VMOV_T2b = 0x0A40,
+ OP_VMUL_T2b = 0x0A00,
+ OP_FSTSb = 0x0A00,
+ OP_VSTRb = 0x0A00,
+ OP_VMOV_StoCb = 0x0A10,
+ OP_VMOV_CtoSb = 0x0A10,
+ OP_VMOV_DtoCb = 0x0A10,
+ OP_VMOV_CtoDb = 0x0A10,
+ OP_VMRSb = 0x0A10,
+ OP_VABS_T2b = 0x0A40,
+ OP_VCMPb = 0x0A40,
+ OP_VCVT_FPIVFPb = 0x0A40,
+ OP_VNEG_T2b = 0x0A40,
+ OP_VSUB_T2b = 0x0A40,
+ OP_VSQRT_T1b = 0x0A40,
+ OP_VCVTSD_T1b = 0x0A40,
+ OP_VCVTDS_T1b = 0x0A40,
+ OP_NOP_T2b = 0x8000,
+ OP_B_T3b = 0x8000,
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // false means else!
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ return (condition << 4) | mask;
+ }
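+
+    // Worked example (illustrative): ifThenElse(ConditionEQ, false) encodes
+    // an ITE EQ block. ConditionEQ is 0, so the else bit contributes 1 << 3
+    // and the terminator 4, giving mask 0xC and return value 0x0C; thus
+    // it(ConditionEQ, false) emits the halfword 0xBF0C.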
+
+public:
+
+ void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
+ }
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARMRegisters::sp) {
+ ASSERT(!(imm.getUInt16() & 3));
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ return;
+ } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
+
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
+ {
+ ASSERT(rm != ARMRegisters::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return m_formatter.label();
+ }
+
+ void bkpt(uint8_t imm = 0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
+ }
+
+ ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
+ }
+
+ ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt7());
+ ASSERT(!((rt | rn) & 8));
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then this is a post-indexed load.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed load;
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+    // if (wback) REG[rn] = _tmp
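+    //
+    // Illustrative addressing modes (values not from the original source):
+    //   ldr(rt, rn, 4, true, false)  ->  ldr rt, [rn, #4]    (offset)
+    //   ldr(rt, rn, 4, true, true)   ->  ldr rt, [rn, #4]!   (pre-indexed)
+    //   ldr(rt, rn, 4, false, true)  ->  ldr rt, [rn], #4    (post-indexed)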
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then this is a post-indexed load.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed load;
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+    // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt5())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+#if OS(LINUX) || OS(QNX)
+ static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
+ {
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
+ address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
+ address[4] = OP_CMP_reg_T2 | left;
+ cacheFlush(address, sizeof(uint16_t) * 5);
+ }
+#else
+ static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
+ cacheFlush(address, sizeof(uint16_t) * 2);
+ }
+#endif
+
+ ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
+
+ ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ sub(rd, zero, rm);
+ }
+
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+#if CPU(APPLE_ARMV7S)
+ ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then this is a post-indexed store.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed store;
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then this is a post-indexed store.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed store;
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then this is a post-indexed store.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed store;
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ if (!((rd | rn) & 8) && !imm.getUInt12())
+ m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+    // NOTE: In an IT block, sub doesn't modify the flags register.
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
+ {
+ ASSERT(lsb < 32);
+ ASSERT((width >= 1) && (width <= 32));
+ ASSERT((lsb + width) <= 32);
+ m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
+ }
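+
+    // e.g. ubfx(r0, r1, 8, 4) extracts bits 8..11 of r1 into r0, i.e.
+    // r0 = (r1 >> 8) & 0xf (an illustrative call, not from the source).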
+
+#if CPU(APPLE_ARMV7S)
+ ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
+ }
+
+ void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+ }
+
+ void vcmpz(FPDoubleRegisterID rd)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
+ }
+
+ void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+        // 'true' selects a 64-bit (double) operand; the vcvtOp flags are (toInt, unsigned, roundZero).
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
+ }
+
+ void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+        // 'true' selects a 64-bit (double) operand; the vcvtOp flags are (toInt, unsigned, roundZero).
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
+ }
+
+ void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+        // 'true' selects a 64-bit (double) operand; the vcvtOp flags are (toInt, unsigned, roundZero).
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
+ }
+
+ void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
+ }
+
+ void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
+ }
+
+ void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
+ }
+
+ void vmov(RegisterID rd, FPSingleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd));
+ m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
+ }
+
+ void vmov(FPSingleRegisterID rd, RegisterID rn)
+ {
+ ASSERT(!BadReg(rn));
+ m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
+ }
+
+ void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd1));
+ ASSERT(!BadReg(rd2));
+ m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
+ }
+
+ void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ ASSERT(!BadReg(rn1));
+ ASSERT(!BadReg(rn2));
+ m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
+ }
+
+ void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
+ {
+ m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
+ }
+
+ void vmrs(RegisterID reg = ARMRegisters::pc)
+ {
+ ASSERT(reg != ARMRegisters::sp);
+ m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
+ }
+
+ void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
+ }
+
+ void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
+ }
+
+ void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
+ }
+
+ void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
+ }
+
+ void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
+ }
+
+ void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
+ }
+
+ void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
+ }
+
+ void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
+ }
+
+ void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
+ }
+
+ void nop()
+ {
+ m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
+ }
+
+ void nopw()
+ {
+ m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
+ nopw();
+ else
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+ }
+
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+ // Assembler admin methods:
+
+ static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+ {
+ return a.from() < b.from();
+ }
+
+ bool canCompact(JumpType jumpType)
+ {
+ // The following cannot be compacted:
+ // JumpFixed: represents custom jump sequence
+ // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+ // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+ return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+ }
+
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ {
+ if (jumpType == JumpFixed)
+ return LinkInvalid;
+
+        // For a patchable jump we must leave space for the longest code sequence.
+ if (jumpType == JumpNoConditionFixedSize)
+ return LinkBX;
+ if (jumpType == JumpConditionFixedSize)
+ return LinkConditionalBX;
+
+ const int paddingSize = JUMP_ENUM_SIZE(jumpType);
+
+ if (jumpType == JumpCondition) {
+ // 2-byte conditional T1
+ const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
+ if (canBeJumpT1(jumpT1Location, to))
+ return LinkJumpT1;
+ // 4-byte conditional T3
+ const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
+ if (canBeJumpT3(jumpT3Location, to))
+ return LinkJumpT3;
+ // 4-byte conditional T4 with IT
+ const uint16_t* conditionalJumpT4Location =
+ reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
+ if (canBeJumpT4(conditionalJumpT4Location, to))
+ return LinkConditionalJumpT4;
+ } else {
+ // 2-byte unconditional T2
+ const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
+ if (canBeJumpT2(jumpT2Location, to))
+ return LinkJumpT2;
+ // 4-byte unconditional T4
+ const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
+ if (canBeJumpT4(jumpT4Location, to))
+ return LinkJumpT4;
+ // use long jump sequence
+ return LinkBX;
+ }
+
+ ASSERT(jumpType == JumpCondition);
+ return LinkConditionalBX;
+ }
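+
+ // Illustrative summary of the ranges the canBeJumpT* predicates below imply:
+ // T1: 9-bit signed displacement, roughly +/-256B (2-byte conditional branch)
+ // T2: 12-bit signed displacement, roughly +/-2KB (2-byte unconditional branch)
+ // T3: 21-bit signed displacement, roughly +/-1MB (4-byte conditional branch)
+ // T4: 25-bit signed displacement, roughly +/-16MB (4 bytes, IT-prefixed when conditional)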
+
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ JumpLinkType linkType = computeJumpType(record.type(), from, to);
+ record.setLinkType(linkType);
+ return linkType;
+ }
+
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
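+
+ // Note (illustrative): during branch compaction the already-processed part of the
+ // buffer is reused as a table of int32_t deltas, one per 4-byte region, which is
+ // what executableOffsetFor() above reads back when relocating later locations.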
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ switch (record.linkType()) {
+ case LinkJumpT1:
+ linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT2:
+ linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT3:
+ linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT4:
+ linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalJumpT4:
+ linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalBX:
+ linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkBX:
+ linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ void* unlinkedCode() { return m_formatter.data(); }
+ size_t codeSize() const { return m_formatter.codeSize(); }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
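+ //
+ // A hedged usage sketch (the call-site names are assumptions):
+ //
+ // linkCall(code, callLabel, function); // while the buffer is still writable
+ // /* ...code is copied into the executable pool... */
+ // relinkCall(callSite, newFunction); // patches in place and cacheFlush()es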
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkJumpAbsolute(location, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+ ASSERT(from.isSet());
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+ linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+
+ cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setInt32(where, value, true);
+ }
+
+ static void repatchCompact(void* where, int32_t offset)
+ {
+ ASSERT(offset >= -255 && offset <= 255);
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ offset |= (add << 9);
+ offset |= (1 << 10);
+ offset |= (1 << 11);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(where);
+ location[1] &= ~((1 << 12) - 1);
+ location[1] |= offset;
+ cacheFlush(location, sizeof(uint16_t) * 2);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setPointer(where, value, true);
+ }
+
+ static void* readPointer(void* where)
+ {
+ return reinterpret_cast<void*>(readInt32(where));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
+
+#if OS(LINUX) || OS(QNX)
+ if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+ linkJumpT4(ptr, to);
+ cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+ } else {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
+ linkBX(ptr, to);
+ cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
+ }
+#else
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+ linkJumpT4(ptr, to);
+ cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+#endif
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+#if OS(LINUX) || OS(QNX)
+ return 10;
+#else
+ return 4;
+#endif
+ }
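+
+ // Note (illustrative): on Linux/QNX replaceWithJump() above may need the full
+ // movw/movt/bx sequence (5 halfwords = 10 bytes); on other platforms a 4-byte
+ // T4 branch is assumed to always reach, so only 4 bytes are reserved.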
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+ switch (ptr[0] & 0xFFF0) {
+ case OP_LDR_imm_T3:
+ break;
+ case OP_ADD_imm_T3:
+ ASSERT(!(ptr[1] & 0xF000));
+ ptr[0] &= 0x000F;
+ ptr[0] |= OP_LDR_imm_T3;
+ ptr[1] |= (ptr[1] & 0x0F00) << 4;
+ ptr[1] &= 0xF0FF;
+ cacheFlush(ptr, sizeof(uint16_t) * 2);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+ switch (ptr[0] & 0xFFF0) {
+ case OP_LDR_imm_T3:
+ ASSERT(!(ptr[1] & 0x0F00));
+ ptr[0] &= 0x000F;
+ ptr[0] |= OP_ADD_imm_T3;
+ ptr[1] |= (ptr[1] & 0xF000) >> 4;
+ ptr[1] &= 0x0FFF;
+ cacheFlush(ptr, sizeof(uint16_t) * 2);
+ break;
+ case OP_ADD_imm_T3:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+#if OS(LINUX)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
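+ // r7 = 0x000f0002 selects the ARM-private cacheflush syscall
+ // (__ARM_NR_cacheflush); r0 and r1 carry the range and r2 (flags) is 0.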
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "movw r7, #0x2\n"
+ "movt r7, #0xf\n"
+ "movs r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
+#endif
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(IOS)
+ sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+#elif OS(QNX)
+#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#endif
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+
+private:
+ // VFP operations commonly take one or more 5-bit operands, typically representing a
+ // floating point register number. This will commonly be encoded in the instruction
+ // in two parts, with one single bit field, and one 4-bit field. In the case of
+ // double precision operands the high bit of the register number will be encoded
+ // separately, and for single precision operands the low bit of the register number
+ // will be encoded individually.
+ // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
+ // field to be encoded together in the instruction (the low 4-bits of a double
+ // register number, or the high 4-bits of a single register number), and bit 4
+ // contains the bit value to be encoded individually.
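+ // For example (illustrative): s3 is stored as ((3 & 1) << 4) | (3 >> 1) = 0x11 and
+ // d17 is stored as 17 directly, so both yield bits1() == 1 and bits4() == 1.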
+ struct VFPOperand {
+ explicit VFPOperand(uint32_t value)
+ : m_value(value)
+ {
+ ASSERT(!(m_value & ~0x1f));
+ }
+
+ VFPOperand(FPDoubleRegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(RegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(FPSingleRegisterID reg)
+ : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
+ {
+ }
+
+ uint32_t bits1()
+ {
+ return m_value >> 4;
+ }
+
+ uint32_t bits4()
+ {
+ return m_value & 0xf;
+ }
+
+ uint32_t m_value;
+ };
+
+ VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
+ {
+ // Cannot specify rounding when converting to float.
+ ASSERT(toInteger || !isRoundZero);
+
+ uint32_t op = 0x8;
+ if (toInteger) {
+ // opc2 indicates both toInteger & isUnsigned.
+ op |= isUnsigned ? 0x4 : 0x5;
+ // 'op' field in instruction is isRoundZero
+ if (isRoundZero)
+ op |= 0x10;
+ } else {
+ ASSERT(!isRoundZero);
+ // 'op' field in instruction is isUnsigned
+ if (!isUnsigned)
+ op |= 0x10;
+ }
+ return VFPOperand(op);
+ }
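+
+ // Illustrative values produced above: vcvtOp(true, false, true) yields
+ // 0x8 | 0x5 | 0x10 = 0x1d (to signed integer, round-to-zero), and
+ // vcvtOp(false, false, false) yields 0x8 | 0x10 = 0x18 (from signed integer).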
+
+ static void setInt32(void* code, uint32_t value, bool flush)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+ location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+ location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+
+ if (flush)
+ cacheFlush(location - 4, 4 * sizeof(uint16_t));
+ }
+
+ static int32_t readInt32(void* code)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+ ARMThumbImmediate lo16;
+ ARMThumbImmediate hi16;
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
+ uint32_t result = hi16.asUInt16();
+ result <<= 16;
+ result |= lo16.asUInt16();
+ return static_cast<int32_t>(result);
+ }
+
+ static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
+ {
+ // Requires us to have planted a LDR_imm_T1
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt7());
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+ location[0] |= (imm.getUInt7() >> 2) << 6;
+ cacheFlush(location, sizeof(uint16_t));
+ }
+
+ static void setPointer(void* code, void* value, bool flush)
+ {
+ setInt32(code, reinterpret_cast<uint32_t>(value), flush);
+ }
+
+ static bool isB(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+ }
+
+ static bool isBX(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] & 0xff87) == OP_BX;
+ }
+
+ static bool isMOV_imm_T3(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+ }
+
+ static bool isMOVT(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
+ }
+
+ static bool isNOP_T1(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return instruction[0] == OP_NOP_T1;
+ }
+
+ static bool isNOP_T2(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+ }
+
+ static bool canBeJumpT1(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+ return ((relative << 23) >> 23) == relative;
+ }
+
+ static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+ return ((relative << 20) >> 20) == relative;
+ }
+
+ static bool canBeJumpT3(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ return ((relative << 11) >> 11) == relative;
+ }
+
+ static bool canBeJumpT4(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ return ((relative << 7) >> 7) == relative;
+ }
+
+ void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT1(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+ }
+
+ static void linkJumpT2(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT2(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+ }
+
+ void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT3(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+ instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+ }
+
+ static void linkJumpT4(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT4(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
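+ // (Illustrative: the T4 encoding stores these bits as J1 = NOT(I1 XOR S) and
+ // J2 = NOT(I2 XOR S), so for a non-negative offset, where S is 0, the XOR
+ // above inverts bits 22 and 23 into the form the instruction expects.)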
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ }
+
+ void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ instruction[-3] = ifThenElse(cond) | OP_IT;
+ linkJumpT4(instruction, target);
+ }
+
+ static void linkBX(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+
+ void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ linkBX(instruction, target);
+ instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+ }
+
+ static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+ || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+ if (canBeJumpT4(instruction, target)) {
+ // There may be a better way to fix this, but right now put the NOPs first, since in the
+ // case of a conditional branch this will be coming after an ITTT predicating *three*
+ // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
+ // variable width encoding - the previous instruction might *look* like an ITTT but
+ // actually be the second half of a 2-word op.
+ instruction[-5] = OP_NOP_T1;
+ instruction[-4] = OP_NOP_T2a;
+ instruction[-3] = OP_NOP_T2b;
+ linkJumpT4(instruction, target);
+ } else {
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+ {
+ return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+ }
+
+ static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
+ {
+ result.m_value.i = (value >> 10) & 1;
+ result.m_value.imm4 = value & 15;
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+ {
+ return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
+ }
+
+ static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
+ {
+ result.m_value.imm3 = (value >> 12) & 7;
+ result.m_value.imm8 = value & 255;
+ }
+
+ class ARMInstructionFormatter {
+ public:
+ ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+
+ ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ARMThumbImmediate newImm = imm;
+ newImm.m_value.imm4 = imm4;
+
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
+ }
+
+ // Formats instructions of the pattern:
+ // 111111111B11aaaa:bbbb222SA2C2cccc
+ // Where 1s in the pattern come from op1, 2s in the pattern come from op2, and S is the provided size bit.
+ // Operands provide 5-bit values of the form Aaaaa, Bbbbb, Ccccc.
+ ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
+ {
+ ASSERT(!(op1 & 0x004f));
+ ASSERT(!(op2 & 0xf1af));
+ m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
+ m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
+ }
+
+ // ARM VFP addresses can be offset by a sign-magnitude immediate (a U bit plus an
+ // 8-bit word offset), left-shifted by 2 - i.e. +/-(0..255) 32-bit words.
+ ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
+ {
+ bool up = true;
+ if (imm < 0) {
+ imm = -imm;
+ up = false;
+ }
+
+ uint32_t offset = imm;
+ ASSERT(!(offset & ~0x3fc));
+ offset >>= 2;
+
+ m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
+ m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ private:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
new file mode 100644
index 0000000000..95eaf7d99d
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbstractMacroAssembler_h
+#define AbstractMacroAssembler_h
+
+#include "AssemblerBuffer.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/CryptographicallyRandomNumber.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/UnusedParam.h>
+
+#if ENABLE(ASSEMBLER)
+
+
+#if PLATFORM(QT)
+#define ENABLE_JIT_CONSTANT_BLINDING 0
+#endif
+
+#ifndef ENABLE_JIT_CONSTANT_BLINDING
+#define ENABLE_JIT_CONSTANT_BLINDING 1
+#endif
+
+namespace JSC {
+
+class JumpReplacementWatchpoint;
+class LinkBuffer;
+class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
+
+template <class AssemblerType>
+class AbstractMacroAssembler {
+public:
+ friend class JITWriteBarrierBase;
+ typedef AssemblerType AssemblerType_T;
+
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssemblerCodeRef CodeRef;
+
+ class Jump;
+
+ typedef typename AssemblerType::RegisterID RegisterID;
+
+ // Section 1: MacroAssembler operand types
+ //
+ // The following types are used as operands to MacroAssembler operations,
+ // describing immediate and memory operands to the instructions to be planted.
+
+ enum Scale {
+ TimesOne,
+ TimesTwo,
+ TimesFour,
+ TimesEight,
+ };
+
+ // Address:
+ //
+ // Describes a simple base-offset address.
+ struct Address {
+ explicit Address(RegisterID base, int32_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ struct ExtendedAddress {
+ explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ intptr_t offset;
+ };
+
+ // ImplicitAddress:
+ //
+ // This class is used for explicit 'load' and 'store' operations
+ // (as opposed to situations in which a memory operand is provided
+ // to a generic operation, such as an integer arithmetic instruction).
+ //
+ // In the case of a load (or store) operation we want to permit
+ // addresses to be implicitly constructed, e.g. the two calls:
+ //
+ // load32(Address(addrReg), destReg);
+ // load32(addrReg, destReg);
+ //
+ // Are equivalent, and the explicit wrapping of the Address in the former
+ // is unnecessary.
+ struct ImplicitAddress {
+ ImplicitAddress(RegisterID base)
+ : base(base)
+ , offset(0)
+ {
+ }
+
+ ImplicitAddress(Address address)
+ : base(address.base)
+ , offset(address.offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // BaseIndex:
+ //
+ // Describes a complex addressing mode.
+ struct BaseIndex {
+ BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+ : base(base)
+ , index(index)
+ , scale(scale)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ RegisterID index;
+ Scale scale;
+ int32_t offset;
+ };
+
+ // AbsoluteAddress:
+ //
+ // Describes a memory operand given by a pointer. For regular load & store
+ // operations an unwrapped void* will be used, rather than using this.
+ struct AbsoluteAddress {
+ explicit AbsoluteAddress(const void* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ const void* m_ptr;
+ };
+
+ // TrustedImmPtr:
+ //
+ // A pointer-sized immediate operand to an instruction - this is wrapped
+ // in a class requiring explicit construction in order to differentiate
+ // it from pointers used as absolute addresses to memory operations.
+ struct TrustedImmPtr {
+ TrustedImmPtr() { }
+
+ explicit TrustedImmPtr(const void* value)
+ : m_value(value)
+ {
+ }
+
+ // This is only here so that TrustedImmPtr(0) does not confuse the C++
+ // overload handling rules.
+ explicit TrustedImmPtr(int value)
+ : m_value(0)
+ {
+ ASSERT_UNUSED(value, !value);
+ }
+
+ explicit TrustedImmPtr(size_t value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ }
+
+ intptr_t asIntptr()
+ {
+ return reinterpret_cast<intptr_t>(m_value);
+ }
+
+ const void* m_value;
+ };
+
+ struct ImmPtr :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImmPtr
+#else
+ public TrustedImmPtr
+#endif
+ {
+ explicit ImmPtr(const void* value)
+ : TrustedImmPtr(value)
+ {
+ }
+
+ TrustedImmPtr asTrustedImmPtr() { return *this; }
+ };
+
+ // TrustedImm32:
+ //
+ // A 32-bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm32 {
+ TrustedImm32() { }
+
+ explicit TrustedImm32(int32_t value)
+ : m_value(value)
+ {
+ }
+
+#if !CPU(X86_64)
+ explicit TrustedImm32(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int32_t m_value;
+ };
+
+
+ struct Imm32 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm32
+#else
+ public TrustedImm32
+#endif
+ {
+ explicit Imm32(int32_t value)
+ : TrustedImm32(value)
+ {
+ }
+#if !CPU(X86_64)
+ explicit Imm32(TrustedImmPtr ptr)
+ : TrustedImm32(ptr)
+ {
+ }
+#endif
+ const TrustedImm32& asTrustedImm32() const { return *this; }
+
+ };
+
+ // TrustedImm64:
+ //
+ // A 64-bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm64 {
+ TrustedImm64() { }
+
+ explicit TrustedImm64(int64_t value)
+ : m_value(value)
+ {
+ }
+
+#if CPU(X86_64)
+ explicit TrustedImm64(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int64_t m_value;
+ };
+
+ struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm64
+#else
+ public TrustedImm64
+#endif
+ {
+ explicit Imm64(int64_t value)
+ : TrustedImm64(value)
+ {
+ }
+#if CPU(X86_64)
+ explicit Imm64(TrustedImmPtr ptr)
+ : TrustedImm64(ptr)
+ {
+ }
+#endif
+ const TrustedImm64& asTrustedImm64() const { return *this; }
+ };
+
+ // Section 2: MacroAssembler code buffer handles
+ //
+ // The following types are used to reference items in the code buffer
+ // during JIT code generation. For example, the type Jump is used to
+ // track the location of a jump instruction so that it may later be
+ // linked to a label marking its destination.
+
+
+ // Label:
+ //
+ // A Label records a point in the generated instruction stream, typically such that
+ // it may be used as a destination for a jump.
+ class Label {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend struct DFG::OSRExit;
+ friend class Jump;
+ friend class JumpReplacementWatchpoint;
+ friend class MacroAssemblerCodeRef;
+ friend class LinkBuffer;
+ friend class Watchpoint;
+
+ public:
+ Label()
+ {
+ }
+
+ Label(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // ConvertibleLoadLabel:
+ //
+ // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+ // so that:
+ //
+ // loadPtr(Address(a, i), b)
+ //
+ // becomes:
+ //
+ // addPtr(TrustedImmPtr(i), a, b)
+ class ConvertibleLoadLabel {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+
+ public:
+ ConvertibleLoadLabel()
+ {
+ }
+
+ ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelPtr:
+ //
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+ // patched after the code has been generated.
+ class DataLabelPtr {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelPtr()
+ {
+ }
+
+ DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabel32:
+ //
+ // A DataLabel32 is used to refer to a location in the code containing a 32-bit
+ // constant to be patched after the code has been generated.
+ class DataLabel32 {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabel32()
+ {
+ }
+
+ DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ AssemblerLabel label() const { return m_label; }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelCompact:
+ //
+ // A DataLabelCompact is used to refer to a location in the code containing a
+ // compact immediate to be patched after the code has been generated.
+ class DataLabelCompact {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelCompact()
+ {
+ }
+
+ DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ DataLabelCompact(AssemblerLabel label)
+ : m_label(label)
+ {
+ }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // Call:
+ //
+ // A Call object is a reference to a call instruction that has been planted
+ // into the code buffer - it is typically used to link the call, setting the
+ // relative offset such that when executed it will call to the desired
+ // destination.
+ class Call {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+
+ public:
+ enum Flags {
+ None = 0x0,
+ Linkable = 0x1,
+ Near = 0x2,
+ LinkableNear = 0x3,
+ };
+
+ Call()
+ : m_flags(None)
+ {
+ }
+
+ Call(AssemblerLabel jmp, Flags flags)
+ : m_label(jmp)
+ , m_flags(flags)
+ {
+ }
+
+ bool isFlagSet(Flags flag)
+ {
+ return m_flags & flag;
+ }
+
+ static Call fromTailJump(Jump jump)
+ {
+ return Call(jump.m_label, Linkable);
+ }
+
+ AssemblerLabel m_label;
+ private:
+ Flags m_flags;
+ };
+
+ // Jump:
+ //
+ // A jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ class Jump {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Call;
+ friend struct DFG::OSRExit;
+ friend class LinkBuffer;
+ public:
+ Jump()
+ {
+ }
+
+#if CPU(ARM_THUMB2)
+ // FIXME: this information should be stored in the instruction stream, not in the Jump object.
+ Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+#elif CPU(SH4)
+ Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+ : m_label(jmp)
+ , m_type(type)
+ {
+ }
+#else
+ Jump(AssemblerLabel jmp)
+ : m_label(jmp)
+ {
+ }
+#endif
+
+ Label label() const
+ {
+ Label result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+ masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+ ARMv7Assembler::JumpType m_type;
+ ARMv7Assembler::Condition m_condition;
+#endif
+#if CPU(SH4)
+ SH4Assembler::JumpType m_type;
+#endif
+ };
+
+ struct PatchableJump {
+ PatchableJump()
+ {
+ }
+
+ explicit PatchableJump(Jump jump)
+ : m_jump(jump)
+ {
+ }
+
+ operator Jump&() { return m_jump; }
+
+ Jump m_jump;
+ };
+
+ // JumpList:
+ //
+ // A JumpList is a set of Jump objects.
+ // All jumps in the set will be linked to the same destination.
+ class JumpList {
+ friend class LinkBuffer;
+
+ public:
+ typedef Vector<Jump, 2> JumpVector;
+
+ JumpList() { }
+
+ JumpList(Jump jump)
+ {
+ append(jump);
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].link(masm);
+ m_jumps.clear();
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].linkTo(label, masm);
+ m_jumps.clear();
+ }
+
+ void append(Jump jump)
+ {
+ m_jumps.append(jump);
+ }
+
+ void append(const JumpList& other)
+ {
+ m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+ }
+
+ bool empty()
+ {
+ return !m_jumps.size();
+ }
+
+ void clear()
+ {
+ m_jumps.clear();
+ }
+
+ const JumpVector& jumps() const { return m_jumps; }
+
+ private:
+ JumpVector m_jumps;
+ };
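+
+ // A minimal usage sketch (illustrative; 'masm' and the branch helper are
+ // assumptions, not part of this header):
+ //
+ // JumpList failures;
+ // failures.append(masm.branch32(/* ... */)); // jumps sharing one destination
+ // /* ...emit the fast path... */
+ // failures.link(&masm); // bind them all here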
+
+
+ // Section 3: Misc admin methods
+#if ENABLE(DFG_JIT)
+ Label labelIgnoringWatchpoints()
+ {
+ Label result;
+ result.m_label = m_assembler.labelIgnoringWatchpoints();
+ return result;
+ }
+#else
+ Label labelIgnoringWatchpoints()
+ {
+ return label();
+ }
+#endif
+
+ Label label()
+ {
+ return Label(this);
+ }
+
+ void padBeforePatch()
+ {
+ // Rely on the fact that asking for a label already does the padding.
+ (void)label();
+ }
+
+ Label watchpointLabel()
+ {
+ Label result;
+ result.m_label = m_assembler.labelForWatchpoint();
+ return result;
+ }
+
+ Label align()
+ {
+ m_assembler.align(16);
+ return Label(this);
+ }
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ class RegisterAllocationOffset {
+ public:
+ RegisterAllocationOffset(unsigned offset)
+ : m_offset(offset)
+ {
+ }
+
+ void check(unsigned low, unsigned high)
+ {
+ RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+ }
+
+ private:
+ unsigned m_offset;
+ };
+
+ void addRegisterAllocationAtOffset(unsigned offset)
+ {
+ m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
+ }
+
+ void clearRegisterAllocationOffsets()
+ {
+ m_registerAllocationForOffsets.clear();
+ }
+
+ void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
+ {
+ if (offset1 > offset2)
+ std::swap(offset1, offset2);
+
+ size_t size = m_registerAllocationForOffsets.size();
+ for (size_t i = 0; i < size; ++i)
+ m_registerAllocationForOffsets[i].check(offset1, offset2);
+ }
+#endif
+
+ template<typename T, typename U>
+ static ptrdiff_t differenceBetween(T from, U to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+ {
+ return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
+ }
+
+ unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+ ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
+ {
+ AssemblerType::cacheFlush(code, size);
+ }
+protected:
+ AbstractMacroAssembler()
+ : m_randomSource(cryptographicallyRandomNumber())
+ {
+ }
+
+ AssemblerType m_assembler;
+
+ uint32_t random()
+ {
+ return m_randomSource.getUint32();
+ }
+
+ WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool scratchRegisterForBlinding() { return false; }
+ static bool shouldBlindForSpecificArch(uint32_t) { return true; }
+ static bool shouldBlindForSpecificArch(uint64_t) { return true; }
+#endif
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
+ }
+
+ static void linkPointer(void* code, AssemblerLabel label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
+
+ static void* getLinkerAddress(void* code, AssemblerLabel label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_label);
+ }
+
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+ {
+ return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+ }
+
+ static void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithLoad(label.dataLocation());
+ }
+
+ static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithAddressComputation(label.dataLocation());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AbstractMacroAssembler_h
diff --git a/src/3rdparty/masm/assembler/AssemblerBuffer.h b/src/3rdparty/masm/assembler/AssemblerBuffer.h
new file mode 100644
index 0000000000..277ec1043c
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AssemblerBuffer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ExecutableAllocator.h"
+#include "JITCompilationEffort.h"
+#include "JSGlobalData.h"
+#include "stdint.h"
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+ struct AssemblerLabel {
+ AssemblerLabel()
+ : m_offset(std::numeric_limits<uint32_t>::max())
+ {
+ }
+
+ explicit AssemblerLabel(uint32_t offset)
+ : m_offset(offset)
+ {
+ }
+
+ bool isSet() const { return (m_offset != std::numeric_limits<uint32_t>::max()); }
+
+ AssemblerLabel labelAtOffset(int offset) const
+ {
+ return AssemblerLabel(m_offset + offset);
+ }
+
+ uint32_t m_offset;
+ };
+
+ class AssemblerBuffer {
+ static const int inlineCapacity = 128;
+ public:
+ AssemblerBuffer()
+ : m_storage(inlineCapacity)
+ , m_buffer(&(*m_storage.begin()))
+ , m_capacity(inlineCapacity)
+ , m_index(0)
+ {
+ }
+
+ ~AssemblerBuffer()
+ {
+ }
+
+ bool isAvailable(int space)
+ {
+ return m_index + space <= m_capacity;
+ }
+
+ void ensureSpace(int space)
+ {
+ if (!isAvailable(space))
+ grow();
+ }
+
+ bool isAligned(int alignment) const
+ {
+ return !(m_index & (alignment - 1));
+ }
+
+ template<typename IntegralType>
+ void putIntegral(IntegralType value)
+ {
+ ensureSpace(sizeof(IntegralType));
+ putIntegralUnchecked(value);
+ }
+
+ template<typename IntegralType>
+ void putIntegralUnchecked(IntegralType value)
+ {
+ ASSERT(isAvailable(sizeof(IntegralType)));
+ *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value;
+ m_index += sizeof(IntegralType);
+ }
+
+ void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+ void putByte(int8_t value) { putIntegral(value); }
+ void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+ void putShort(int16_t value) { putIntegral(value); }
+ void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+ void putInt(int32_t value) { putIntegral(value); }
+ void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+ void putInt64(int64_t value) { putIntegral(value); }
+
+ void* data() const
+ {
+ return m_buffer;
+ }
+
+ size_t codeSize() const
+ {
+ return m_index;
+ }
+
+ AssemblerLabel label() const
+ {
+ return AssemblerLabel(m_index);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ if (!m_index)
+ return 0;
+
+ RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index, ownerUID, effort);
+
+ if (!result)
+ return 0;
+
+ ExecutableAllocator::makeWritable(result->start(), result->sizeInBytes());
+
+ memcpy(result->start(), m_buffer, m_index);
+
+ return result.release();
+ }
+
+ unsigned debugOffset() { return m_index; }
+
+ protected:
+ void append(const char* data, int size)
+ {
+ if (!isAvailable(size))
+ grow(size);
+
+ memcpy(m_buffer + m_index, data, size);
+ m_index += size;
+ }
+
+ void grow(int extraCapacity = 0)
+ {
+ m_capacity += m_capacity / 2 + extraCapacity;
+
+ m_storage.grow(m_capacity);
+ m_buffer = &(*m_storage.begin());
+ }
+
+ private:
+ Vector<char, inlineCapacity, UnsafeVectorOverflow> m_storage;
+ char* m_buffer;
+ int m_capacity;
+ int m_index;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
diff --git a/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h b/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 0000000000..5377ef0c7a
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+ A constant pool can store 4- or 8-byte values. The values can be
+ constants or addresses. The addresses should be 32 or 64 bits. The constants
+ should be double-precision floats or integer numbers which are hard to
+ encode in a few machine instructions.
+
+ TODO: The pool is designed to handle both 32- and 64-bit values, but
+ currently only 4-byte constants are implemented and tested.
+
+ The AssemblerBuffer can contain multiple constant pools. Each pool is inserted
+ into the instruction stream and protected from the execution flow by a jump
+ instruction.
+
+ The flush mechanism is called when no space remains to insert the next instruction
+ into the pool. Three values are used to determine when the constant pool itself
+ has to be inserted into the instruction stream (Assembler Buffer):
+
+ - maxPoolSize: size of the constant pool in bytes; this value cannot be
+ larger than the maximum offset of a PC relative memory load
+
+ - barrierSize: size of the jump instruction in bytes which protects the
+ constant pool from execution
+
+ - maxInstructionSize: maximum length of a machine instruction in bytes
+
+ There are some callbacks which implement the target architecture specific
+ address handling:
+
+ - TYPE patchConstantPoolLoad(TYPE load, int value):
+ patch the 'load' instruction with the index of the constant in the
+ constant pool and return the patched instruction.
+
+ - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+ patch the PC relative load instruction at the 'loadAddr' address with the
+ final relative offset. The offset can be computed with the help of
+ 'constPoolAddr' (the address of the constant pool) and the index of the
+ constant (which was previously stored in the load instruction itself).
+
+ - TYPE placeConstantPoolBarrier(int size):
+ return a constant pool barrier instruction which jumps over the
+ constant pool.
+
+ The 'put*WithConstant*' functions should be used to place data into the
+ constant pool.
+*/
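+
+// A hedged usage sketch (illustrative; 'buffer' and 'loadInsn' are assumptions):
+//
+// buffer.putIntWithConstantInt(loadInsn, 0xdeadbeef, true);
+//
+// This emits 'loadInsn' patched with the constant's pool index, records the load
+// site for the final fixup, and flushes the pool first if the PC relative range
+// would otherwise be exceeded.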
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool : public AssemblerBuffer {
+ typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+ using AssemblerBuffer::putIntegral;
+ using AssemblerBuffer::putIntegralUnchecked;
+public:
+ typedef struct {
+ short high;
+ short low;
+ } TwoShorts;
+
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ void ensureSpaceForAnyInstruction(int amount = 1)
+ {
+ flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t));
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ void putIntegral(TwoShorts value)
+ {
+ putIntegral(value.high);
+ putIntegral(value.low);
+ }
+
+ void putIntegralUnchecked(TwoShorts value)
+ {
+ putIntegralUnchecked(value.high);
+ putIntegralUnchecked(value.low);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ flushConstantPool(false);
+ return AssemblerBuffer::executableCopy(globalData, ownerUID, effort);
+ }
+
+ void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
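+
+ // For example (hypothetical use): emitting two reusable loads of the same
+ // word stores the constant once and patches both instructions with the
+ // same pool index:
+ //
+ // buffer.putIntWithConstantInt(loadInsn, 0x12345678, true);
+ // buffer.putIntWithConstantInt(loadInsn, 0x12345678, true); // reuses the slot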
+
+ // This flushing mechanism can be called after any unconditional jumps.
+ void flushWithoutBarrier(bool isForced = false)
+ {
+ // Flush if constant pool is more than 60% full to avoid overuse of this function.
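+ // With 4-byte entries the pool holds maxPoolSize / 4 constants, so
+ // 5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t) is the integer
+ // form of m_numConsts > 0.6 * capacity.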
+ if (isForced || 5 * static_cast<uint32_t>(m_numConsts) > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_numConsts;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ template<typename IntegralType>
+ void putIntegralWithConstantInt(IntegralType insn, uint32_t constant, bool isReusable)
+ {
+ if (!m_numConsts)
+ m_maxDistance = maxPoolSize;
+ flushIfNoSpaceFor(sizeof(IntegralType), 4);
+
+ m_loadOffsets.append(codeSize());
+ if (isReusable) {
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, i)));
+ correctDeltas(sizeof(IntegralType));
+ return;
+ }
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, m_numConsts)));
+ ++m_numConsts;
+
+ correctDeltas(sizeof(IntegralType), 4);
+ }
+
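+ // A flushed pool is laid out as:
+ //
+ // [ optional barrier jump ][ 0-7 bytes of padding to 8 ][ constants ... ]
+ //
+ // after which every recorded PC-relative load is patched to reference its
+ // slot in the constants area.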
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (m_numConsts == 0)
+ return;
+ int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Emit a barrier instruction so execution jumps over the constant pool
+ if (useBarrier)
+ putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = codeSize();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC-relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<char*>(data()) + *iter;
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset);
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+ if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+ (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool; // backing store for pending constants
+ char* m_mask; // per-entry flag: UniqueConst or ReusableConst
+ LoadOffsets m_loadOffsets; // code offsets of loads awaiting patching
+
+ int m_numConsts; // number of constants currently in the pool
+ int m_maxDistance; // bytes the buffer may still grow before the oldest load goes out of range
+ int m_lastConstDelta; // bytes of the most recent constant, not yet charged to m_maxDistance
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
diff --git a/src/3rdparty/masm/assembler/CodeLocation.h b/src/3rdparty/masm/assembler/CodeLocation.h
new file mode 100644
index 0000000000..86d1f2b755
--- /dev/null
+++ b/src/3rdparty/masm/assembler/CodeLocation.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabelCompact;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+class CodeLocationConvertibleLoad;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
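+//
+// For example (offsets hypothetical):
+//
+//     CodeLocationLabel entry = ...;
+//     CodeLocationJump jump = entry.jumpAtOffset(8);
+//     CodeLocationDataLabelPtr data = entry.dataLabelPtrAtOffset(16);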
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+ CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
+ CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelCompact : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelCompact() { }
+ explicit CodeLocationDataLabelCompact(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationDataLabelCompact(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationConvertibleLoad : public CodeLocationCommon {
+public:
+ CodeLocationConvertibleLoad() { }
+ explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationConvertibleLoad(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelCompact(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationConvertibleLoad(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.cpp b/src/3rdparty/masm/assembler/LinkBuffer.cpp
new file mode 100644
index 0000000000..645eba5380
--- /dev/null
+++ b/src/3rdparty/masm/assembler/LinkBuffer.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LinkBuffer.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "Options.h"
+
+namespace JSC {
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
+{
+ performFinalization();
+
+ return CodeRef(m_executableMemory);
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
+{
+ ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
+
+ CodeRef result = finalizeCodeWithoutDisassembly();
+
+ dataLogF("Generated JIT code for ");
+ va_list argList;
+ va_start(argList, format);
+ WTF::dataLogFV(format, argList);
+ va_end(argList);
+ dataLogF(":\n");
+
+ dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+ disassemble(result.code(), m_size, " ", WTF::dataFile());
+
+ return result;
+}
+
+void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+{
+ ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+ m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = m_executableMemory->start();
+ m_size = m_assembler->m_assembler.codeSize();
+ ASSERT(m_code);
+#else
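+ // Branch compaction: copy the unlinked code to its final location while
+ // shrinking each branch to its smallest usable encoding; recordLinkOffsets
+ // remembers how far each region has slid so later offsets can be remapped.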
+ m_initialSize = m_assembler->m_assembler.codeSize();
+ m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = (uint8_t*)m_executableMemory->start();
+ ASSERT(m_code);
+ ExecutableAllocator::makeWritable(m_code, m_initialSize);
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ int readPtr = 0;
+ int writePtr = 0;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
+ unsigned jumpCount = jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
+ uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
+ uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
+ ASSERT(!(regionSize % 2));
+ ASSERT(!(readPtr % 2));
+ ASSERT(!(writePtr % 2));
+ while (copySource != copyEnd)
+ *copyDst++ = *copySource++;
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+ // Calculate the absolute address of the jump target. For backward
+ // branches we need to be precise; for forward branches we can afford
+ // to be pessimistic.
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ // Compact branch if we can...
+ if (m_assembler->canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
+
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
+ }
+
+ jumpsToLink.clear();
+ m_size = writePtr + m_initialSize - readPtr;
+ m_executableMemory->shrink(m_size);
+
+#if DUMP_LINK_STATISTICS
+ dumpLinkStatistics(m_code, m_initialSize, m_size);
+#endif
+#if DUMP_CODE
+ dumpCode(m_code, m_size);
+#endif
+#endif
+}
+
+void LinkBuffer::performFinalization()
+{
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ ASSERT(isValid());
+ m_completed = true;
+#endif
+
+#if ENABLE(BRANCH_COMPACTION)
+ ExecutableAllocator::makeExecutable(code(), m_initialSize);
+#else
+ ExecutableAllocator::makeExecutable(code(), m_size);
+#endif
+ MacroAssembler::cacheFlush(code(), m_size);
+}
+
+#if DUMP_LINK_STATISTICS
+void LinkBuffer::dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
+{
+ static unsigned linkCount = 0;
+ static unsigned totalInitialSize = 0;
+ static unsigned totalFinalSize = 0;
+ linkCount++;
+ totalInitialSize += initialSize;
+ totalFinalSize += finalSize;
+ dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
+ code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
+ static_cast<unsigned>(initialSize - finalSize),
+ 100.0 * (initialSize - finalSize) / initialSize);
+ dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
+ linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
+ 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
+}
+#endif
+
+#if DUMP_CODE
+void LinkBuffer::dumpCode(void* code, size_t size)
+{
+#if CPU(ARM_THUMB2)
+ // Dump the generated code in an asm file format that can be assembled and then disassembled
+ // for debugging purposes. For example, save this output as jit.s:
+ // gcc -arch armv7 -c jit.s
+ // otool -tv jit.o
+ static unsigned codeCount = 0;
+ unsigned short* tcode = static_cast<unsigned short*>(code);
+ size_t tsize = size / sizeof(short);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.syntax unified\n"
+ "\t.section\t__TEXT,__text,regular,pure_instructions\n"
+ "\t.globl\t%s\n"
+ "\t.align 2\n"
+ "\t.code 16\n"
+ "\t.thumb_func\t%s\n"
+ "# %p\n"
+ "%s:\n", nameBuf, nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.short\t0x%x\n", tcode[i]);
+#elif CPU(ARM_TRADITIONAL)
+ // gcc -c jit.s
+ // objdump -D jit.o
+ static unsigned codeCount = 0;
+ unsigned int* tcode = static_cast<unsigned int*>(code);
+ size_t tsize = size / sizeof(unsigned int);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.globl\t%s\n"
+ "\t.align 4\n"
+ "\t.code 32\n"
+ "\t.text\n"
+ "# %p\n"
+ "%s:\n", nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.long\t0x%x\n", tcode[i]);
+#endif
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.h b/src/3rdparty/masm/assembler/LinkBuffer.h
new file mode 100644
index 0000000000..e1882433c1
--- /dev/null
+++ b/src/3rdparty/masm/assembler/LinkBuffer.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2009, 2010, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#define DUMP_LINK_STATISTICS 0
+#define DUMP_CODE 0
+
+#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
+#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
+
+#include "JITCompilationEffort.h"
+#include "MacroAssembler.h"
+#include <wtf/DataLog.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class JSGlobalData;
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to its final location in
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets,
+// * The address of Jump objects may be taken, such that they can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
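+// A typical (hypothetical) use, once a MacroAssembler 'masm' has generated
+// code; 'someHelper' below is illustrative, not part of this patch:
+//
+//     LinkBuffer linkBuffer(globalData, &masm, GLOBAL_THUNK_ID);
+//     linkBuffer.link(call, FunctionPtr(&someHelper));
+//     CodeRef code = FINALIZE_CODE(linkBuffer, ("my thunk"));
+//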
+class LinkBuffer {
+ WTF_MAKE_NONCOPYABLE(LinkBuffer);
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::PatchableJump PatchableJump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabelCompact DataLabelCompact;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
+#if ENABLE(BRANCH_COMPACTION)
+ typedef MacroAssembler::LinkRecord LinkRecord;
+ typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
+
+public:
+ LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : m_size(0)
+#if ENABLE(BRANCH_COMPACTION)
+ , m_initialSize(0)
+#endif
+ , m_code(0)
+ , m_assembler(masm)
+ , m_globalData(&globalData)
+#ifndef NDEBUG
+ , m_completed(false)
+ , m_effort(effort)
+#endif
+ {
+ linkCode(ownerUID, effort);
+ }
+
+ ~LinkBuffer()
+ {
+ ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail));
+ }
+
+ bool didFailToAllocate() const
+ {
+ return !m_executableMemory;
+ }
+
+ bool isValid() const
+ {
+ return !didFailToAllocate();
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ call.m_label = applyOffset(call.m_label);
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ jump.m_label = applyOffset(jump.m_label);
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ link(list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationLabel locationOf(PatchableJump jump)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
+ {
+ return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label)
+ {
+ return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ call.m_label = applyOffset(call.m_label);
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+ uint32_t offsetOf(Label label)
+ {
+ return applyOffset(label.m_label).m_offset;
+ }
+
+ // Upon completion of all patching, 'FINALIZE_CODE()' should be called once to
+ // complete generation of the code. Alternatively, call
+ // finalizeCodeWithoutDisassembly() directly if you have your own way of
+ // displaying disassembly.
+
+ CodeRef finalizeCodeWithoutDisassembly();
+ CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+
+ CodePtr trampolineAt(Label label)
+ {
+ return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+ }
+
+ void* debugAddress()
+ {
+ return m_code;
+ }
+
+ size_t debugSize()
+ {
+ return m_size;
+ }
+
+private:
+ template <typename T> T applyOffset(T src)
+ {
+#if ENABLE(BRANCH_COMPACTION)
+ src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+#endif
+ return src;
+ }
+
+ // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void linkCode(void* ownerUID, JITCompilationEffort);
+
+ void performFinalization();
+
+#if DUMP_LINK_STATISTICS
+ static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize);
+#endif
+
+#if DUMP_CODE
+ static void dumpCode(void* code, size_t);
+#endif
+
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+ size_t m_size;
+#if ENABLE(BRANCH_COMPACTION)
+ size_t m_initialSize;
+#endif
+ void* m_code;
+ MacroAssembler* m_assembler;
+ JSGlobalData* m_globalData;
+#ifndef NDEBUG
+ bool m_completed;
+ JITCompilationEffort m_effort;
+#endif
+};
+
+#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \
+ (UNLIKELY((condition)) \
+ ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
+ : (linkBufferReference).finalizeCodeWithoutDisassembly())
+
+// Use this to finalize code, like so:
+//
+// CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
+//
+// Which, in disassembly mode, will print:
+//
+// Generated JIT code for my super thingy number 42:
+// Code at [0x123456, 0x234567):
+// 0x123456: mov $0, 0
+// 0x12345a: ret
+//
+// ... and so on.
+//
+// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly
+// is true, so you can hide expensive disassembly-only computations inside there.
+
+#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF(Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF((Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/src/3rdparty/masm/assembler/MIPSAssembler.h b/src/3rdparty/masm/assembler/MIPSAssembler.h
new file mode 100644
index 0000000000..7f553bb9a1
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MIPSAssembler.h
@@ -0,0 +1,1107 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MIPSAssembler_h
+#define MIPSAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+typedef uint32_t MIPSWord;
+
+namespace MIPSRegisters {
+typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31
+} RegisterID;
+
+typedef enum {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31
+} FPRegisterID;
+
+} // namespace MIPSRegisters
+
+class MIPSAssembler {
+public:
+ typedef MIPSRegisters::RegisterID RegisterID;
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ MIPSAssembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ // MIPS instruction opcode field position
+ enum {
+ OP_SH_RD = 11,
+ OP_SH_RT = 16,
+ OP_SH_RS = 21,
+ OP_SH_SHAMT = 6,
+ OP_SH_CODE = 16,
+ OP_SH_FD = 6,
+ OP_SH_FS = 11,
+ OP_SH_FT = 16
+ };
+
+ void emitInst(MIPSWord op)
+ {
+ void* oldBase = m_buffer.data();
+
+ m_buffer.putInt(op);
+
+ void* newBase = m_buffer.data();
+ if (oldBase != newBase)
+ relocateJumps(oldBase, newBase);
+ }
+
+ void nop()
+ {
+ emitInst(0x00000000);
+ }
+
+ /* MIPS I needs one nop in the load delay slot. */
+ void loadDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ /* MIPS I needs one nop after a coprocessor access. */
+ void copDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ void move(RegisterID rd, RegisterID rs)
+ {
+ /* addu */
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS));
+ }
+
+ /* Load an immediate value into a register. This may generate 1 or 2
+ instructions. */
+ void li(RegisterID dest, int imm)
+ {
+ if (imm >= -32768 && imm <= 32767)
+ addiu(dest, MIPSRegisters::zero, imm);
+ else if (imm >= 0 && imm < 65536)
+ ori(dest, MIPSRegisters::zero, imm);
+ else {
+ lui(dest, imm >> 16);
+ if (imm & 0xffff)
+ ori(dest, dest, imm);
+ }
+ }
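+
+ // For example (values chosen for illustration):
+ // li(t0, 42) -> addiu $t0, $zero, 42 (one instruction)
+ // li(t0, 0x10000) -> lui $t0, 0x1 (low half is zero, ori omitted)
+ // li(t0, 0x12345678) -> lui $t0, 0x1234; ori $t0, $t0, 0x5678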
+
+ void lui(RegisterID rt, int imm)
+ {
+ emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void addiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void addu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void subu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mult(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void div(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000001a | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mfhi(RegisterID rd)
+ {
+ emitInst(0x00000010 | (rd << OP_SH_RD));
+ }
+
+ void mflo(RegisterID rd)
+ {
+ emitInst(0x00000012 | (rd << OP_SH_RD));
+ }
+
+ void mul(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+ emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+#else
+ mult(rs, rt);
+ mflo(rd);
+#endif
+ }
+
+ void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void andi(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void nor(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void ori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void xori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void slt(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void sll(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void sllv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void sra(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srav(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void srl(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srlv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void lb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lbu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwl(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwr(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lhu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void sb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void jr(RegisterID rs)
+ {
+ emitInst(0x00000008 | (rs << OP_SH_RS));
+ }
+
+ void jalr(RegisterID rs)
+ {
+ emitInst(0x0000f809 | (rs << OP_SH_RS));
+ }
+
+ void jal()
+ {
+ emitInst(0x0c000000);
+ }
+
+ void bkpt()
+ {
+ int value = 512; /* BRK_BUG */
+ emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE));
+ }
+
+ void bgez(RegisterID rs, int imm)
+ {
+ emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void bltz(RegisterID rs, int imm)
+ {
+ emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void beq(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bne(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bc1t()
+ {
+ emitInst(0x45010000);
+ }
+
+ void bc1f()
+ {
+ emitInst(0x45000000);
+ }
+
+ void appendJump()
+ {
+ m_jumps.append(m_buffer.label());
+ }
+
+ void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void lwc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ copDelayNop();
+ }
+
+ void ldc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void swc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sdc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void mtc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mthc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44e00000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mfc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void sqrtd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void movd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void negd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200007 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void truncwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtdw(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtds(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtsd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void ceqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cngtd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cnged(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cueqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void culed(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cultd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ // General helpers
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_buffer.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_buffer.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+ if (!result)
+ return 0;
+
+ relocateJumps(m_buffer.data(), result->start());
+ return result.release();
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // Assembly helpers for moving data between fp and registers.
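+ // On FP32 configurations (no mfhc1/mthc1) a 64-bit value lives in an
+ // even/odd FP register pair, so the second word goes via FPRegisterID(rn + 1).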
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mfc1(rd1, rn);
+ mfhc1(rd2, rn);
+#else
+ mfc1(rd1, rn);
+ mfc1(rd2, FPRegisterID(rn + 1));
+#endif
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mtc1(rn1, rd);
+ mthc1(rn2, rd);
+#else
+ mtc1(rn1, rd);
+ mtc1(rn2, FPRegisterID(rd + 1));
+#endif
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ // The return address is after a call and a delay slot instruction
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+ static size_t linkDirectJump(void* code, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code));
+ size_t ops = 0;
+ int32_t slotAddr = reinterpret_cast<int>(insn) + 4;
+ int32_t toAddr = reinterpret_cast<int>(to);
+
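+ // The single-word 'j' form keeps the top four bits of PC, so it is only
+ // usable when the delay-slot address and the target share one 256MB
+ // region; otherwise jump through $t9 with lui/ori/jr.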
+ if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) {
+ // lui
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff);
+ ++insn;
+ // jr
+ *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ ++insn;
+ ops = 4 * sizeof(MIPSWord);
+ } else {
+ // j
+ *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2);
+ ++insn;
+ ops = 2 * sizeof(MIPSWord);
+ }
+ // nop
+ *insn = 0x00000000;
+ return ops;
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + from.m_offset);
+ MIPSWord* toPos = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + to.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, toPos);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkCallInternal(insn, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 5)));
+ insn = insn - 6;
+ int flushSize = linkWithOffset(insn, to);
+
+ cacheFlush(insn, flushSize);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ void* start;
+ int size = linkCallInternal(from, to);
+ if (size == sizeof(MIPSWord))
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 2 * sizeof(MIPSWord));
+ else
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 4 * sizeof(MIPSWord));
+
+ cacheFlush(start, size);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (to & 0xffff);
+ insn--;
+ cacheFlush(insn, 2 * sizeof(MIPSWord));
+ }
+
+ static int32_t readInt32(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return result;
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ repatchInt32(where, value);
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ repatchInt32(from, reinterpret_cast<int32_t>(to));
+ }
+
+ static void* readPointer(void* from)
+ {
+ return reinterpret_cast<void*>(readInt32(from));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn -= 4;
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return reinterpret_cast<void*>(result);
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the fisrt byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, this is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(MIPSWord) * 4;
+ }
+
+ static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm)
+ {
+ MIPSWord* insn = static_cast<MIPSWord*>(instructionStart);
+ size_t codeSize = 2 * sizeof(MIPSWord);
+
+ // lui
+ *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff);
+ ++insn;
+ // if the following instruction is jr $t9, replace it with a nop
+ if (*insn == 0x03200008) {
+ *insn = 0x00000000;
+ codeSize += sizeof(MIPSWord);
+ }
+ cacheFlush(insn, codeSize);
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 3));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 3));
+ size_t ops = linkDirectJump(instructionStart, to);
+ cacheFlush(instructionStart, ops);
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x8c000000 | ((*insn) & 0x3ffffff); // lw
+ cacheFlush(insn, 4);
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x24000000 | ((*insn) & 0x3ffffff); // addiu
+ cacheFlush(insn, 4);
+ }
+
+private:
+ /* Update each jump in the buffer to reflect the new base address. */
+ void relocateJumps(void* oldBase, void* newBase)
+ {
+ // Check each jump
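+ // Two patterns are rewritten: an absolute 'j' whose encoded target must
+ // follow the buffer move, and a lui/ori pair materializing a 32-bit
+ // target address.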
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ int pos = iter->m_offset;
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
+ insn = insn + 2;
+ // Need to make sure we have 5 valid instructions after pos
+ if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
+ continue;
+
+ if ((*insn & 0xfc000000) == 0x08000000) { // j
+ int offset = *insn & 0x03ffffff;
+ int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase;
+ int topFourBits = (oldInsnAddress + 4) >> 28;
+ int oldTargetAddress = (topFourBits << 28) | (offset << 2);
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ int newInsnAddress = (int)insn;
+ if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28))
+ *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff);
+ else {
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ }
+ } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui
+ int high = (*insn & 0xffff) << 16;
+ int low = *(insn + 1) & 0xffff;
+ int oldTargetAddress = high | low;
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ }
+ }
+ }
+
+ static int linkWithOffset(MIPSWord* insn, void* to)
+ {
+ ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
+ || (*insn & 0xfc000000) == 0x14000000 // bne
+ || (*insn & 0xffff0000) == 0x45010000 // bc1t
+ || (*insn & 0xffff0000) == 0x45000000); // bc1f
+ intptr_t diff = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
+
+ if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
+ /*
+ Convert the sequence:
+ beq $2, $3, target
+ nop
+ b 1f
+ nop
+ nop
+ nop
+ 1:
+
+ to the new sequence if possible:
+ bne $2, $3, 1f
+ nop
+ j target
+ nop
+ nop
+ nop
+ 1:
+
+ OR to the new sequence:
+ bne $2, $3, 1f
+ nop
+ lui $25, target >> 16
+ ori $25, $25, target & 0xffff
+ jr $25
+ nop
+ 1:
+
+ Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t.
+ */
+
+ if (*(insn + 2) == 0x10000003) {
+ if ((*insn & 0xfc000000) == 0x10000000) // beq
+ *insn = (*insn & 0x03ff0000) | 0x14000005; // bne
+ else if ((*insn & 0xfc000000) == 0x14000000) // bne
+ *insn = (*insn & 0x03ff0000) | 0x10000005; // beq
+ else if ((*insn & 0xffff0000) == 0x45010000) // bc1t
+ *insn = 0x45000005; // bc1f
+ else if ((*insn & 0xffff0000) == 0x45000000) // bc1f
+ *insn = 0x45010005; // bc1t
+ else
+ ASSERT(0);
+ }
+
+ insn = insn + 2;
+ if ((reinterpret_cast<intptr_t>(insn) + 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *insn = 0x08000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ *(insn + 1) = 0;
+ return 4 * sizeof(MIPSWord);
+ }
+
+ intptr_t newTargetAddress = reinterpret_cast<intptr_t>(to);
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 5 * sizeof(MIPSWord);
+ }
+
+ *insn = (*insn & 0xffff0000) | (diff & 0xffff);
+ return sizeof(MIPSWord);
+ }
+
+ static int linkCallInternal(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn = insn - 4;
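+        // "from" points just past the four-word call sequence; back up to its first instruction.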
+
+ if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal
+ if ((reinterpret_cast<intptr_t>(from) - 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *(insn + 2) = 0x0c000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ return sizeof(MIPSWord);
+ }
+
+ /* lui $25, (to >> 16) & 0xffff */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori $25, $25, to & 0xffff */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ /* jalr $25 */
+ *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 3 * sizeof(MIPSWord);
+ }
+
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori
+
+ /* lui */
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ return 2 * sizeof(MIPSWord);
+ }
+
+ AssemblerBuffer m_buffer;
+ Jumps m_jumps;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MIPSAssembler_h
diff --git a/src/3rdparty/masm/assembler/MacroAssembler.h b/src/3rdparty/masm/assembler/MacroAssembler.h
new file mode 100644
index 0000000000..f74680d7fc
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssembler.h
@@ -0,0 +1,1465 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC {
+typedef MacroAssemblerMIPS MacroAssemblerBase;
+};
+
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC {
+typedef MacroAssemblerSH4 MacroAssemblerBase;
+};
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+ using MacroAssemblerBase::pop;
+ using MacroAssemblerBase::jump;
+ using MacroAssemblerBase::branch32;
+ using MacroAssemblerBase::move;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::and32;
+ using MacroAssemblerBase::branchAdd32;
+ using MacroAssemblerBase::branchMul32;
+ using MacroAssemblerBase::branchSub32;
+ using MacroAssemblerBase::lshift32;
+ using MacroAssemblerBase::or32;
+ using MacroAssemblerBase::rshift32;
+ using MacroAssemblerBase::store32;
+ using MacroAssemblerBase::sub32;
+ using MacroAssemblerBase::urshift32;
+ using MacroAssemblerBase::xor32;
+#endif
+
+    static const double twoToThe32; // 2^32 as a double; a frequently needed constant in double code.
+
+ // Utilities used by the DFG JIT.
+#if ENABLE(DFG_JIT)
+ using MacroAssemblerBase::invert;
+
+ static DoubleCondition invert(DoubleCondition cond)
+ {
+ switch (cond) {
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return DoubleEqual; // make compiler happy
+ }
+ }
+
+ static bool isInvertible(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static ResultCondition invert(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return Zero; // Make compiler happy for release builds.
+ }
+ }
+#endif
+
+    // Platform agnostic convenience functions,
+ // described in terms of other macro assembly methods.
+ void pop()
+ {
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ Address addressForPoke(int index)
+ {
+ return Address(stackPointerRegister, (index * sizeof(void*)));
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, addressForPoke(index));
+ }
+
+ void poke(TrustedImm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(TrustedImmPtr imm, int index = 0)
+ {
+ storePtr(imm, addressForPoke(index));
+ }
+
+#if CPU(X86_64)
+ void peek64(RegisterID dest, int index = 0)
+ {
+ load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(TrustedImm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+
+ void poke64(RegisterID src, int index = 0)
+ {
+ store64(src, addressForPoke(index));
+ }
+#endif
+
+#if CPU(MIPS)
+ void poke(FPRegisterID src, int index = 0)
+ {
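+        // An even slot index keeps the 8-byte double store aligned.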
+ ASSERT(!(index & 1));
+ storeDouble(src, addressForPoke(index));
+ }
+#endif
+
+    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
+ void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+ void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
+ {
+ branch32(cond, op1, op2).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
+ {
+ branch32(cond, left, right).linkTo(target, this);
+ }
+
+ Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
+ {
+ branchTestPtr(cond, reg).linkTo(target, this);
+ }
+
+#if !CPU(ARM_THUMB2)
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtr(cond, left, right));
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
+ PatchableJump patchableJump()
+ {
+ return PatchableJump(jump());
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return PatchableJump(branchTest32(cond, reg, mask));
+ }
+#endif // !CPU(ARM_THUMB2)
+
+#if !CPU(ARM)
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, reg, imm));
+ }
+#endif // !CPU(ARM)
+
+ void jump(Label target)
+ {
+ jump().linkTo(target, this);
+ }
+
+    // Commute a relational condition; returns a new condition that will produce
+ // the same results given the same inputs but with their positions exchanged.
+ static RelationalCondition commute(RelationalCondition condition)
+ {
+ switch (condition) {
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ case GreaterThan:
+ return LessThan;
+ case GreaterThanOrEqual:
+ return LessThanOrEqual;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ default:
+ break;
+ }
+
+ ASSERT(condition == Equal || condition == NotEqual);
+ return condition;
+ }
+
+ static const unsigned BlindingModulus = 64;
+ bool shouldConsiderBlinding()
+ {
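+        // Blind roughly one in every BlindingModulus (64) constants, chosen at random.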
+ return !(random() & (BlindingModulus - 1));
+ }
+
+ // Ptr methods
+    // On 32-bit platforms (e.g. x86), these methods directly map onto their 32-bit equivalents.
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64)
+ void addPtr(Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add32(TrustedImm32(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add32(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and32(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and32(imm, srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg32(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or32(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or32(TrustedImm32(imm), dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub32(TrustedImm32(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor32(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor32(imm, srcDest);
+ }
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm.asTrustedImmPtr()), dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ void storePtr(ImmPtr imm, Address address)
+ {
+ store32(Imm32(imm.asTrustedImmPtr()), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, void* address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store32WithAddressOffsetPatch(src, address);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, src, dest);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, imm, dest);
+ }
+
+    using MacroAssemblerBase::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+ }
+#else
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(Address src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add64(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, Address address)
+ {
+ add64(imm, address);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add64(TrustedImm64(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add64(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and64(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and64(imm, srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg64(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or64(src, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or64(imm, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or64(TrustedImm64(imm), dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or64(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ or64(imm, src, dest);
+ }
+
+ void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+ {
+ rotateRight64(imm, srcDst);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub64(TrustedImm64(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(RegisterID src, Address dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor64(imm, srcDest);
+ }
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, BaseIndex address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store64WithAddressOffsetPatch(src, address);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+ {
+ return branchTest64(cond, address, reg);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd64(cond, imm, dest);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub64(cond, imm, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ return branchSub64(cond, src1, src2, dest);
+ }
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::and64;
+ using MacroAssemblerBase::convertInt32ToDouble;
+ using MacroAssemblerBase::store64;
+ bool shouldBlindDouble(double value)
+ {
+ // Don't trust NaN or +/-Infinity
+ if (!std::isfinite(value))
+ return shouldConsiderBlinding();
+
+ // Try to force normalisation, and check that there's no change
+ // in the bit pattern
+ if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
+ return shouldConsiderBlinding();
+
+ value = abs(value);
+ // Only allow a limited set of fractional components
+ double scaledValue = value * 8;
+ if (scaledValue / 8 != value)
+ return shouldConsiderBlinding();
+ double frac = scaledValue - floor(scaledValue);
+ if (frac != 0.0)
+ return shouldConsiderBlinding();
+
+ return value > 0xff;
+ }
+
+ bool shouldBlind(ImmPtr imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+        // In debug builds, always blind all constants, if only so we know
+        // when we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+ }
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImmPtr {
+ RotatedImmPtr(uintptr_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImmPtr value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImmPtr rotationBlindConstant(ImmPtr imm)
+ {
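+        // Rotate the constant left by a random amount; loadRotationBlindedConstant rotates right by the same amount to recover it.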
+ uint8_t rotation = random() % (sizeof(void*) * 8);
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+ return RotatedImmPtr(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRightPtr(constant.rotation, dest);
+ }
+
+ bool shouldBlind(Imm64 imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+        // In debug builds, always blind all constants, if only so we know
+        // when we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint64_t value = imm.asTrustedImm64().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+
+ JSValue jsValue = JSValue::decode(value);
+ if (jsValue.isInt32())
+ return shouldBlind(Imm32(jsValue.asInt32()));
+ if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+ return false;
+
+ if (!shouldBlindDouble(bitwise_cast<double>(value)))
+ return false;
+ }
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImm64 {
+ RotatedImm64(uint64_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImm64 value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImm64 rotationBlindConstant(Imm64 imm)
+ {
+ uint8_t rotation = random() % (sizeof(int64_t) * 8);
+ uint64_t value = imm.asTrustedImm64().m_value;
+ value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+ return RotatedImm64(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRight64(constant.rotation, dest);
+ }
+
+ void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ convertInt32ToDouble(scratchRegister, dest);
+ } else
+ convertInt32ToDouble(imm.asTrustedImm32(), dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImmPtr(), dest);
+ }
+
+ void move(Imm64 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm64(), dest);
+ }
+
+ void and64(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and64(key.value1, dest);
+ and64(key.value2, dest);
+ } else
+ and64(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ if (shouldBlind(right)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+ return branchPtr(cond, left, right.asTrustedImmPtr());
+ }
+
+ void storePtr(ImmPtr imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ storePtr(scratchRegister, dest);
+ } else
+ storePtr(imm.asTrustedImmPtr(), dest);
+ }
+
+ void store64(Imm64 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ store64(scratchRegister, dest);
+ } else
+ store64(imm.asTrustedImm64(), dest);
+ }
+
+#endif
+
+#endif // !CPU(X86_64)
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ bool shouldBlind(Imm32 imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+        // In debug builds, always blind all constants, if only so we know
+        // when we've broken blinding during patch development.
+ return true;
+#else
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint32_t value = imm.asTrustedImm32().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffff:
+ return false;
+ default:
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+#endif
+ }
+
+ struct BlindedImm32 {
+ BlindedImm32(int32_t v1, int32_t v2)
+ : value1(v1)
+ , value2(v2)
+ {
+ }
+ TrustedImm32 value1;
+ TrustedImm32 value2;
+ };
+
+ uint32_t keyForConstant(uint32_t value, uint32_t& mask)
+ {
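+        // Clamp the key to the constant's byte width so the xored halves stay within the same range.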
+ uint32_t key = random();
+ if (value <= 0xff)
+ mask = 0xff;
+ else if (value <= 0xffff)
+ mask = 0xffff;
+ else if (value <= 0xffffff)
+ mask = 0xffffff;
+ else
+ mask = 0xffffffff;
+ return key & mask;
+ }
+
+ uint32_t keyForConstant(uint32_t value)
+ {
+ uint32_t mask = 0;
+ return keyForConstant(value, mask);
+ }
+
+ BlindedImm32 xorBlindConstant(Imm32 imm)
+ {
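+        // value1 ^ value2 == baseValue, so xoring the two halves recovers the constant.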
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue);
+ return BlindedImm32(baseValue ^ key, key);
+ }
+
+ BlindedImm32 additionBlindedConstant(Imm32 imm)
+ {
+ // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
+ static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
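+        // Indexed by baseValue & 3: clearing the key's low bits preserves the base value's 4- or 2-byte alignment in both halves.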
+
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
+ if (key > baseValue)
+ key = key - baseValue;
+ return BlindedImm32(baseValue - key, key);
+ }
+
+ BlindedImm32 andBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
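+        // Where key is 1 the first half carries the base bit; where key is 0 the second half does, so value1 & value2 == baseValue.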
+ return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
+ }
+
+ BlindedImm32 orBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
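+        // (baseValue & key) | (baseValue & ~key) == baseValue, so oring the halves recovers the constant.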
+ return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
+ }
+
+ void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
+ {
+ move(constant.value1, dest);
+ xor32(constant.value2, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ add32(key.value1, dest);
+ add32(key.value2, dest);
+ } else
+ add32(imm.asTrustedImm32(), dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ addPtr(key.value1, dest);
+ addPtr(key.value2, dest);
+ } else
+ addPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and32(key.value1, dest);
+ and32(key.value2, dest);
+ } else
+ and32(imm.asTrustedImm32(), dest);
+ }
+
+ void andPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ andPtr(key.value1, dest);
+ andPtr(key.value2, dest);
+ } else
+ andPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return and32(imm.asTrustedImm32(), dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ and32(src, dest);
+ } else
+ and32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm32(), dest);
+ }
+
+ void or32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return or32(imm, dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ or32(src, dest);
+ } else
+ or32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = orBlindedConstant(imm);
+ or32(key.value1, dest);
+ or32(key.value2, dest);
+ } else
+ or32(imm.asTrustedImm32(), dest);
+ }
+
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(ImmPtr value, int index = 0)
+ {
+ storePtr(value, addressForPoke(index));
+ }
+
+#if CPU(X86_64)
+ void poke(Imm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+#endif
+
+ void store32(Imm32 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
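+            // x86 can blind in place: store one half, then xor the other directly into memory.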
+ BlindedImm32 blind = xorBlindConstant(imm);
+ store32(blind.value1, dest);
+ xor32(blind.value2, dest);
+#else
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ store32(scratchRegister, dest);
+ } else {
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ store32(imm.asTrustedImm32(), dest);
+ }
+#endif
+ } else
+ store32(imm.asTrustedImm32(), dest);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ sub32(key.value1, dest);
+ sub32(key.value2, dest);
+ } else
+ sub32(imm.asTrustedImm32(), dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ subPtr(key.value1, dest);
+ subPtr(key.value2, dest);
+ } else
+ subPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, src, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+ {
+ if (shouldBlind(right)) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
+ return branch32(cond, left, scratchRegister);
+ }
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+ {
+ if (src == dest)
+ ASSERT(scratchRegisterForBlinding());
+
+ if (shouldBlind(imm)) {
+ if (src == dest) {
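+                // Loading the blinded constant into dest would clobber src, so stash src in the scratch register first.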
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ move(src, scratchRegister);
+ src = scratchRegister;
+ }
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchAdd32(cond, src, dest);
+ }
+ return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest)
+ ASSERT(scratchRegisterForBlinding());
+
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ move(src, scratchRegister);
+ src = scratchRegister;
+ }
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchMul32(cond, src, dest);
+ }
+ return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+ }
+
+    // branchSub32 takes a scratch register because 32-bit platforms make use of this
+    // with src == dest, and on x86-32 we don't have a platform scratch register.
+ Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
+ {
+ if (shouldBlind(imm)) {
+ ASSERT(scratch != dest);
+ ASSERT(scratch != src);
+ loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+ return branchSub32(cond, src, scratch, dest);
+ }
+ return branchSub32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ // Immediate shifts only have 5 controllable bits
+ // so we'll consider them safe for now.
+ TrustedImm32 trustedImm32ForShift(Imm32 imm)
+ {
+ return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ lshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ lshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ rshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ rshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ urshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ urshift32(src, trustedImm32ForShift(amount), dest);
+ }
+#endif
+};
+
+} // namespace JSC
+
+#else // ENABLE(ASSEMBLER)
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+class MacroAssembler {
+private:
+ MacroAssembler() { }
+
+public:
+
+ enum RegisterID { NoRegister };
+ enum FPRegisterID { NoFPRegister };
+};
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp b/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp
new file mode 100644
index 0000000000..98dc3e9879
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "MacroAssemblerARM.h"
+
+#if OS(LINUX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+static bool isVFPPresent()
+{
+#if OS(LINUX)
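+    // Scan the ELF auxiliary vector for the VFP bit in the kernel-reported hardware capabilities.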
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd > 0) {
+ Elf32_auxv_t aux;
+ while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+ if (aux.a_type == AT_HWCAP) {
+ close(fd);
+ return aux.a_un.a_val & HWCAP_VFP;
+ }
+ }
+ close(fd);
+ }
+#endif
+
+#if (COMPILER(RVCT) && defined(__TARGET_FPU_VFP)) || (COMPILER(GCC) && defined(__VFP_FP__))
+ return true;
+#else
+ return false;
+#endif
+}
+
+const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
+
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
+void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+{
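+    // Assemble the word from two halfword loads: the low half into dest, the high half
+    // into S0, then orr them together with a 16-bit shift at the end.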
+ ARMWord op2;
+
+ ASSERT(address.scale >= 0 && address.scale <= 3);
+ op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
+
+ if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
+ m_assembler.add(ARMRegisters::S0, address.base, op2);
+ m_assembler.halfDtrUp(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset));
+ m_assembler.halfDtrUp(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset + 0x2));
+ } else if (address.offset < 0 && address.offset >= -0xff) {
+ m_assembler.add(ARMRegisters::S0, address.base, op2);
+ m_assembler.halfDtrDown(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset));
+ m_assembler.halfDtrDown(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset - 0x2));
+ } else {
+ m_assembler.moveImm(address.offset, ARMRegisters::S0);
+ m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, dest, address.base, ARMRegisters::S0);
+ m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::Op2Immediate | 0x2);
+ m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, ARMRegisters::S0, address.base, ARMRegisters::S0);
+ }
+ m_assembler.orr(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
+}
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM.h b/src/3rdparty/masm/assembler/MacroAssemblerARM.h
new file mode 100644
index 0000000000..01e34c97cd
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM.h
@@ -0,0 +1,1386 @@
+/*
+ * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+ static const int DoubleConditionMask = 0x0f;
+ static const int DoubleConditionBitSpecial = 0x10;
+ COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
+public:
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+
+ enum RelationalCondition {
+ Equal = ARMAssembler::EQ,
+ NotEqual = ARMAssembler::NE,
+ Above = ARMAssembler::HI,
+ AboveOrEqual = ARMAssembler::CS,
+ Below = ARMAssembler::CC,
+ BelowOrEqual = ARMAssembler::LS,
+ GreaterThan = ARMAssembler::GT,
+ GreaterThanOrEqual = ARMAssembler::GE,
+ LessThan = ARMAssembler::LT,
+ LessThanOrEqual = ARMAssembler::LE
+ };
+
+ enum ResultCondition {
+ Overflow = ARMAssembler::VS,
+ Signed = ARMAssembler::MI,
+ Zero = ARMAssembler::EQ,
+ NonZero = ARMAssembler::NE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = ARMAssembler::EQ,
+ DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
+ DoubleGreaterThan = ARMAssembler::GT,
+ DoubleGreaterThanOrEqual = ARMAssembler::GE,
+ DoubleLessThan = ARMAssembler::CC,
+ DoubleLessThanOrEqual = ARMAssembler::LS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = ARMAssembler::NE,
+ DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+ DoubleLessThanOrUnordered = ARMAssembler::LT,
+ DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
+ };
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
+
+ static const Scale ScalePtr = TimesFour;
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.adds(dest, dest, src);
+ }
+
+ void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.adds(dest, op1, op2);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ add32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.adds(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
+ add32(ARMRegisters::S1, dest);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ add32(ARMRegisters::S1, dest);
+ }
+
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.adds(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.bitAnds(dest, dest, src);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.bitAnds(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::Op2InvertedImmediate)
+ m_assembler.bics(dest, dest, w & ~ARMAssembler::Op2InvertedImmediate);
+ else
+ m_assembler.bitAnds(dest, dest, w);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::Op2InvertedImmediate)
+ m_assembler.bics(dest, src, w & ~ARMAssembler::Op2InvertedImmediate);
+ else
+ m_assembler.bitAnds(dest, src, w);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ and32(ARMRegisters::S1, dest);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+ m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+ m_assembler.movs(dest, m_assembler.lslRegister(src, ARMRegisters::S0));
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
+ }
+
+ void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op2 == dest) {
+ if (op1 == dest) {
+ move(op2, ARMRegisters::S0);
+ op2 = ARMRegisters::S0;
+ } else {
+ // Swap the operands.
+ RegisterID tmp = op1;
+ op1 = op2;
+ op2 = tmp;
+ }
+ }
+ m_assembler.muls(dest, op1, op2);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ mul32(src, dest, dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, ARMRegisters::S0);
+ m_assembler.muls(dest, src, ARMRegisters::S0);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.rsbs(srcDest, srcDest, ARMAssembler::getOp2Byte(0));
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs(dest, dest, src);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+ load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+ or32(src, ARMRegisters::S1);
+ store32(ARMRegisters::S1, ARMRegisters::S0);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orrs(dest, op1, op2);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+ m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+ m_assembler.movs(dest, m_assembler.asrRegister(src, ARMRegisters::S0));
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+ m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+ m_assembler.movs(dest, m_assembler.lsrRegister(src, ARMRegisters::S0));
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subs(dest, dest, src);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ sub32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ sub32(ARMRegisters::S1, dest);
+ }
+
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eors(dest, dest, src);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eors(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvns(dest, dest);
+ else
+ m_assembler.eors(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvns(dest, src);
+ else
+ m_assembler.eors(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dest)
+ {
+#if WTF_ARM_ARCH_AT_LEAST(5)
+ m_assembler.clz(dest, src);
+#else
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ RELEASE_ASSERT_NOT_REACHED();
+#endif
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+#if CPU(ARMV5_OR_LOWER)
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
+#else
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+#endif
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(address.offset >= 0 && address.offset <= 255);
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ return result;
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
+ m_assembler.dtrUpRegister(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0);
+ return dataLabel;
+ }
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -4095 && value <= 4095;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+ if (address.offset >= 0)
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ else
+ m_assembler.dtrDown(ARMAssembler::LoadUint32, dest, address.base, address.offset);
+ return dataLabel;
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
+ m_assembler.dtrUpRegister(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0);
+ return dataLabel;
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store8(TrustedImm32 imm, const void* address)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ move(imm, ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ move(imm, ARMRegisters::S1);
+ m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0);
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push(src);
+ }
+
+ void push(Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ push(ARMRegisters::S1);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ move(imm, ARMRegisters::S0);
+ push(ARMRegisters::S0);
+ }
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.moveImm(imm.m_value, dest);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.mov(dest, src);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, ARMRegisters::S0);
+ move(reg2, reg1);
+ move(ARMRegisters::S0, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load8(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
+ {
+ internalCompare32(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, ARMRegisters::S1);
+ return branch32(cond, left, ARMRegisters::S1);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32WithUnalignedHalfWords(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+ load8(Address(ARMRegisters::S1), ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst(reg, mask);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::Op2InvertedImmediate)
+ m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
+ else
+ m_assembler.tst(reg, w);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ void jump(Address address)
+ {
+ load32(address, ARMRegisters::pc);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
+ load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
+ }
+
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
+ {
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(op1, op2, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void mull32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op2 == dest) {
+ if (op1 == dest) {
+ move(op2, ARMRegisters::S0);
+ op2 = ARMRegisters::S0;
+ } else {
+ // Swap the operands.
+ RegisterID tmp = op1;
+ op1 = op2;
+ op2 = tmp;
+ }
+ }
+ m_assembler.mull(ARMRegisters::S1, dest, op1, op2);
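+ // The multiply overflowed 32 bits iff the high word of the 64-bit
+ // product (in S1) differs from the sign extension of the low word, so
+ // the following cmp leaves NE set exactly on overflow.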
+ m_assembler.cmp(ARMRegisters::S1, m_assembler.asr(dest, 31));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ mull32(src1, src2, dest);
+ cond = NonZero;
+ } else
+ mul32(src1, src2, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul32(cond, src, dest, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ move(imm, ARMRegisters::S0);
+ mull32(ARMRegisters::S0, src, dest);
+ cond = NonZero;
+ } else
+ mul32(imm, src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.subs(dest, op1, op2);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ neg32(srcDest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ or32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ internalCompare32(reg, imm);
+ Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), true));
+ m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
+ return PatchableJump(jump);
+ }
+
+ void breakpoint()
+ {
+ m_assembler.bkpt(0);
+ }
+
+ Call nearCall()
+ {
+ m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+ return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ call32(address.base, address.offset);
+ }
+
+ void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, ARMRegisters::S1);
+ compare32(cond, ARMRegisters::S1, right, dest);
+ }
+
+ void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmp(0, reg);
+ else
+ m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
+ m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load32(address, ARMRegisters::S1);
+ test32(cond, ARMRegisters::S1, mask, dest);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load8(address, ARMRegisters::S1);
+ test32(cond, ARMRegisters::S1, mask, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, ARMRegisters::S1);
+ add32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address.m_ptr);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
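+ // 64-bit add of a 32-bit immediate: add (or subtract) the low word with
+ // the flags set, then fold the carry/borrow into the high word with the
+ // adc/sbc below.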
+ ARMWord tmp;
+
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, 0);
+
+ if ((tmp = ARMAssembler::getOp2(imm.m_value)) != ARMAssembler::InvalidImmediate)
+ m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, tmp);
+ else if ((tmp = ARMAssembler::getOp2(-imm.m_value)) != ARMAssembler::InvalidImmediate)
+ m_assembler.subs(ARMRegisters::S0, ARMRegisters::S0, tmp);
+ else {
+ m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, m_assembler.getImm(imm.m_value, ARMRegisters::S1));
+ move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
+ }
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, 0);
+
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
+ if (imm.m_value >= 0)
+ m_assembler.adc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ else
+ m_assembler.sbc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, ARMRegisters::S1);
+ sub32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address.m_ptr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ load32(left.m_ptr, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ void relativeTableJump(RegisterID index, int scale)
+ {
+ ASSERT(scale >= 0 && scale <= 31);
+ m_assembler.add(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));
+
+ // NOP the default prefetching
+ m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
+ }
+
+ Call call()
+ {
+ ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
+ m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
+ return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ DataLabelPtr dataLabel(this);
+ m_assembler.ldrUniqueImmediate(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
+ Jump jump = branch32(cond, left, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ load32(left, ARMRegisters::S1);
+ ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+ Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), address);
+ }
+
+ // Floating point operators
+ static bool supportsFloatingPoint()
+ {
+ return s_isVFPPresent;
+ }
+
+ static bool supportsFloatingPointTruncate()
+ {
+ return false;
+ }
+
+ static bool supportsFloatingPointSqrt()
+ {
+ return s_isVFPPresent;
+ }
+ static bool supportsFloatingPointAbs() { return false; }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.vmov_f64(dest, src);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd_f64(dest, dest, src);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vadd_f64(dest, op1, op2);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ addDouble(ARMRegisters::SD0, dest);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, ARMRegisters::SD0);
+ addDouble(ARMRegisters::SD0, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vdiv_f64(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vdiv_f64(dest, op1, op2);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ RELEASE_ASSERT_NOT_REACHED(); // Untested
+ loadDouble(src, ARMRegisters::SD0);
+ divDouble(ARMRegisters::SD0, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub_f64(dest, dest, src);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vsub_f64(dest, op1, op2);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ subDouble(ARMRegisters::SD0, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul_f64(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ mulDouble(ARMRegisters::SD0, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vmul_f64(dest, op1, op2);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsqrt_f64(dest, src);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vabs_f64(dest, src);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg_f64(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov_vfp32(dest << 1, src);
+ m_assembler.vcvt_f64_s32(dest, dest << 1);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ convertInt32ToDouble(ARMRegisters::S1, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+ load32(Address(ARMRegisters::S1), ARMRegisters::S1);
+ convertInt32ToDouble(ARMRegisters::S1, dest);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvt_f64_f32(dst, src);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvt_f32_f64(dst, src);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp_f64(left, right);
+ m_assembler.vmrs_apsr();
+ if (cond & DoubleConditionBitSpecial)
+ m_assembler.cmp(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
+ return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ truncateDoubleToInt32(src, dest);
+
+ m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
+ m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));
+
+ ARMWord w = ARMAssembler::getOp2(0x80000000);
+ ASSERT(w != ARMAssembler::InvalidImmediate);
+ m_assembler.cmp(ARMRegisters::S0, w);
+ return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
+ }
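+
+ // Illustrative use (a sketch only; 'masm', 'fpSrc' and 'gpDest' are
+ // assumed caller-side names, not part of this file):
+ //
+ //     Jump failed = masm.branchTruncateDoubleToInt32(fpSrc, gpDest);
+ //     // 'failed' is taken when the truncated value may not be
+ //     // representable (including the INT_MIN case noted above).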
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ truncateDoubleToUint32(src, dest);
+
+ m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
+ m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));
+
+ m_assembler.cmp(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
+ m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_u32_f64(ARMRegisters::SD0 << 1, src);
+ m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+ }
+
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
+ {
+ m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
+ m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ m_assembler.vcvt_f64_s32(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
+
+ // If the result is zero, it might have been -0.0, and 0.0 equals -0.0.
+ failureCases.append(branchTest32(Zero, dest));
+ }
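+
+ // A minimal usage sketch ('masm', 'src', 'dest' and 'scratch' are
+ // assumed names; the slow path is whatever the caller links to):
+ //
+ //     JumpList notInt32;
+ //     masm.branchConvertDoubleToInt32(src, dest, notInt32, scratch);
+ //     // ... fast integer path using dest ...
+ //     notInt32.link(&masm); // branch here on inexact results, NaN, or zero (possibly -0.0)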
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ convertInt32ToDouble(ARMRegisters::S0, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
+ convertInt32ToDouble(ARMRegisters::S0, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL));
+ return static_cast<RelationalCondition>(cond ^ 0x10000000);
+ }
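+
+ // For example, invert(Equal) yields NotEqual: flipping bit 28 toggles an
+ // ARM condition code to its logical opposite (EQ/NE, GT/LE, LT/GE, ...).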
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return ARMAssembler::maxJumpReplacementSize();
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+ {
+ ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+ ARMAssembler::Condition ARMCondition(RelationalCondition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ ARMAssembler::Condition ARMCondition(ResultCondition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_assembler.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_assembler.sizeOfConstantPool();
+ }
+
+ void call32(RegisterID base, int32_t offset)
+ {
+ load32(Address(base, offset), ARMRegisters::S1);
+ m_assembler.blx(ARMRegisters::S1);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ void internalCompare32(RegisterID left, TrustedImm32 right)
+ {
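+ // Prefer cmn (compare negative) when -right.m_value fits in an operand-2
+ // immediate; 0x80000000 is excluded since negating it overflows.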
+ ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
+ if (tmp != ARMAssembler::InvalidImmediate)
+ m_assembler.cmn(left, tmp);
+ else
+ m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+ }
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMAssembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static const bool s_isVFPPresent;
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // MacroAssemblerARM_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 0000000000..81c1d7e08a
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,1914 @@
+/*
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+ // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
+ // - dTR is likely used more than aTR, and we'll get better instruction
+ // encoding if it's in the low 8 registers.
+ static const RegisterID dataTempRegister = ARMRegisters::ip;
+ static const RegisterID addressTempRegister = ARMRegisters::r3;
+
+ static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
+ inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
+
+public:
+ MacroAssemblerARMv7()
+ : m_makeJumpPatchable(false)
+ {
+ }
+
+ typedef ARMv7Assembler::LinkRecord LinkRecord;
+ typedef ARMv7Assembler::JumpType JumpType;
+ typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -255 && value <= 255;
+ }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+ void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+ bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+
+ struct ArmAddress {
+ enum AddressType {
+ HasOffset,
+ HasIndex,
+ } type;
+ RegisterID base;
+ union {
+ int32_t offset;
+ struct {
+ RegisterID index;
+ Scale scale;
+ };
+ } u;
+
+ explicit ArmAddress(RegisterID base, int32_t offset = 0)
+ : type(HasOffset)
+ , base(base)
+ {
+ u.offset = offset;
+ }
+
+ explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+ : type(HasIndex)
+ , base(base)
+ {
+ u.index = index;
+ u.scale = scale;
+ }
+ };
+
+public:
+ typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
+
+ static const Scale ScalePtr = TimesFour;
+
+ enum RelationalCondition {
+ Equal = ARMv7Assembler::ConditionEQ,
+ NotEqual = ARMv7Assembler::ConditionNE,
+ Above = ARMv7Assembler::ConditionHI,
+ AboveOrEqual = ARMv7Assembler::ConditionHS,
+ Below = ARMv7Assembler::ConditionLO,
+ BelowOrEqual = ARMv7Assembler::ConditionLS,
+ GreaterThan = ARMv7Assembler::ConditionGT,
+ GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ LessThan = ARMv7Assembler::ConditionLT,
+ LessThanOrEqual = ARMv7Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = ARMv7Assembler::ConditionVS,
+ Signed = ARMv7Assembler::ConditionMI,
+ Zero = ARMv7Assembler::ConditionEQ,
+ NonZero = ARMv7Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+ DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ DoubleLessThan = ARMv7Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+ DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+ DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+ DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+ DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
+ DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two-operand - operation(source, srcDst).
+ // For many operations the source may be a TrustedImm32, and the srcDst
+ // operand may often be a memory location (explicitly described using an
+ // Address object).
+
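+ // For instance (a hedged sketch; 'masm' and the concrete registers are
+ // assumptions, not part of this interface):
+ //
+ //     masm.add32(TrustedImm32(4), ARMRegisters::r0);  // r0 += 4
+ //     masm.sub32(ARMRegisters::r1, ARMRegisters::r0); // r0 -= r1
+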
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, dest, src);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add(dest, src, dataTempRegister);
+ }
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Since dataTempRegister holds the data just loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Since dataTempRegister holds the data just loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ }
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.ARM_and(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.ARM_and(dest, src, dataTempRegister);
+ }
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ and32(dest, src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ and32(imm, dest, dest);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.clz(dest, src);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.lsl(dest, src, dataTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, src, imm.m_value & 0x1f);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ lshift32(dest, imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.neg(srcDest, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr(dest, dest, src);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ load32(addressTempRegister, dataTempRegister);
+ or32(src, dataTempRegister);
+ store32(dataTempRegister, addressTempRegister);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest, dest);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orr(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.orr(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.orr(dest, src, dataTempRegister);
+ }
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.asr(dest, src, dataTempRegister);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.asr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.lsr(dest, src, dataTempRegister);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ urshift32(dest, imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub(dest, dest, src);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub(dest, dest, dataTempRegister);
+ }
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Since dataTempRegister holds the data just loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Since dataTempRegister holds the data just loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eor(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.mvn(dest, src);
+ return;
+ }
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.eor(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.eor(dest, src, dataTempRegister);
+ }
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ xor32(dest, src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn(dest, dest);
+ else
+ xor32(imm, dest, dest);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be a TrustedImm32. Address
+ // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
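+ //
+ // For example (illustrative only; 'masm' and the registers are assumed
+ // caller-side names):
+ //
+ //     masm.load32(Address(ARMRegisters::r0, 8), ARMRegisters::r1);  // r1 = *(r0 + 8)
+ //     masm.store32(TrustedImm32(0), Address(ARMRegisters::r0, 8)); // *(r0 + 8) = 0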
+
+private:
+ void load32(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldr(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrh(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16Signed(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
+ }
+
+ void load8(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrb(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load8Signed(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
+ }
+
+protected:
+ void store32(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.str(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.str(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.str(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+private:
+ void store8(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strb(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strb(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strb(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void store16(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strh(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strh(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strh(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+public:
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(setupArmAddress(address), dest);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(address.offset >= 0 && address.offset <= 255);
+ m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
+ return result;
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8Signed(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ load8Signed(setupArmAddress(address), dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+
+ RegisterID base = address.base;
+
+ DataLabelCompact label(this);
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+
+ m_assembler.ldr(dest, base, address.offset, true, false);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ load16Signed(setupArmAddress(address), dest);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
+ if (armImm.isValid())
+ m_assembler.ldrh(dest, address.base, armImm);
+ else {
+ move(TrustedImm32(address.offset), dataTempRegister);
+ m_assembler.ldrh(dest, address.base, dataTempRegister);
+ }
+ }
+
+ void load16Signed(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+ store32(src, ArmAddress(address.base, dataTempRegister));
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ store8(src, setupArmAddress(address));
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ store8(src, ArmAddress(addressTempRegister, 0));
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ store16(src, setupArmAddress(address));
+ }
+
+ // Possibly clobbers src, but not on this architecture.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool shouldBlindForSpecificArch(uint32_t value)
+ {
+ ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
+
+ // Couldn't be encoded as an immediate, so assume it's untrusted.
+ if (!immediate.isValid())
+ return true;
+
+ // If we can encode the immediate, we have less than 16
+ // attacker-controlled bits.
+ if (immediate.isEncodedImm())
+ return false;
+
+ // Don't let any more than 12 bits of an instruction word
+ // be controlled by an attacker.
+ return !immediate.isUInt12();
+ }
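+
+ // For example (a sketch of the expected behaviour): 0xff fits in a
+ // Thumb-2 encoded immediate, so shouldBlindForSpecificArch(0xff) is
+ // false, while a pointer-like value such as 0x12345678 has no encoding
+ // and yields true.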
+#endif
+
+ // Floating-point operations:
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // ARM VFP addresses can be offset by an 8-bit immediate plus a sign bit, left shifted by 2 (a range of +/-(255 * 4)).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vldr(dest, base, offset);
+ }
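+
+ // For instance, an offset of 1024 (> 255 * 4) or a misaligned offset such
+ // as 6 cannot be encoded directly, so the code above first materializes
+ // base + offset in addressTempRegister.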
+
+ void loadFloat(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // ARM VFP addresses can be offset by an 8-bit immediate plus a sign bit, left shifted by 2 (a range of +/-(255 * 4)).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadDouble(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadFloat(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.vmov(dest, src);
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.vldr(dest, addressTempRegister, 0);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // ARM VFP addresses can be offset by an 8-bit immediate plus a sign bit, left shifted by 2 (a range of +/-(255 * 4)).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vstr(src, base, offset);
+ }
+
+ void storeFloat(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // ARM VFP addresses can be offset by an 8-bit immediate plus a sign bit, left shifted by 2 (a range of +/-(255 * 4)).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ storeDouble(src, addressTempRegister);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeDouble(src, Address(addressTempRegister, address.offset));
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeFloat(src, Address(addressTempRegister, address.offset));
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, op1, op2);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, fpTempRegister);
+ m_assembler.vadd(dest, dest, fpTempRegister);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, op1, op2);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, op1, op2);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, op1, op2);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsqrt(dest, src);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vabs(dest, src);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov(fpTempRegister, src, src);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertInt32ToDouble(Address address, FPRegisterID dest)
+ {
+        // FIXME: load directly into the FPR!
+ load32(address, dataTempRegister);
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+        // FIXME: load directly into the FPR!
+ load32(address.m_ptr, dataTempRegister);
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp(left, right);
+ m_assembler.vmrs();
+
+ if (cond == DoubleNotEqual) {
+ // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+ return makeBranch(cond);
+ }
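+    // An unordered VFP compare (at least one operand is NaN) sets the flags
+    // to N=0 Z=0 C=1 V=1, so ConditionVS isolates the unordered case while
+    // ConditionNE would otherwise fire for it too; the extra branches above
+    // route unordered inputs to whichever outcome the condition requires.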
+
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ // Convert into dest.
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+ // Calculate 2x dest. If the value potentially underflowed, it will have
+ // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
+ // overflow the result will be equal to -2.
+ Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
+ Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
+
+ // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
+ underflow.link(this);
+ if (branchType == BranchIfTruncateSuccessful)
+ return noOverflow;
+
+ // We'll reach the current point in the code on failure, so plant a
+ // jump here & link the success case.
+ Jump failure = jump();
+ noOverflow.link(this);
+ return failure;
+ }
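+    // Worked example of the 2x check: an out-of-range negative input
+    // saturates to 0x80000000, and doubling that wraps to zero; an
+    // out-of-range positive input saturates to 0x7fffffff, and doubling
+    // that gives -2. A genuine result of zero also takes the underflow
+    // path, so zero is conservatively treated as a truncation failure.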
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+ Jump overflow = branch32(Equal, dest, TrustedImm32(0x7fffffff));
+ Jump success = branch32(GreaterThanOrEqual, dest, TrustedImm32(0));
+ overflow.link(this);
+
+ if (branchType == BranchIfTruncateSuccessful)
+ return success;
+
+ Jump failure = jump();
+ success.link(this);
+ return failure;
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+    // Convert 'src' to an integer, placing the result in 'dest'.
+    // If the result is not representable as a 32-bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
+ {
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branchTest32(Zero, dest));
+ }
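+    // Example of why the zero test is needed: src == -0.0 truncates to the
+    // integer 0, which converts back to +0.0, and -0.0 == +0.0 compares
+    // equal and ordered, so only the branchTest32 above reports the failure.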
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.vcmpz(reg);
+ m_assembler.vmrs();
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.vcmpz(reg);
+ m_assembler.vmrs();
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
+ void pop(RegisterID dest)
+ {
+        // load postindexed with writeback
+ m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+ }
+
+ void push(RegisterID src)
+ {
+ // store preindexed with writeback
+ m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ move(imm, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ uint32_t value = imm.m_value;
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+ if (armImm.isValid())
+ m_assembler.mov(dest, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+ m_assembler.mvn(dest, armImm);
+ else {
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+ if (value & 0xffff0000)
+ m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+ }
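+    // For example, 0x0000ff00 is an encodable shifted 8-bit pattern and
+    // emits a single mov; 0xffffff00 is the complement of 0xff and emits
+    // mvn dest, #0xff; an arbitrary value such as 0x12345678 falls back to
+    // mov/movt, loading the low and high halfwords separately.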
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.mov(dest, src);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, dataTempRegister);
+ move(reg2, reg1);
+ move(dataTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
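+    // This works because the ARM condition encoding pairs each condition
+    // with its inverse in adjacent values - Equal (EQ, 0b0000) with
+    // NotEqual (NE, 0b0001), LessThan (LT, 0b1011) with GreaterThanOrEqual
+    // (GE, 0b1010), and so on - so flipping bit 0 inverts the test.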
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return ARMv7Assembler::maxJumpReplacementSize();
+ }
+
+ // Forwards / external control flow operations:
+ //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively, for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+private:
+
+ // Should we be using TEQ for equal/not-equal?
+ void compare32(RegisterID left, TrustedImm32 right)
+ {
+ int32_t imm = right.m_value;
+ if (!imm)
+ m_assembler.tst(left, left);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
+ else {
+ move(TrustedImm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
+ }
+ }
+ }
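+    // For example, compare32(r0, TrustedImm32(-2)): -2 has no Thumb-2
+    // immediate encoding, but cmn r0, #2 computes r0 + 2 and sets the same
+    // flags as cmp r0, #-2, so negating the immediate avoids a register load.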
+
+ void test32(RegisterID reg, TrustedImm32 mask)
+ {
+ int32_t imm = mask.m_value;
+
+ if (imm == -1)
+ m_assembler.tst(reg, reg);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.tst(reg, armImm);
+ else {
+ move(mask, dataTempRegister);
+ m_assembler.tst(reg, dataTempRegister);
+ }
+ }
+ }
+
+public:
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32WithUnalignedHalfWords(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left.m_ptr, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch8(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.tst(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
+ load8(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ load32(address, dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load32(Address(dataTempRegister), dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.add_S(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dest, op1, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add_S(dest, op1, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, src, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, imm, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+        // Move the address into addressTempRegister,
+        // and load the value into dataTempRegister.
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ // Do the add.
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+            // If the operand does not fit into an immediate then load it temporarily
+            // into addressTempRegister; since we're overwriting addressTempRegister
+            // we'll need to reload it with the address afterwards.
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ }
+
+ // Store the result.
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, src1, src2);
+
+ if (cond == Overflow) {
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ return branchTest32(cond, dest);
+ }
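+    // smull leaves the full 64-bit product in (dest, dataTempRegister); the
+    // multiply fits in 32 bits exactly when the high word equals the sign
+    // extension of the low word. E.g. 0x10000 * 0x10000 gives low word 0 and
+    // high word 1, while asr #31 of the low word gives 0, flagging overflow.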
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul32(cond, src, dest, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ return branchMul32(cond, dataTempRegister, src, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ m_assembler.sub_S(srcDest, zero, srcDest);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.sub_S(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub_S(dest, op1, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub_S(dest, op1, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, dest, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, dest, imm, dest);
+ }
+
+ void relativeTableJump(RegisterID index, int scale)
+ {
+ ASSERT(scale >= 0 && scale <= 31);
+
+ // dataTempRegister will point after the jump if index register contains zero
+ move(ARMRegisters::pc, dataTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
+
+ ShiftTypeAndAmount shift(SRType_LSL, scale);
+ m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
+ jump(dataTempRegister);
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint(uint8_t imm = 0)
+ {
+ m_assembler.bkpt(imm);
+ }
+
+ ALWAYS_INLINE Call nearCall()
+ {
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ }
+
+ ALWAYS_INLINE Call call()
+ {
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ ALWAYS_INLINE Call call(Address address)
+ {
+ load32(address, dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::None);
+ }
+
+ ALWAYS_INLINE void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
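+    // The IT block above predicates the first mov on the condition and the
+    // second on its inverse, so dest ends up holding 1 or 0 without a branch.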
+
+ void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ load32(left, dataTempRegister);
+ compare32(cond, dataTempRegister, right, dest);
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load32(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ load8(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
+ {
+ padBeforePatch();
+ moveFixedWidthEncoding(imm, dst);
+ return DataLabel32(this);
+ }
+
+ ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
+ {
+ padBeforePatch();
+ moveFixedWidthEncoding(TrustedImm32(imm), dst);
+ return DataLabelPtr(this);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, left, TrustedImm32(right));
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchTest32(cond, reg, mask);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableJump()
+ {
+ padBeforePatch();
+ m_makeJumpPatchable = true;
+ Jump result = jump();
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ return label;
+ }
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+
+ ALWAYS_INLINE Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+
+ int executableOffsetFor(int location)
+ {
+ return m_assembler.executableOffsetFor(location);
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const unsigned twoWordOpSize = 4;
+ return label.labelAtOffset(-twoWordOpSize * 2);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+ {
+#if OS(LINUX) || OS(QNX)
+ ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
+#else
+ UNUSED_PARAM(rd);
+ ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
+#endif
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+ ALWAYS_INLINE Jump jump()
+ {
+ m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
+ }
+
+ ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
+ {
+ m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+ m_assembler.it(cond, true, true);
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
+ }
+ ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
+ ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
+ ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+ ArmAddress setupArmAddress(BaseIndex address)
+ {
+ if (address.offset) {
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(TrustedImm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return ArmAddress(addressTempRegister, address.index, address.scale);
+ } else
+ return ArmAddress(address.base, address.index, address.scale);
+ }
+
+ ArmAddress setupArmAddress(Address address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(TrustedImm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ ArmAddress setupArmAddress(ImplicitAddress address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(TrustedImm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ RegisterID makeBaseIndexBase(BaseIndex address)
+ {
+ if (!address.offset)
+ return address.base;
+
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(TrustedImm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return addressTempRegister;
+ }
+
+ void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
+ {
+ uint32_t value = imm.m_value;
+ m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+ m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
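+    // This deliberately always emits the full mov/movt pair (even for values
+    // a shorter encoding could hold), so code that later patches the
+    // constant - linkCall, repatching, branchPtrWithPatch - can rewrite the
+    // immediate in place without the instruction sequence changing size.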
+
+ ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMv7Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ bool m_makeJumpPatchable;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h b/src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h
new file mode 100644
index 0000000000..89cffb1278
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerCodeRef_h
+#define MacroAssemblerCodeRef_h
+
+#include "Disassembler.h"
+#include "ExecutableAllocator.h"
+#include "LLIntData.h"
+#include <wtf/DataLog.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefPtr.h>
+#include <wtf/UnusedParam.h>
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, checking any alignment requirements).
+#if CPU(ARM_THUMB2)
+// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
+// into the processor are decorated with the bottom bit set, indicating that this is
+// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
+// decorated and undecorated null, and the second test ensures that the pointer is
+// decorated.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+ ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
+
+#if CPU(X86) && OS(WINDOWS)
+#define CALLING_CONVENTION_IS_STDCALL 1
+#ifndef CDECL
+#if COMPILER(MSVC)
+#define CDECL __cdecl
+#else
+#define CDECL __attribute__ ((__cdecl))
+#endif // COMPILER(MSVC)
+#endif // CDECL
+#else
+#define CALLING_CONVENTION_IS_STDCALL 0
+#endif
+
+#if CPU(X86)
+#define HAS_FASTCALL_CALLING_CONVENTION 1
+#ifndef FASTCALL
+#if COMPILER(MSVC)
+#define FASTCALL __fastcall
+#else
+#define FASTCALL __attribute__ ((fastcall))
+#endif // COMPILER(MSVC)
+#endif // FASTCALL
+#else
+#define HAS_FASTCALL_CALLING_CONVENTION 0
+#endif // CPU(X86)
+
+namespace JSC {
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
+class FunctionPtr {
+public:
+ FunctionPtr()
+ : m_value(0)
+ {
+ }
+
+ template<typename returnType>
+ FunctionPtr(returnType(*value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType(*value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType(*value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+// MSVC doesn't seem to treat functions with different calling conventions as
+// different types; these methods are already defined for fastcall, below.
+#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
+
+ template<typename returnType>
+ FunctionPtr(returnType (CDECL *value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType (CDECL *value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+#endif
+
+#if HAS_FASTCALL_CALLING_CONVENTION
+
+ template<typename returnType>
+ FunctionPtr(returnType (FASTCALL *value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType (FASTCALL *value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+#endif
+
+ template<typename FunctionType>
+ explicit FunctionPtr(FunctionType* value)
+    // Using a C-style cast here to avoid compiler error on RVTC:
+ // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
+ // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+ void* executableAddress() const { return m_value; }
+
+
+private:
+ void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+// 'call' instructions executed in JIT code. We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+ ReturnAddressPtr()
+ : m_value(0)
+ {
+ }
+
+ explicit ReturnAddressPtr(void* value)
+ : m_value(value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit ReturnAddressPtr(FunctionPtr function)
+ : m_value(function.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+ MacroAssemblerCodePtr()
+ : m_value(0)
+ {
+ }
+
+ explicit MacroAssemblerCodePtr(void* value)
+#if CPU(ARM_THUMB2)
+ // Decorate the pointer as a thumb code pointer.
+ : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+ : m_value(value)
+#endif
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
+ {
+ ASSERT_VALID_CODE_POINTER(value);
+ MacroAssemblerCodePtr result;
+ result.m_value = value;
+ return result;
+ }
+
+#if ENABLE(LLINT)
+ static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId)
+ {
+ return createFromExecutableAddress(LLInt::getCodePtr(codeId));
+ }
+#endif
+
+ explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+ : m_value(ra.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* executableAddress() const { return m_value; }
+#if CPU(ARM_THUMB2)
+ // To use this pointer as a data address remove the decoration.
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
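+    // For example, code assembled at 0x8000 is held here as 0x8001; jumping
+    // through the decorated pointer keeps the processor in Thumb state, and
+    // dataLocation() strips the bit again when the bytes themselves need to
+    // be read or patched.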
+
+ bool operator!() const
+ {
+ return !m_value;
+ }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code. A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+private:
+ // This is private because it's dangerous enough that we want uses of it
+ // to be easy to find - hence the static create method below.
+ explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
+ : m_codePtr(codePtr)
+ {
+ ASSERT(m_codePtr);
+ }
+
+public:
+ MacroAssemblerCodeRef()
+ {
+ }
+
+ MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+ : m_codePtr(executableMemory->start())
+ , m_executableMemory(executableMemory)
+ {
+ ASSERT(m_executableMemory->isManaged());
+ ASSERT(m_executableMemory->start());
+ ASSERT(m_codePtr);
+ }
+
+ // Use this only when you know that the codePtr refers to code that is
+ // already being kept alive through some other means. Typically this means
+ // that codePtr is immortal.
+ static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
+ {
+ return MacroAssemblerCodeRef(codePtr);
+ }
+
+#if ENABLE(LLINT)
+ // Helper for creating self-managed code refs from LLInt.
+ static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId)
+ {
+ return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
+ }
+#endif
+
+ ExecutableMemoryHandle* executableMemory() const
+ {
+ return m_executableMemory.get();
+ }
+
+ MacroAssemblerCodePtr code() const
+ {
+ return m_codePtr;
+ }
+
+ size_t size() const
+ {
+ if (!m_executableMemory)
+ return 0;
+ return m_executableMemory->sizeInBytes();
+ }
+
+ bool tryToDisassemble(const char* prefix) const
+ {
+ return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile());
+ }
+
+ bool operator!() const { return !m_codePtr; }
+
+private:
+ MacroAssemblerCodePtr m_codePtr;
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+};
+
+} // namespace JSC
+
+#endif // MacroAssemblerCodeRef_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h b/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
new file mode 100644
index 0000000000..e18d86c5b3
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
@@ -0,0 +1,2751 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerMIPS_h
+#define MacroAssemblerMIPS_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AbstractMacroAssembler.h"
+#include "MIPSAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> {
+public:
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+
+ MacroAssemblerMIPS()
+ : m_fixedWidth(false)
+ {
+ }
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -2147483647 - 1 && value <= 2147483647;
+ }
+
+ static const Scale ScalePtr = TimesFour;
+
+    // For storing immediate values
+ static const RegisterID immTempRegister = MIPSRegisters::t0;
+    // For storing data loaded from memory
+ static const RegisterID dataTempRegister = MIPSRegisters::t1;
+    // For storing the address base
+ static const RegisterID addrTempRegister = MIPSRegisters::t2;
+    // For storing comparison results
+ static const RegisterID cmpTempRegister = MIPSRegisters::t3;
+
+ // FP temp register
+ static const FPRegisterID fpTempRegister = MIPSRegisters::f16;
+
+ static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;
+
+ enum RelationalCondition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual
+ };
+
+ enum ResultCondition {
+ Overflow,
+ Signed,
+ Zero,
+ NonZero
+ };
+
+ enum DoubleCondition {
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static const RegisterID stackPointerRegister = MIPSRegisters::sp;
+ static const RegisterID returnAddressRegister = MIPSRegisters::ra;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+    // For many operations the source may be a TrustedImm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addu(dest, dest, src);
+ }
+
+ void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.addu(dest, op1, op2);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, src, imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ addu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.addu(dest, src, immTempRegister);
+ }
+ }
+
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+                li      immTemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
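+    // The (offset + 0x8000) >> 16 adjustment compensates for lw/sw
+    // sign-extending their 16-bit displacement. E.g. for offset 0x18000,
+    // lui loads 0x0002 and the low halfword 0x8000 sign-extends to -0x8000,
+    // so the effective address is base + 0x20000 - 0x8000 = base + 0x18000.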
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ if (dest.offset >= -32768 && dest.offset <= 32767 && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, dest.base, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, dest.base, dest.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (dest.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, dest.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, addrTempRegister, dest.offset);
+ }
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw cmpTemp, 0(addrTemp)
+ addu dataTemp, cmpTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(cmpTempRegister, addrTempRegister, 0);
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ add32(imm, address)
+ sltu immTemp, dataTemp, cmpTemp # set carry-in bit
+ lw dataTemp, 4(addrTemp)
+            addiu dataTemp, dataTemp, imm.m_value >> 31 ? -1 : 0
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, 4(addrTemp)
+ */
+ add32(imm, address);
+ m_assembler.sltu(immTempRegister, dataTempRegister, cmpTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 4);
+ if (imm.m_value >> 31)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -1);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.sw(dataTempRegister, addrTempRegister, 4);
+ }
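+    // The sltu computes the carry out of the low-word addition: add32 leaves
+    // the low sum in dataTemp and the original low word in cmpTemp, and an
+    // unsigned wrap (e.g. 0xffffffff + 1 == 0) makes the sum compare below
+    // its input, setting immTemp to 1 for propagation into the high word.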
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, dest, src);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+ m_assembler.andi(dest, dest, imm.m_value);
+ else {
+ /*
+ li immTemp, imm
+ and dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, dest, immTempRegister);
+ }
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+ m_assembler.andi(dest, src, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, src, immTempRegister);
+ }
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, dest, shiftAmount);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, src, shiftAmount);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ m_assembler.sllv(dest, dest, immTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ m_assembler.sllv(dest, src, immTempRegister);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul(dest, dest, src);
+ }
+
+ void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.mul(dest, op1, op2);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value == 1 && !m_fixedWidth)
+ move(src, dest);
+ else {
+ /*
+ li dataTemp, imm
+ mul dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.mul(dest, src, dataTempRegister);
+ }
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, dest, src);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ return;
+
+ if (imm.m_value > 0 && imm.m_value < 65535
+ && !m_fixedWidth) {
+ m_assembler.ori(dest, dest, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, dest, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, dest, dataTempRegister);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ return;
+
+ if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) {
+ m_assembler.ori(dest, src, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, src, dataTempRegister);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
+ store32(dataTempRegister, dest.m_ptr);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, dest, shiftAmount);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, src, shiftAmount);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, dest, imm.m_value);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, src, imm.m_value);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srlv(dest, dest, shiftAmount);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srlv(dest, src, shiftAmount);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.srl(dest, dest, imm.m_value);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.srl(dest, src, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subu(dest, dest, src);
+ }
+
+ void sub32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.subu(dest, op1, op2);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+                addiu   dest, dest, -imm
+ */
+ m_assembler.addiu(dest, dest, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+                subu    dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, dest, immTempRegister);
+ }
+ }
+
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+                addiu   dest, src, -imm
+ */
+ m_assembler.addiu(dest, src, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, src, immTempRegister);
+ }
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
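+
+ /* A note on the (offset + 0x8000) >> 16 pattern used throughout this
+ file: MIPS loads and stores sign-extend their 16-bit displacement, so
+ a 32-bit offset is split as hi = (offset + 0x8000) >> 16 and
+ lo = offset & 0xffff, which satisfies (hi << 16) + (int16_t)lo == offset.
+ Worked example, offset = 0x00018000: hi = 0x2, lo = 0x8000, and
+ 0x20000 + (int16_t)0x8000 = 0x20000 - 0x8000 = 0x18000. */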
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw dataTemp, 0(addrTemp)
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 0);
+
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, dest, src);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, dest, MIPSRegisters::zero);
+ return;
+ }
+
+ /*
+ li immTemp, imm
+ xor dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, dest, immTempRegister);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, src, MIPSRegisters::zero);
+ return;
+ }
+
+ /*
+ li immTemp, imm
+ xor dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, src, immTempRegister);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtd(dst, src);
+ }
+
+ void absDouble(FPRegisterID, FPRegisterID)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ return result;
+ }
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be a TrustedImm32. Address
+ // operand objects for loads and stores will be implicitly constructed if a
+ // register is passed.
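+ //
+ // An illustrative sketch (arbitrary register choices, not prescriptive),
+ // assuming a base pointer in a0: load a field, then store an immediate
+ // to a neighbouring slot.
+ //
+ //     load32(Address(MIPSRegisters::a0, 8), MIPSRegisters::v0);
+ //     store32(TrustedImm32(42), Address(MIPSRegisters::a0, 12));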
+
+ /* Need to use a zero-extended load byte for load8. */
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lbu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lbu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lbu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lbu dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lb dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lb dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lw(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lw dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lw dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32764
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ (Big-Endian)
+ lwl dest, address.offset(addrTemp)
+ lwr dest, address.offset+3(addrTemp)
+ (Little-Endian)
+ lwl dest, address.offset+3(addrTemp)
+ lwr dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, address.offset);
+ m_assembler.lwr(dest, addrTempRegister, address.offset + 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, address.offset + 3);
+ m_assembler.lwr(dest, addrTempRegister, address.offset);
+#endif
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, address.offset >> 16
+ ori immTemp, immTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, immTemp
+ (Big-Endian)
+ lwl dest, 0(addrTemp)
+ lwr dest, 3(addrTemp)
+ (Little-Endian)
+ lwl dest, 3(addrTemp)
+ lwr dest, 0(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, address.offset >> 16);
+ m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, 0);
+ m_assembler.lwr(dest, addrTempRegister, 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, 3);
+ m_assembler.lwr(dest, addrTempRegister, 0);
+#endif
+ }
+ }
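+
+ /* Worked example of the lwl/lwr pairing above, little-endian: for the
+ bytes {0x11, 0x22, 0x33, 0x44} starting at an unaligned address, lwr
+ fills the low-order bytes of dest from the lower aligned word and lwl
+ fills the high-order bytes from the upper one, leaving
+ dest == 0x44332211 -- the same result an aligned lw would produce,
+ but without an address-error exception. */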
+
+ void load32(const void* address, RegisterID dest)
+ {
+ /*
+ li addrTemp, address
+ lw dest, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ lw dest, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ load32WithAddressOffsetPatch(address, dest);
+ return dataLabel;
+ }
+
+ /* Need to use a zero-extended load half-word for load16. */
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lhu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lhu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ /* Need to use a zero-extended load half-word for load16. */
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lhu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lhu dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lh dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lh dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ sw src, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sb src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sb src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sb src, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sh src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sh src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sw(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ if (!imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, address.base, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, address.base, address.offset);
+ }
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw immTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ }
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ if (!imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ }
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ /*
+ li addrTemp, address
+ sw src, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(src, addrTempRegister, 0);
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sw src, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ // Floating-point operations:
+
+ static bool supportsFloatingPoint()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ static bool supportsFloatingPointTruncate()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ static bool supportsFloatingPointSqrt()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool supportsFloatingPointAbs() { return false; }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
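+ //
+ // Illustrative sketch (register choice arbitrary): spilling a
+ // callee-saved register around a clobbering sequence.
+ //
+ //     push(MIPSRegisters::s0);
+ //     ... code that clobbers s0 ...
+ //     pop(MIPSRegisters::s0);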
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.lw(dest, MIPSRegisters::sp, 0);
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4);
+ m_assembler.sw(src, MIPSRegisters::sp, 0);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ move(imm, immTempRegister);
+ push(immTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (m_fixedWidth) {
+ m_assembler.lui(dest, imm.m_value >> 16);
+ m_assembler.ori(dest, dest, imm.m_value);
+ } else
+ m_assembler.li(dest, imm.m_value);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ m_assembler.move(dest, src);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, immTempRegister);
+ move(reg2, reg1);
+ move(immTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations returns a Jump
+ // object that may be linked at a later point, allowing forward jumps
+ // and jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively; for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the terms 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
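+ //
+ // Illustrative sketch (registers and control flow arbitrary): loop
+ // while the counter in s0, treated as signed, is below 10.
+ //
+ //     Label top(this);
+ //     add32(TrustedImm32(1), MIPSRegisters::s0);
+ //     branch32(LessThan, MIPSRegisters::s0, TrustedImm32(10)).linkTo(top, this);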
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ compare32(cond, dataTempRegister, immTempRegister, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ // The preceding load8() clobbers immTempRegister, so the move()
+ // must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ if (cond == Equal)
+ return branchEqual(left, right);
+ if (cond == NotEqual)
+ return branchNotEqual(left, right);
+ if (cond == Above) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == AboveOrEqual) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Below) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == BelowOrEqual) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThan) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThan) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+
+ return Jump();
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ move(right, immTempRegister);
+ return branch32(cond, left, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load32(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32(left, dataTempRegister);
+ // The preceding load32() clobbers immTempRegister, so the move()
+ // must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32WithUnalignedHalfWords(left, dataTempRegister);
+ // The preceding load32WithUnalignedHalfWords() clobbers
+ // immTempRegister, so the move() must come after it.
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.andInsn(cmpTempRegister, reg, mask);
+ if (cond == Zero)
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ return branchEqual(reg, MIPSRegisters::zero);
+ return branchNotEqual(reg, MIPSRegisters::zero);
+ }
+ move(mask, immTempRegister);
+ return branchTest32(cond, reg, immTempRegister);
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load8(Address(dataTempRegister), dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump jump()
+ {
+ return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero);
+ }
+
+ void jump(RegisterID target)
+ {
+ move(target, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ }
+
+ void jump(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ m_fixedWidth = true;
+ load32(address.m_ptr, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
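+
+ // Indirect jumps and calls above go through t9 because the MIPS o32
+ // PIC convention expects the callee's address in t9 ($25):
+ // position-independent callees recompute their gp from it in the
+ // function prologue.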
+
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
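+ //
+ // The Overflow checks below use the usual sign trick for addition:
+ // overflow is only possible when both operands have the same sign,
+ // and it happened iff the result's sign differs from theirs. So
+ // (op1 ^ op2) < 0 (differing signs) rules overflow out, and otherwise
+ // (result ^ op1) < 0 detects it. Worked example:
+ // 0x7fffffff + 1 = 0x80000000; the operands share sign bit 0, the
+ // result has sign bit 1 -> overflow. Subtraction mirrors this with
+ // the first test inverted.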
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dest, dataTemp
+ xor cmpTemp, dataTemp, src
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, op1
+ xor cmpTemp, dataTemp, op2
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, op2
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(op1, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, op2);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(op1, op2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(op1, op2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(op1, op2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ move(src, dest);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, dest
+ xori cmpTemp, dataTemp, imm
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addiu dataTemp, dataTemp, imm
+ move dest, dataTemp
+ xori cmpTemp, dataTemp, imm
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ // xori zero-extends its 16-bit immediate, so the sign-bit trick
+ // below is only valid for non-negative values of imm.
+ if (imm.m_value >= 0 && imm.m_value <= 32767 && !m_fixedWidth) {
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ store32(dataTempRegister, dest.m_ptr);
+ m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ } else {
+ load32(dest.m_ptr, dataTempRegister);
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ store32(dataTempRegister, dest.m_ptr);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ }
+ return jump();
+ }
+ move(imm, immTempRegister);
+ load32(dest.m_ptr, dataTempRegister);
+ add32(immTempRegister, dataTempRegister);
+ store32(dataTempRegister, dest.m_ptr);
+ if (cond == Signed) {
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero)
+ return branchEqual(dataTempRegister, MIPSRegisters::zero);
+ if (cond == NonZero)
+ return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ mult src, dest
+ mfhi dataTemp
+ mflo dest
+ sra addrTemp, dest, 31
+ beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ m_assembler.mult(src1, src2);
+ m_assembler.mfhi(dataTempRegister);
+ m_assembler.mflo(dest);
+ m_assembler.sra(addrTempRegister, dest, 31);
+ m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ mul32(src1, src2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ mul32(src1, src2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ mul32(src1, src2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ mult src, dest
+ mfhi dataTemp
+ mflo dest
+ sra addrTemp, dest, 31
+ beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ m_assembler.mult(src, dest);
+ m_assembler.mfhi(dataTempRegister);
+ m_assembler.mflo(dest);
+ m_assembler.sra(addrTempRegister, dest, 31);
+ m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ mul32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ mul32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ mul32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchMul32(cond, immTempRegister, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dest, dataTemp
+ xor cmpTemp, dataTemp, src
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ subu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bgez(cmpTempRegister, 10);
+ m_assembler.subu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ sub32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ sub32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ sub32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchSub32(cond, immTempRegister, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchSub32(cond, src, immTempRegister, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, op1
+ xor cmpTemp, dataTemp, op2
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ subu dest, dataTemp, op2
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(op1, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+ m_assembler.bgez(cmpTempRegister, 10);
+ m_assembler.subu(dest, dataTempRegister, op2);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ sub32(op1, op2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ sub32(op1, op2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ sub32(op1, op2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ m_assembler.li(dataTempRegister, -1);
+ return branchMul32(cond, dataTempRegister, srcDest);
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Signed) {
+ or32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ or32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ or32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ }
+
+ Call nearCall()
+ {
+ /* We need two words for relaxation. */
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.jal();
+ m_assembler.nop();
+ return Call(m_assembler.label(), Call::LinkableNear);
+ }
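+
+ // The two leading nops in nearCall() reserve space: jal can only reach
+ // targets within its 256MB region (26-bit index), so if the callee ends
+ // up out of range, the link step can rewrite the sequence into a full
+ // lui/ori/jalr form in the reserved words (see MIPSAssembler's jump
+ // relaxation when linking).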
+
+ Call call()
+ {
+ m_assembler.lui(MIPSRegisters::t9, 0);
+ m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ return Call(m_assembler.label(), Call::Linkable);
+ }
+
+ Call call(RegisterID target)
+ {
+ move(target, MIPSRegisters::t9);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ return Call(m_assembler.label(), Call::None);
+ }
+
+ Call call(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jalr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ return Call(m_assembler.label(), Call::None);
+ }
+
+ void ret()
+ {
+ m_assembler.jr(MIPSRegisters::ra);
+ m_assembler.nop();
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ if (cond == Equal) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltiu(dest, dest, 1);
+ } else if (cond == NotEqual) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltu(dest, MIPSRegisters::zero, dest);
+ } else if (cond == Above)
+ m_assembler.sltu(dest, right, left);
+ else if (cond == AboveOrEqual) {
+ m_assembler.sltu(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == Below)
+ m_assembler.sltu(dest, left, right);
+ else if (cond == BelowOrEqual) {
+ m_assembler.sltu(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == GreaterThan)
+ m_assembler.slt(dest, right, left);
+ else if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == LessThan)
+ m_assembler.slt(dest, left, right);
+ else if (cond == LessThanOrEqual) {
+ m_assembler.slt(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ }
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ move(right, immTempRegister);
+ compare32(cond, left, immTempRegister, dest);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ load8(address, dataTempRegister);
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ m_assembler.sltiu(dest, dataTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+ } else {
+ move(mask, immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ if (cond == Zero)
+ m_assembler.sltiu(dest, cmpTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+ }
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ load32(address, dataTempRegister);
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ m_assembler.sltiu(dest, dataTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
+ } else {
+ move(mask, immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ if (cond == Zero)
+ m_assembler.sltiu(dest, cmpTempRegister, 1);
+ else
+ m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
+ }
+ }
+
+ DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ DataLabel32 label(this);
+ move(imm, dest);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ DataLabelPtr label(this);
+ move(initialValue, dest);
+ m_fixedWidth = false;
+ return label;
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_fixedWidth = true;
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, left, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
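+
+ // m_fixedWidth is forced on in the *WithPatch operations so the
+ // constant is always materialized as the full lui/ori pair, giving the
+ // repatch code a fixed-size site to rewrite later. Illustrative use
+ // (names arbitrary):
+ //
+ //     DataLabelPtr site;
+ //     Jump miss = branchPtrWithPatch(NotEqual, MIPSRegisters::a0, site);
+ //     // ... later, a RepatchBuffer can replace the pointer at 'site'.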
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_fixedWidth = true;
+ load32(left, dataTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, immTempRegister);
+ Jump temp = branch32(cond, dataTempRegister, immTempRegister);
+ m_fixedWidth = false;
+ return temp;
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ m_fixedWidth = true;
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), address);
+ }
+
+ Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't set the return address register.
+ m_fixedWidth = true;
+ move(TrustedImm32(0), MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ return Call(m_assembler.label(), Call::Linkable);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lwc1 dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lwc1 dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address.offset
+ addu addrTemp, addrTemp, base
+ lwc1 dest, 0(addrTemp)
+ lwc1 dest+1, 4(addrTemp)
+ */
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, 0);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ m_assembler.ldc1(dest, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ ldc1 dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lwc1 dest, address.offset(addrTemp)
+ lwc1 dest+1, (address.offset+4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lwc1 dest, (address.offset & 0xffff)(addrTemp)
+ lwc1 dest+1, ((address.offset & 0xffff) + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+ }
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ ldc1 dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ ldc1 dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address
+ lwc1 dest, 0(addrTemp)
+ lwc1 dest+1, 4(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, 0);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
+#else
+ /*
+ li addrTemp, address
+ ldc1 dest, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.ldc1(dest, addrTempRegister, 0);
+#endif
+ }
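+
+ // MIPS I has no ldc1/sdc1, and a double occupies an even/odd pair of
+ // single-precision registers, hence the paired lwc1/swc1 with
+ // FPRegisterID(reg + 1) above: a double in f0 uses f0 (low-order word
+ // on little-endian targets) and f1 (high-order word).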
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ swc1 src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ swc1 src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+#if WTF_MIPS_ISA(1)
+ /*
+ li addrTemp, address.offset
+ addu addrTemp, addrTemp, base
+ swc1 src, 0(addrTemp)
+ swc1 src+1, 4(addrTemp)
+ */
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, 0);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sdc1(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sdc1 src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+#if WTF_MIPS_ISA(1)
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ swc1 src, address.offset(addrTemp)
+ swc1 src+1, (address.offset + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ swc1 src, (address.offset & 0xffff)(addrTemp)
+ swc1 src+1, ((address.offset & 0xffff) + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+ }
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sdc1 src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sdc1 src, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+#if WTF_MIPS_ISA(1)
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.swc1(src, addrTempRegister, 0);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sdc1(src, addrTempRegister, 0);
+#endif
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
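+        // In fixed-width mode, emit the move even when src == dest so the instruction count stays constant.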
+ if (src != dest || m_fixedWidth)
+ m_assembler.movd(dest, src);
+ }
+
+ void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
+ {
+ moveDouble(fr1, fpTempRegister);
+ moveDouble(fr2, fr1);
+ moveDouble(fpTempRegister, fr2);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.addd(dest, dest, src);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.addd(dest, op1, op2);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.addd(dest, dest, fpTempRegister);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, fpTempRegister);
+ m_assembler.addd(dest, dest, fpTempRegister);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.subd(dest, dest, src);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.subd(dest, op1, op2);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.subd(dest, dest, fpTempRegister);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.muld(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.muld(dest, dest, fpTempRegister);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.muld(dest, op1, op2);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.divd(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.divd(dest, op1, op2);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ m_assembler.divd(dest, dest, fpTempRegister);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.negd(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.mtc1(src, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ m_assembler.mtc1(dataTempRegister, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ m_assembler.mtc1(dataTempRegister, fpTempRegister);
+ m_assembler.cvtdw(dest, fpTempRegister);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.cvtds(dst, src);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.cvtsd(dst, src);
+ }
+
+ void insertRelaxationWords()
+ {
+ /* We need four words for relaxation. */
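+        // Presumably reserved so a short branch can later be relaxed into a long jump sequence (e.g. lui/ori/jr plus its delay slot).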
+        m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 3); // Jump over the nops.
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.nop();
+ }
+
+ Jump branchTrue()
+ {
+ m_assembler.appendJump();
+ m_assembler.bc1t();
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchFalse()
+ {
+ m_assembler.appendJump();
+ m_assembler.bc1f();
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchEqual(RegisterID rs, RegisterID rt)
+ {
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.appendJump();
+ m_assembler.beq(rs, rt, 0);
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchNotEqual(RegisterID rs, RegisterID rt)
+ {
+ m_assembler.nop();
+ m_assembler.nop();
+ m_assembler.appendJump();
+ m_assembler.bne(rs, rt, 0);
+ m_assembler.nop();
+ insertRelaxationWords();
+ return Jump(m_assembler.label());
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
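+        // Each condition pairs a MIPS FP compare (which sets the FCC flag) with bc1t/bc1f;
+        // the c.u*/c.n* variants also evaluate true for unordered (NaN) operands.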
+ if (cond == DoubleEqual) {
+ m_assembler.ceqd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleNotEqual) {
+ m_assembler.cueqd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThan) {
+ m_assembler.cngtd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrEqual) {
+ m_assembler.cnged(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleLessThan) {
+ m_assembler.cltd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleLessThanOrEqual) {
+ m_assembler.cled(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ m_assembler.cueqd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleNotEqualOrUnordered) {
+ m_assembler.ceqd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrUnordered) {
+ m_assembler.coled(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+ m_assembler.coltd(left, right);
+ return branchFalse(); // false
+ }
+ if (cond == DoubleLessThanOrUnordered) {
+ m_assembler.cultd(left, right);
+ return branchTrue();
+ }
+ if (cond == DoubleLessThanOrEqualOrUnordered) {
+ m_assembler.culed(left, right);
+ return branchTrue();
+ }
+ ASSERT(0);
+
+ return Jump();
+ }
+
+    // Truncates 'src' to an integer, and places the result in 'dest'.
+    // If the result is not representable as a 32-bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, INT_MAX, 0x7fffffff).
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
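+        // trunc.w.d saturates to 0x7fffffff on overflow or NaN, so that value flags a possible failure.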
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff));
+ }
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0));
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ }
+
+ // Result is undefined if src > 2^31
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.truncwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+ }
+
+    // Converts 'src' to an integer, and places the result in 'dest'.
+    // If the result is not representable as a 32-bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ m_assembler.cvtwd(fpTempRegister, src);
+ m_assembler.mfc1(dest, fpTempRegister);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branch32(Equal, dest, MIPSRegisters::zero));
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fpTemp, src));
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ RelationalCondition r;
+ if (cond == Equal)
+ r = NotEqual;
+ else if (cond == NotEqual)
+ r = Equal;
+ else if (cond == Above)
+ r = BelowOrEqual;
+ else if (cond == AboveOrEqual)
+ r = Below;
+ else if (cond == Below)
+ r = AboveOrEqual;
+ else if (cond == BelowOrEqual)
+ r = Above;
+ else if (cond == GreaterThan)
+ r = LessThanOrEqual;
+ else if (cond == GreaterThanOrEqual)
+ r = LessThan;
+ else if (cond == LessThan)
+ r = GreaterThanOrEqual;
+ else if (cond == LessThanOrEqual)
+ r = GreaterThan;
+ return r;
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ MIPSAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return MIPSAssembler::maxJumpReplacementSize();
+    }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ MIPSAssembler::revertJumpToMove(instructionStart.dataLocation(), immTempRegister, reinterpret_cast<int>(initialValue) & 0xffff);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+private:
+ // If m_fixedWidth is true, we will generate a fixed number of instructions.
+ // Otherwise, we can emit any number of instructions.
+ bool m_fixedWidth;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ MIPSAssembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MacroAssemblerMIPS_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp b/src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp
new file mode 100644
index 0000000000..59de3ff48c
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "MacroAssemblerSH4.h"
+
+namespace JSC {
+
+void MacroAssemblerSH4::linkCall(void* code, Call call, FunctionPtr function)
+{
+ SH4Assembler::linkCall(code, call.m_label, function.value());
+}
+
+void MacroAssemblerSH4::repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+{
+ SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+}
+
+void MacroAssemblerSH4::repatchCall(CodeLocationCall call, FunctionPtr destination)
+{
+ SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(SH4)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerSH4.h b/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
new file mode 100644
index 0000000000..56fb74d45b
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
@@ -0,0 +1,2293 @@
+/*
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef MacroAssemblerSH4_h
+#define MacroAssemblerSH4_h
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "SH4Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> {
+public:
+ typedef SH4Assembler::FPRegisterID FPRegisterID;
+
+ static const Scale ScalePtr = TimesFour;
+ static const FPRegisterID fscratch = SH4Registers::fr10;
+ static const RegisterID stackPointerRegister = SH4Registers::sp;
+ static const RegisterID linkRegister = SH4Registers::pr;
+ static const RegisterID scratchReg3 = SH4Registers::r13;
+
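+    // SH4's mov.l @(disp,Rn) form scales a 4-bit displacement by 4, so the largest directly encodable offset is 15 * 4 = 60.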
+ static const int MaximumCompactPtrAlignedAddressOffset = 60;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset);
+ }
+
+ enum RelationalCondition {
+ Equal = SH4Assembler::EQ,
+ NotEqual = SH4Assembler::NE,
+ Above = SH4Assembler::HI,
+ AboveOrEqual = SH4Assembler::HS,
+ Below = SH4Assembler::LI,
+ BelowOrEqual = SH4Assembler::LS,
+ GreaterThan = SH4Assembler::GT,
+ GreaterThanOrEqual = SH4Assembler::GE,
+ LessThan = SH4Assembler::LT,
+ LessThanOrEqual = SH4Assembler::LE
+ };
+
+ enum ResultCondition {
+ Overflow = SH4Assembler::OF,
+ Signed = SH4Assembler::SI,
+ Zero = SH4Assembler::EQ,
+ NonZero = SH4Assembler::NE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = SH4Assembler::EQ,
+ DoubleNotEqual = SH4Assembler::NE,
+ DoubleGreaterThan = SH4Assembler::GT,
+ DoubleGreaterThanOrEqual = SH4Assembler::GE,
+ DoubleLessThan = SH4Assembler::LT,
+ DoubleLessThanOrEqual = SH4Assembler::LE,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = SH4Assembler::EQU,
+ DoubleNotEqualOrUnordered = SH4Assembler::NEU,
+ DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
+ DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
+ DoubleLessThanOrUnordered = SH4Assembler::LTU,
+        DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU
+ };
+
+ RegisterID claimScratch()
+ {
+ return m_assembler.claimScratch();
+ }
+
+ void releaseScratch(RegisterID reg)
+ {
+ m_assembler.releaseScratch(reg);
+ }
+
+ // Integer arithmetic operations
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addlRegReg(src, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (m_assembler.isImmediate(imm.m_value)) {
+ m_assembler.addlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movlRegReg(src, dest);
+ add32(imm, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ add32(imm, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src.m_ptr, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andlRegReg(src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+ m_assembler.andlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ m_assembler.andlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ and32(src, dest);
+ return;
+ }
+
+ and32(imm, dest);
+ }
+
+ void lshift32(RegisterID shiftamount, RegisterID dest)
+ {
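+        // SH4's "and #imm, R0" form only exists for r0; any other register needs the mask in a scratch.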
+ if (shiftamount == SH4Registers::r0)
+ m_assembler.andlImm8r(0x1f, shiftamount);
+ else {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(0x1f, scr);
+ m_assembler.andlRegReg(scr, shiftamount);
+ releaseScratch(scr);
+ }
+ m_assembler.shllRegReg(dest, shiftamount);
+ }
+
+ void rshift32(int imm, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(-imm, scr);
+ m_assembler.shaRegReg(dest, scr);
+ releaseScratch(scr);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value)
+ return;
+
+ if ((imm.m_value == 1) || (imm.m_value == 2) || (imm.m_value == 8) || (imm.m_value == 16)) {
+ m_assembler.shllImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+        m_assembler.loadConstant(imm.m_value & 0x1f, scr);
+ m_assembler.shllRegReg(dest, scr);
+ releaseScratch(scr);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+
+ lshift32(shiftamount, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imullRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(imm, scr);
+ if (src != dest)
+ move(src, dest);
+ mul32(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orlRegReg(src, dest);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+ m_assembler.orlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.orlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ or32(src, dest);
+ return;
+ }
+
+ or32(imm, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ xor32(src, dest);
+ return;
+ }
+
+ xor32(imm, dest);
+ }
+
+ void rshift32(RegisterID shiftamount, RegisterID dest)
+ {
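+        // SHAD shifts left for a positive count and arithmetically right for a negative one, hence the negation below.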
+ if (shiftamount == SH4Registers::r0)
+ m_assembler.andlImm8r(0x1f, shiftamount);
+ else {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(0x1f, scr);
+ m_assembler.andlRegReg(scr, shiftamount);
+ releaseScratch(scr);
+ }
+ m_assembler.neg(shiftamount, shiftamount);
+ m_assembler.shaRegReg(dest, shiftamount);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value & 0x1f)
+ rshift32(imm.m_value & 0x1f, dest);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sublRegReg(src, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address, RegisterID scratchReg)
+ {
+ RegisterID result = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(-imm.m_value))
+ m_assembler.addlImm8r(-imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.sublRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID result = claimScratch();
+ RegisterID scratchReg = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(-imm.m_value))
+ m_assembler.addlImm8r(-imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.sublRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ releaseScratch(scratchReg);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address, RegisterID scratchReg)
+ {
+ RegisterID result = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(imm.m_value))
+ m_assembler.addlImm8r(imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.addlRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID result = claimScratch();
+ RegisterID scratchReg = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(imm.m_value))
+ m_assembler.addlImm8r(imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.addlRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ releaseScratch(scratchReg);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID scr1 = claimScratch();
+ RegisterID scr2 = claimScratch();
+
+ // Add 32-bit LSB first.
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scr1);
+ m_assembler.movlMemReg(scr1, scr1); // scr1 = 32-bit LSB of int64 @ address
+ m_assembler.loadConstant(imm.m_value, scr2);
+ m_assembler.clrt();
+ m_assembler.addclRegReg(scr1, scr2);
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scr1);
+ m_assembler.movlRegMem(scr2, scr1); // Update address with 32-bit LSB result.
+
+ // Then add 32-bit MSB.
+ m_assembler.addlImm8r(4, scr1);
+ m_assembler.movlMemReg(scr1, scr1); // scr1 = 32-bit MSB of int64 @ address
+ m_assembler.movt(scr2);
+ if (imm.m_value < 0)
+ m_assembler.addlImm8r(-1, scr2); // Sign extend imm value if needed.
+ m_assembler.addvlRegReg(scr2, scr1);
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr) + 4, scr2);
+ m_assembler.movlRegMem(scr1, scr2); // Update (address + 4) with 32-bit MSB result.
+
+ releaseScratch(scr2);
+ releaseScratch(scr1);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (m_assembler.isImmediate(-imm.m_value)) {
+ m_assembler.addlImm8r(-imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.sublRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ m_assembler.sublRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorlRegReg(src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID srcDest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.notlReg(srcDest, srcDest);
+ return;
+ }
+
+ if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ m_assembler.xorlRegReg(scr, srcDest);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.xorlImm8r(imm.m_value, srcDest);
+ }
+
+ void compare32(int imm, RegisterID dst, RelationalCondition cond)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
+ m_assembler.cmpEqImmR0(imm, dst);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm, scr);
+ m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
+ {
+ RegisterID scr = claimScratch();
+ if (!offset) {
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ void testImm(int imm, int offset, RegisterID base)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+
+ if ((offset < 0) || (offset >= 64)) {
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ } else if (offset)
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ else
+ m_assembler.movlMemReg(base, scr);
+ if (m_assembler.isImmediate(imm))
+ m_assembler.movImm8(imm, scr1);
+ else
+ m_assembler.loadConstant(imm, scr1);
+
+ m_assembler.testlRegReg(scr, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void testlImm(int imm, RegisterID dst)
+ {
+ if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) {
+ m_assembler.testlImm8r(imm, dst);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm, scr);
+ m_assembler.testlRegReg(scr, dst);
+ releaseScratch(scr);
+ }
+
+ void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
+ {
+ if (!offset) {
+ RegisterID scr = claimScratch();
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
+ {
+ if (!offset) {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.loadConstant(imm, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.loadConstant(imm, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.loadConstant(imm, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ }
+
+    // Memory access operations
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address.base, address.offset, dest);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(address.base, address.offset, dest);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load8(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load8Signed(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load32(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)), dest);
+ m_assembler.movlMemReg(dest, dest);
+ }
+
+ void load32(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movlMemReg(base, dest);
+ return;
+ }
+
+ if ((offset >= 0) && (offset < 64)) {
+ m_assembler.movlMemReg(offset >> 2, base, dest);
+ return;
+ }
+
+ if ((dest == SH4Registers::r0) && (dest != base)) {
+ m_assembler.loadConstant((offset), dest);
+ m_assembler.movlR0mr(base, dest);
+ return;
+ }
+
+ RegisterID scr;
+ if (dest == base)
+ scr = claimScratch();
+ else
+ scr = dest;
+ m_assembler.loadConstant((offset), scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, dest);
+
+ if (dest == base)
+ releaseScratch(scr);
+ }
+
+ void load8Signed(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movbMemReg(base, dest);
+ return;
+ }
+
+ if ((offset > 0) && (offset < 64) && (dest == SH4Registers::r0)) {
+ m_assembler.movbMemReg(offset, base, dest);
+ return;
+ }
+
+ if (base != dest) {
+ m_assembler.loadConstant((offset), dest);
+ m_assembler.addlRegReg(base, dest);
+ m_assembler.movbMemReg(dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((offset), scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movbMemReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void load8(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movbMemReg(base, dest);
+ m_assembler.extub(dest, dest);
+ return;
+ }
+
+ if ((offset > 0) && (offset < 64) && (dest == SH4Registers::r0)) {
+ m_assembler.movbMemReg(offset, base, dest);
+ m_assembler.extub(dest, dest);
+ return;
+ }
+
+ if (base != dest) {
+ m_assembler.loadConstant((offset), dest);
+ m_assembler.addlRegReg(base, dest);
+ m_assembler.movbMemReg(dest, dest);
+ m_assembler.extub(dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((offset), scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movbMemReg(scr, dest);
+ m_assembler.extub(dest, dest);
+ releaseScratch(scr);
+ }
+
+ void load32(RegisterID r0, RegisterID src, RegisterID dst)
+ {
+ ASSERT(r0 == SH4Registers::r0);
+ m_assembler.movlR0mr(src, dst);
+ }
+
+ void load32(RegisterID src, RegisterID dst)
+ {
+ m_assembler.movlMemReg(src, dst);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (!address.offset) {
+ m_assembler.movwMemReg(address.base, dest);
+ extuw(dest, dest);
+ return;
+ }
+
+ if ((address.offset > 0) && (address.offset < 64) && (dest == SH4Registers::r0)) {
+ m_assembler.movwMemReg(address.offset, address.base, dest);
+ extuw(dest, dest);
+ return;
+ }
+
+ if (address.base != dest) {
+ m_assembler.loadConstant((address.offset), dest);
+ m_assembler.addlRegReg(address.base, dest);
+ m_assembler.movwMemReg(dest, dest);
+ extuw(dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((address.offset), scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movwMemReg(scr, dest);
+ extuw(dest, dest);
+ releaseScratch(scr);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
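+        // Assemble the halfword from two byte loads: low byte first, then the byte
+        // at +1 shifted into bits 8-15 (little-endian layout, as elsewhere in this file).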
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ add32(address.base, scr);
+ load8(scr, scr1);
+ add32(TrustedImm32(1), scr);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(scr1, dest);
+
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void load16(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movwMemReg(src, dest);
+ extuw(dest, dest);
+ }
+
+ void load16Signed(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movwMemReg(src, dest);
+ }
+
+ void load16(RegisterID r0, RegisterID src, RegisterID dest)
+ {
+ ASSERT(r0 == SH4Registers::r0);
+ m_assembler.movwR0mr(src, dest);
+ extuw(dest, dest);
+ }
+
+ void load16Signed(RegisterID r0, RegisterID src, RegisterID dest)
+ {
+ ASSERT(r0 == SH4Registers::r0);
+ m_assembler.movwR0mr(src, dest);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+ if (address.base == SH4Registers::r0)
+ load16(address.base, scr, dest);
+ else {
+ add32(address.base, scr);
+ load16(scr, dest);
+ }
+
+ releaseScratch(scr);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+ if (address.base == SH4Registers::r0)
+ load16Signed(address.base, scr, dest);
+ else {
+ add32(address.base, scr);
+ load16Signed(scr, dest);
+ }
+
+ releaseScratch(scr);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+
+ m_assembler.movbRegMem(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+
+ m_assembler.movwRegMem(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ store32(src, address.offset, address.base, scr);
+ releaseScratch(scr);
+ }
+
+ void store32(RegisterID src, int offset, RegisterID base, RegisterID scr)
+ {
+ if (!offset) {
+ m_assembler.movlRegMem(src, base);
+ return;
+ }
+
+        if ((offset >= 0) && (offset < 64)) {
+ m_assembler.movlRegMem(src, offset >> 2, base);
+ return;
+ }
+
+ m_assembler.loadConstant((offset), scr);
+ if (scr == SH4Registers::r0) {
+ m_assembler.movlRegMemr0(src, base);
+ return;
+ }
+
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlRegMem(src, scr);
+ }
+
+ void store32(RegisterID src, RegisterID offset, RegisterID base)
+ {
+ ASSERT(offset == SH4Registers::r0);
+ m_assembler.movlRegMemr0(src, base);
+ }
+
+ void store32(RegisterID src, RegisterID dst)
+ {
+ m_assembler.movlRegMem(src, dst);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ store32(scr, address.offset, address.base, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ store32(src, Address(scr, address.offset));
+
+ releaseScratch(scr);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr1);
+ m_assembler.movlRegMem(scr, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
+ m_assembler.movlRegMem(src, scr);
+ releaseScratch(scr);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ DataLabel32 label(this);
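+        // loadConstantUnReusable presumably keeps this constant out of any shared pool,
+        // so patching the offset later cannot affect other sites.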
+ m_assembler.loadConstantUnReusable(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ releaseScratch(scr);
+ return label;
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ RegisterID scr = claimScratch();
+ DataLabel32 label(this);
+ m_assembler.loadConstantUnReusable(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlRegMem(src, scr);
+ releaseScratch(scr);
+ return label;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
+ ASSERT(address.offset >= 0);
+ m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
+ return dataLabel;
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+
+ RegisterID scr = claimScratch();
+ m_assembler.movImm8(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ releaseScratch(scr);
+
+ return result;
+ }
+
+ // Floating-point operations
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return false; }
+
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
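+        // SH4 has no direct FPR-to-GPR move; each 32-bit half of the double travels through the FPUL transfer register.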
+ m_assembler.fldsfpul((FPRegisterID)(src + 1));
+ m_assembler.stsfpulReg(dest1);
+ m_assembler.fldsfpul(src);
+ m_assembler.stsfpulReg(dest2);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.ldsrmfpul(src1);
+ m_assembler.fstsfpul((FPRegisterID)(dest + 1));
+ m_assembler.ldsrmfpul(src2);
+ m_assembler.fstsfpul(dest);
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(address.offset, scr);
+ if (address.base == SH4Registers::r0) {
+ m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsReadr0r(scr, dest);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsWriterm(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.fmovsWriterm((FPRegisterID)(src + 1), scr);
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsWriterm(src, scr);
+ releaseScratch(scr);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsWriterm((FPRegisterID)(src + 1), scr);
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsWriterm(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ if (op1 == dest)
+ m_assembler.daddRegReg(op2, dest);
+ else {
+ m_assembler.dmovRegReg(op1, dest);
+ m_assembler.daddRegReg(op2, dest);
+ }
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.daddRegReg(src, dest);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, fscratch);
+ addDouble(fscratch, dest);
+ }
+
+ void addDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ addDouble(fscratch, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.dsubRegReg(src, dest);
+ }
+
+ void subDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ subDouble(fscratch, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.dmulRegReg(src, dest);
+ }
+
+ void mulDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ mulDouble(fscratch, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ddivRegReg(src, dest);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.fldsfpul(src);
+ m_assembler.dcnvsd(dst);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.dcnvds(src);
+ m_assembler.fstsfpul(dst);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ldsrmfpul(src);
+ m_assembler.floatfpulDreg(dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(src.m_ptr), scr);
+ convertInt32ToDouble(scr, dest);
+ releaseScratch(scr);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ convertInt32ToDouble(scr, dest);
+ releaseScratch(scr);
+ }
+
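+    // Loads a 32-bit value that may only be halfword- or byte-aligned: a run-time
+    // check of the low two address bits picks a single load32, two 16-bit loads,
+    // or a byte/halfword/byte combination.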
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ Jump m_jump;
+ JumpList end;
+
+ if (dest != SH4Registers::r0)
+ move(SH4Registers::r0, scr1);
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 68, sizeof(uint32_t));
+ move(scr, SH4Registers::r0);
+ m_assembler.andlImm8r(0x3, SH4Registers::r0);
+ m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
+ m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ load32(scr, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+ m_jump.link(this);
+ m_assembler.andlImm8r(0x1, SH4Registers::r0);
+ m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
+
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+ load16(scr, scr1);
+ add32(TrustedImm32(2), scr);
+ load16(scr, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+ m_jump.link(this);
+ load8(scr, scr1);
+ add32(TrustedImm32(1), scr);
+ load16(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(dest, scr1);
+ add32(TrustedImm32(2), scr);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.link(this);
+
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ RegisterID scr = scratchReg3;
+ load32WithUnalignedHalfWords(left, scr);
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testlRegReg(scr, scr);
+ else
+ compare32(right.m_value, scr, cond);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.movImm8(0, scratchReg3);
+ convertInt32ToDouble(scratchReg3, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.movImm8(0, scratchReg3);
+ convertInt32ToDouble(scratchReg3, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
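+        // The 0x7fbfffff comparisons below are NaN tests: dcnvds narrows each operand to
+        // single precision, and an SH4 NaN input appears to yield that fixed qNaN bit pattern.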
+ if (cond == DoubleEqual) {
+ m_assembler.dcmppeq(right, left);
+ return branchTrue();
+ }
+
+ if (cond == DoubleNotEqual) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, left);
+ releaseScratch(scr);
+ Jump m_jump = branchFalse();
+ end.link(this);
+ return m_jump;
+ }
+
+ if (cond == DoubleGreaterThan) {
+ m_assembler.dcmppgt(right, left);
+ return branchTrue();
+ }
+
+ if (cond == DoubleGreaterThanOrEqual) {
+ m_assembler.dcmppgt(left, right);
+ return branchFalse();
+ }
+
+ if (cond == DoubleLessThan) {
+ m_assembler.dcmppgt(left, right);
+ return branchTrue();
+ }
+
+ if (cond == DoubleLessThanOrEqual) {
+ m_assembler.dcmppgt(right, left);
+ return branchFalse();
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(left, right);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleGreaterThanOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(right, left);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(left, right);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleLessThanOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(left, right);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleLessThanOrEqualOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(right, left);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ ASSERT(cond == DoubleNotEqualOrUnordered);
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, left);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ Jump branchTrue()
+ {
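+        // extraInstrForBranch appears to reserve the far-branch material (constant load + braf)
+        // used when the short BT/BF displacement cannot reach; see the diagram in branch32 below.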
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+ Jump m_jump = Jump(m_assembler.je());
+ m_assembler.extraInstrForBranch(scratchReg3);
+ return m_jump;
+ }
+
+ Jump branchFalse()
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+ Jump m_jump = Jump(m_assembler.jne());
+ m_assembler.extraInstrForBranch(scratchReg3);
+ return m_jump;
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ RegisterID scr = claimScratch();
+ move(left.index, scr);
+ lshift32(TrustedImm32(left.scale), scr);
+ add32(left.base, scr);
+ load32(scr, left.offset, scr);
+ compare32(right.m_value, scr, cond);
+ releaseScratch(scr);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (dest != src)
+ m_assembler.dmovRegReg(src, dest);
+ m_assembler.dsqrt(dest);
+ }
+
+ void absDouble(FPRegisterID, FPRegisterID)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(address, addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ Jump jmp = branch32(cond, addressTempRegister, right);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ releaseScratch(addressTempRegister);
+ }
+
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
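+        // ftrc saturates out-of-range values to 0x7fffffff or 0x80000000, so the code checks for both sentinels.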
+ m_assembler.ftrcdrmfpul(src);
+ m_assembler.stsfpulReg(dest);
+ m_assembler.loadConstant(0x7fffffff, scratchReg3);
+ m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 14, sizeof(uint32_t));
+ m_assembler.branch(BT_OPCODE, 2);
+ m_assembler.addlImm8r(1, scratchReg3);
+ m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
+ return branchTrue();
+ }
+
+ // Stack manipulation operations
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.popReg(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.pushReg(src);
+ }
+
+ void push(Address address)
+ {
+ if (!address.offset) {
+ push(address.base);
+ return;
+ }
+
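+        // SH4's MOV.L @(disp,Rm) form encodes a 4-bit displacement scaled by
+        // four (0..60 bytes), so negative or larger offsets must first be
+        // computed into a scratch register.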
+ if ((address.offset < 0) || (address.offset >= 64)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, SH4Registers::sp);
+ m_assembler.addlImm8r(-4, SH4Registers::sp);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.movlMemReg(address.offset >> 2, address.base, SH4Registers::sp);
+ m_assembler.addlImm8r(-4, SH4Registers::sp);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ push(scr);
+ releaseScratch(scr);
+ }
+
+ // Register move operations
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.loadConstant(imm.m_value, dest);
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
+ DataLabelPtr dataLabel(this);
+ m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
+ return dataLabel;
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movlRegReg(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.loadConstant(imm.asIntptr(), dest);
+ }
+
+ void extuw(RegisterID src, RegisterID dst)
+ {
+ m_assembler.extuw(src, dst);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+ if (cond != NotEqual) {
+ m_assembler.movt(dest);
+ return;
+ }
+
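+        // SH4 comparisons only set the T bit for the "true" sense of the
+        // condition, so NotEqual is synthesised here: preset dest to 0,
+        // branch over the next instruction when T is set (equal), and fall
+        // through to set dest to 1 otherwise.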
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (left != dest) {
+ move(right, dest);
+ compare32(cond, left, dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ move(right, scr);
+ compare32(cond, left, scr, dest);
+ releaseScratch(scr);
+ }
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ load8(address, dest);
+ if (mask.m_value == -1)
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ else
+ testlImm(mask.m_value, dest);
+ if (cond != NonZero) {
+ m_assembler.movt(dest);
+ return;
+ }
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ void loadPtrLinkReg(ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ m_assembler.ldspr(scr);
+ releaseScratch(scr);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+        /* A conditional branch is emitted as the inverted short test over a
+           constant load and a register jump:
+               BT label   =>   BF off
+               nop             LDR reg
+               nop             braf @reg
+               nop             nop
+        */
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testlRegReg(left, left);
+ else
+ compare32(right.m_value, left, cond);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ compare32(right.offset, right.base, left, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ compare32(right, left.offset, left.base, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ compare32(right.m_value, left.offset, left.base, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ RegisterID scr = claimScratch();
+
+ move(TrustedImm32(reinterpret_cast<uint32_t>(left.m_ptr)), scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(left.m_ptr), addressTempRegister);
+ m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
+ compare32(right.m_value, addressTempRegister, cond);
+ releaseScratch(addressTempRegister);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ RegisterID scr = claimScratch();
+
+ move(left.index, scr);
+ lshift32(TrustedImm32(left.scale), scr);
+
+ if (left.offset)
+ add32(TrustedImm32(left.offset), scr);
+ add32(left.base, scr);
+ load8(scr, scr);
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant(right.m_value, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+
+ return branch32(cond, scr, scr1);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ m_assembler.testlRegReg(reg, mask);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ if (mask.m_value == -1)
+ m_assembler.testlRegReg(reg, reg);
+ else
+ testlImm(mask.m_value, reg);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ if (mask.m_value == -1)
+ compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
+ else
+ testImm(mask.m_value, address.offset, address.base);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load32(scr, address.offset, scr);
+
+ if (mask.m_value == -1)
+ m_assembler.testlRegReg(scr, scr);
+ else
+ testlImm(mask.m_value, scr);
+
+ releaseScratch(scr);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmpReg(target);
+ }
+
+ void jump(Address address)
+ {
+ RegisterID scr = claimScratch();
+
+ if ((address.offset < 0) || (address.offset >= 64)) {
+ m_assembler.loadConstant(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ } else if (address.offset)
+ m_assembler.movlMemReg(address.offset >> 2, address.base, scr);
+ else
+ m_assembler.movlMemReg(address.base, scr);
+ m_assembler.jmpReg(scr);
+
+ releaseScratch(scr);
+ }
+
+ // Arithmetic control flow operations
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ m_assembler.addvlRegReg(src, dest);
+ return branchTrue();
+ }
+
+ if (cond == Signed) {
+ m_assembler.addlRegReg(src, dest);
+ // Check if dest is negative
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ m_assembler.addlRegReg(src, dest);
+ compare32(0, dest, Equal);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(imm, scratchReg3);
+ return branchAdd32(cond, scratchReg3, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (src != dest)
+ move(src, dest);
+
+ if (cond == Overflow) {
+ move(imm, scratchReg3);
+ m_assembler.addvlRegReg(scratchReg3, dest);
+ return branchTrue();
+ }
+
+ add32(imm, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, Equal);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ RegisterID scr1 = claimScratch();
+ RegisterID scr = claimScratch();
+ m_assembler.dmullRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ m_assembler.movImm8(-31, scr);
+ m_assembler.movlRegReg(dest, scr1);
+ m_assembler.shaRegReg(scr1, scr);
+ m_assembler.stsmach(scr);
+ m_assembler.cmplRegReg(scr, scr1, SH4Condition(Equal));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ return branchFalse();
+ }
+
+ m_assembler.imullRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ if (cond == Signed) {
+ // Check if dest is negative
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(imm, scratchReg3);
+ if (src != dest)
+ move(src, dest);
+
+ return branchMul32(cond, scratchReg3, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ m_assembler.subvlRegReg(src, dest);
+ return branchTrue();
+ }
+
+ if (cond == Signed) {
+ // Check if dest is negative
+ m_assembler.sublRegReg(src, dest);
+ compare32(0, dest, LessThan);
+ return branchTrue();
+ }
+
+ sub32(src, dest);
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(imm, scratchReg3);
+ return branchSub32(cond, scratchReg3, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, scratchReg3);
+ if (src != dest)
+ move(src, dest);
+ return branchSub32(cond, scratchReg3, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 != dest)
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Signed) {
+ or32(src, dest);
+ compare32(0, dest, static_cast<RelationalCondition>(LessThan));
+ return branchTrue();
+ }
+
+ or32(src, dest);
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ m_assembler.ftrcdrmfpul(src);
+ m_assembler.stsfpulReg(dest);
+ convertInt32ToDouble(dest, fscratch);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));
+
+ if (dest == SH4Registers::r0)
+ m_assembler.cmpEqImmR0(0, dest);
+ else {
+ m_assembler.movImm8(0, scratchReg3);
+ m_assembler.cmplRegReg(scratchReg3, dest, SH4Condition(Equal));
+ }
+ failureCases.append(branchTrue());
+ }
+
+ void neg32(RegisterID dst)
+ {
+ m_assembler.neg(dst, dst);
+ }
+
+ void urshift32(RegisterID shiftamount, RegisterID dest)
+ {
+ if (shiftamount == SH4Registers::r0)
+ m_assembler.andlImm8r(0x1f, shiftamount);
+ else {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(0x1f, scr);
+ m_assembler.andlRegReg(scr, shiftamount);
+ releaseScratch(scr);
+ }
+ m_assembler.neg(shiftamount, shiftamount);
+ m_assembler.shllRegReg(dest, shiftamount);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(-(imm.m_value & 0x1f), scr);
+        m_assembler.shllRegReg(dest, scr); // logical shift (SHLD): an unsigned shift must not sign-extend
+ releaseScratch(scr);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+
+ urshift32(shiftamount, dest);
+ }
+
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ void call(Address address, RegisterID target)
+ {
+ load32(address.base, address.offset, target);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
+ m_assembler.branch(JSR_OPCODE, target);
+ m_assembler.nop();
+ }
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ m_assembler.nop();
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ RegisterID dataTempRegister = claimScratch();
+
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
+ releaseScratch(dataTempRegister);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(left.offset, scr);
+ m_assembler.addlRegReg(left.base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ RegisterID scr1 = claimScratch();
+ dataLabel = moveWithPatch(initialRightValue, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ releaseScratch(scr1);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ m_assembler.nop();
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ DataLabelPtr label = moveWithPatch(initialValue, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ return label;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+ int sizeOfConstantPool()
+ {
+ return m_assembler.sizeOfConstantPool();
+ }
+
+ Call tailRecursiveCall()
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstantUnReusable(0x0, scr, true);
+ Jump m_jump = Jump(m_assembler.jmp(scr));
+ releaseScratch(scr);
+
+ return Call::fromTailJump(m_jump);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ SH4Assembler::revertJump(instructionStart.dataLocation(), reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+ SH4Assembler::Condition SH4Condition(RelationalCondition cond)
+ {
+ return static_cast<SH4Assembler::Condition>(cond);
+ }
+
+ SH4Assembler::Condition SH4Condition(ResultCondition cond)
+ {
+ return static_cast<SH4Assembler::Condition>(cond);
+ }
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void*, Call, FunctionPtr);
+ static void repatchCall(CodeLocationCall, CodeLocationLabel);
+ static void repatchCall(CodeLocationCall, FunctionPtr);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerSH4_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86.h b/src/3rdparty/masm/assembler/MacroAssemblerX86.h
new file mode 100644
index 0000000000..27a030edfd
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_h
+#define MacroAssemblerX86_h
+
+#if ENABLE(ASSEMBLER) && CPU(X86)
+
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Common {
+public:
+ static const Scale ScalePtr = TimesFour;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::branchAdd32;
+ using MacroAssemblerX86Common::branchSub32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
+ using MacroAssemblerX86Common::branch32;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::jump;
+ using MacroAssemblerX86Common::addDouble;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::storeDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+ using MacroAssemblerX86Common::branchTest8;
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ }
+
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ m_assembler.addl_mr(address.m_ptr, dest);
+ }
+
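+    // add64 below synthesises a 64-bit add on 32-bit x86: a 32-bit add into
+    // the low word, then an add-with-carry of the immediate's sign extension
+    // (imm.m_value >> 31) into the high word.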
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
+ }
+
+ void and32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.andl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.orl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ m_assembler.orl_rm(reg, address.m_ptr);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.subl_im(imm.m_value, address.m_ptr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address, dest);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ return result;
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ m_assembler.addsd_mr(address.m_ptr, dest);
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.movl_rm(src, address);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address);
+ }
+
+ // Possibly clobbers src.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ movePackedToInt32(src, dest1);
+ rshiftPacked(TrustedImm32(32), src);
+ movePackedToInt32(src, dest2);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ moveInt32ToPacked(src1, dest);
+ moveInt32ToPacked(src2, scratch);
+ lshiftPacked(TrustedImm32(32), scratch);
+ orPacked(scratch, dest);
+ }
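+
+    // Sketch (illustrative; "masm" and fpRegT0 are assumed names): on 32-bit
+    // x86 a double travels through a pair of GPRs, e.g. when boxing values;
+    // dest1 receives the low 32 bits and dest2 the high 32 bits (and src may
+    // be clobbered, as noted above):
+    //
+    //     masm.moveDoubleToInts(fpRegT0, X86Registers::eax, X86Registers::edx);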
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ m_assembler.addl_im(imm.m_value, dest.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ m_assembler.subl_im(imm.m_value, dest.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(AbsoluteAddress address)
+ {
+ m_assembler.jmp_m(address.m_ptr);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.m_ptr);
+ else
+ m_assembler.testb_im(mask.m_value, address.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ padBeforePatch();
+ m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
+ return DataLabelPtr(this);
+ }
+
+ static bool supportsFloatingPoint() { return isSSE2Present(); }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
+ static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
+ static bool supportsFloatingPointAbs() { return isSSE2Present(); }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
+ return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int offsetBytes = 0;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue)
+ {
+ ASSERT(!address.offset);
+ X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ X86Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86Common.h b/src/3rdparty/masm/assembler/MacroAssemblerX86Common.h
new file mode 100644
index 0000000000..53cb80c210
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86Common.h
@@ -0,0 +1,1541 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86Common_h
+#define MacroAssemblerX86Common_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "X86Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+protected:
+#if CPU(X86_64)
+ static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+#endif
+
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+public:
+ typedef X86Assembler::FPRegisterID FPRegisterID;
+ typedef X86Assembler::XMMRegisterID XMMRegisterID;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -128 && value <= 127;
+ }
+
+ enum RelationalCondition {
+ Equal = X86Assembler::ConditionE,
+ NotEqual = X86Assembler::ConditionNE,
+ Above = X86Assembler::ConditionA,
+ AboveOrEqual = X86Assembler::ConditionAE,
+ Below = X86Assembler::ConditionB,
+ BelowOrEqual = X86Assembler::ConditionBE,
+ GreaterThan = X86Assembler::ConditionG,
+ GreaterThanOrEqual = X86Assembler::ConditionGE,
+ LessThan = X86Assembler::ConditionL,
+ LessThanOrEqual = X86Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = X86Assembler::ConditionO,
+ Signed = X86Assembler::ConditionS,
+ Zero = X86Assembler::ConditionE,
+ NonZero = X86Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
+ DoubleNotEqual = X86Assembler::ConditionNE,
+ DoubleGreaterThan = X86Assembler::ConditionA,
+ DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
+ };
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
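+
+    // Illustration of the ordered/unordered split above (assumed values, not
+    // part of the original header): with left = NaN,
+    // branchDouble(DoubleEqual, left, right) never takes the branch, while
+    // branchDouble(DoubleEqualOrUnordered, left, right) always does, because
+    // ucomisd reports any NaN operand as "unordered".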
+
+ static const RegisterID stackPointerRegister = X86Registers::esp;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+#if CPU(X86_64)
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
+ static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
+#endif
+#endif
+#endif
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+    // For many operations the source may be a TrustedImm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
+
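+    // Sketch (illustrative; "masm" is an assumed MacroAssembler instance).
+    // All three forms below accumulate into the second operand:
+    //
+    //     masm.add32(TrustedImm32(4), X86Registers::eax);                 // eax += 4
+    //     masm.add32(Address(X86Registers::ebx, 8), X86Registers::eax);   // eax += *(ebx + 8)
+    //     masm.add32(X86Registers::eax, Address(X86Registers::ebx, 8));   // *(ebx + 8) += eax
+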
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.addl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.addl_ir(imm.m_value, dest);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ m_assembler.addl_mr(src.offset, src.base, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ m_assembler.addl_rm(src, dest.offset, dest.base);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andl_rr(src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.andl_ir(imm.m_value, dest);
+ }
+
+ void and32(RegisterID src, Address dest)
+ {
+ m_assembler.andl_rm(src, dest.offset, dest.base);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ m_assembler.andl_mr(src.offset, src.base, dest);
+ }
+
+ void and32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.andl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ and32(op2, dest);
+ else {
+ move(op2, dest);
+ and32(op1, dest);
+ }
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ and32(imm, dest);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shll_CLr(dest);
+ else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ lshift32(shift_amount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ lshift32(imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_rr(src, dest);
+ }
+
+ void mul32(Address src, RegisterID dest)
+ {
+ m_assembler.imull_mr(src.offset, src.base, dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_i32r(src, imm.m_value, dest);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.negl_r(srcDest);
+ }
+
+ void neg32(Address srcDest)
+ {
+ m_assembler.negl_m(srcDest.offset, srcDest.base);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orl_rr(src, dest);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orl_ir(imm.m_value, dest);
+ }
+
+ void or32(RegisterID src, Address dest)
+ {
+ m_assembler.orl_rm(src, dest.offset, dest.base);
+ }
+
+ void or32(Address src, RegisterID dest)
+ {
+ m_assembler.orl_mr(src.offset, src.base, dest);
+ }
+
+ void or32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.orl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or32(imm, dest);
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.sarl_CLr(dest);
+ else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ rshift32(shift_amount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sarl_i8r(imm.m_value, dest);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shrl_CLr(dest);
+ else {
+            // On x86 we can only shift by ecx; if asked to shift by another register we
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ urshift32(shift_amount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shrl_i8r(imm.m_value, dest);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ urshift32(imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subl_rr(src, dest);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subl_ir(imm.m_value, dest);
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.subl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ m_assembler.subl_mr(src.offset, src.base, dest);
+ }
+
+ void sub32(RegisterID src, Address dest)
+ {
+ m_assembler.subl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorl_rr(src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, Address dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.notl_m(dest.offset, dest.base);
+ else
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.notl_r(dest);
+ else
+ m_assembler.xorl_ir(imm.m_value, dest);
+ }
+
+ void xor32(RegisterID src, Address dest)
+ {
+ m_assembler.xorl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(Address src, RegisterID dest)
+ {
+ m_assembler.xorl_mr(src.offset, src.base, dest);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(TrustedImm32(0), dest);
+ else if (op1 == dest)
+ xor32(op2, dest);
+ else {
+ move(op2, dest);
+ xor32(op1, dest);
+ }
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ xor32(imm, dest);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtsd_rr(src, dst);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(&negativeZeroConstant, dst);
+ m_assembler.andnpd_rr(src, dst);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(&negativeZeroConstant, dst);
+ m_assembler.xorpd_rr(src, dst);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address). The source for a store may be a TrustedImm32. Address
+    // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
+
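+    // Sketch (illustrative; "masm" is an assumed instance). Address pairs a
+    // base register with a byte offset; BaseIndex adds a scaled index:
+    //
+    //     masm.load32(Address(X86Registers::ebx, 8), X86Registers::eax);   // eax = *(ebx + 8)
+    //     masm.load32(BaseIndex(X86Registers::ebx, X86Registers::ecx, TimesFour), X86Registers::eax);
+    //     masm.store32(TrustedImm32(0), Address(X86Registers::ebx, 8));    // *(ebx + 8) = 0
+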
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ ASSERT(isCompactPtrAlignedAddressOffset(value));
+ AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, dest);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8Signed(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, dest);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16(Address address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, dest);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16Signed(Address address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, dest);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ padBeforePatch();
+ m_assembler.movl_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+ }
+
+ void store8(TrustedImm32 imm, BaseIndex address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp;
+ if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+ temp = X86Registers::eax;
+ else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+ temp = X86Registers::ebx;
+ else {
+ ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+ temp = X86Registers::ecx;
+ }
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp;
+ if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+ temp = X86Registers::eax;
+ else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+ temp = X86Registers::ebx;
+ else {
+ ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+ temp = X86Registers::ecx;
+ }
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+
+ // Floating-point operation:
+ //
+ // Presently only supports SSE, not x87 floating point.
+
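+    // Sketch (illustrative; "masm" and the fpRegT0/fpRegT1 FPRegisterID
+    // names are assumptions). As with the integer ops, the two-operand
+    // forms accumulate into the destination:
+    //
+    //     masm.loadDouble(Address(X86Registers::ebx, 0), fpRegT0);
+    //     masm.convertInt32ToDouble(X86Registers::eax, fpRegT1);
+    //     masm.addDouble(fpRegT1, fpRegT0);    // fpRegT0 += fpRegT1
+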
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (src != dest)
+ m_assembler.movsd_rr(src, dest);
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+#if CPU(X86)
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address, dest);
+#else
+ move(TrustedImmPtr(address), scratchRegister);
+ loadDouble(scratchRegister, dest);
+#endif
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, dest);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsd2ss_rr(src, dst);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtss2sd_rr(src, dst);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_rr(src, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ addDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ addDouble(op1, dest);
+ }
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_mr(src.offset, src.base, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_rr(src, dest);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A / B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ divDouble(op2, dest);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_mr(src.offset, src.base, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_rr(src, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ subDouble(op2, dest);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_mr(src.offset, src.base, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_rr(src, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ mulDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ mulDouble(op1, dest);
+ }
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_mr(src.offset, src.base, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_rr(src, dest);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ ASSERT(isSSE2Present());
+
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
+ if (cond == DoubleEqual) {
+ if (left == right)
+ return Jump(m_assembler.jnp());
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ if (left == right)
+ return Jump(m_assembler.jp());
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+ }
+
+    // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
+ }
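+
+    // Note (illustrative): cvttsd2si writes the "integer indefinite" value
+    // 0x80000000 for any unrepresentable input, so comparing against that
+    // sentinel detects failure, at the cost of also flagging a genuine
+    // INT_MIN result.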
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
+ }
+
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ }
+
+#if CPU(X86_64)
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2siq_rr(src, dest);
+ }
+#endif
+
+    // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branchTest32(Zero, dest));
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psllq_i8r(imm.m_value, reg);
+ }
+
+ void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psrlq_i8r(imm.m_value, reg);
+ }
+
+ void orPacked(XMMRegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.por_rr(src, dst);
+ }
+
+ void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ void movePackedToInt32(XMMRegisterID src, RegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
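+    // Sketch (illustrative; "masm" is an assumed instance):
+    //
+    //     masm.push(X86Registers::eax);    // *--esp = eax
+    //     masm.push(TrustedImm32(42));     // *--esp = 42
+    //     masm.pop(X86Registers::edx);     // edx = 42
+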
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ m_assembler.push_m(address.offset, address.base);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ m_assembler.push_i32(imm.m_value);
+ }
+
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
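+    // Sketch (illustrative; "masm" assumed): moves are elided when src ==
+    // dest, and a zero immediate is materialised with xor rather than mov:
+    //
+    //     masm.move(TrustedImm32(0), X86Registers::eax);   // emits xorl %eax, %eax
+    //     masm.move(X86Registers::eax, X86Registers::eax); // emits nothing
+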
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+        // Note: on 64-bit the TrustedImm32 value is zero-extended into the register;
+        // it may be useful to have a separate version that sign extends the value?
+ if (!imm.m_value)
+ m_assembler.xorl_rr(dest, dest);
+ else
+ m_assembler.movl_i32r(imm.m_value, dest);
+ }
+
+#if CPU(X86_64)
+ void move(RegisterID src, RegisterID dest)
+ {
+        // Note: on 64-bit this is a full register move; perhaps it would be
+ // useful to have separate move32 & movePtr, with move32 zero extending?
+ if (src != dest)
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
+ }
+
+ void move(TrustedImm64 imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.m_value, dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movsxd_rr(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movl_rr(src, dest);
+ }
+#else
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movl_rr(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movl_i32r(imm.asIntptr(), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgl_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+#endif
+
+
+ // Forwards / external control flow operations:
+ //
+    // This set of jump and conditional branch operations returns a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
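+    //
+    // A typical pattern is to create a forward branch, emit the fall-through
+    // code, and then bind the branch to the current position (illustrative names):
+    //     Jump isNegative = branch32(LessThan, regT0, TrustedImm32(0));
+    //     ... emit the fast path ...
+    //     isNegative.link(this);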
+
+public:
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpl_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testl_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // Testing the register against itself suffices when the mask is -1,
+        // avoiding the need for an immediate.
+ if (mask.m_value == -1)
+ m_assembler.testl_rr(reg, reg);
+ else
+ m_assembler.testl_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmp_r(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ m_assembler.jmp_m(address.offset, address.base);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
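+    //
+    // For example, an overflow-checked addition can divert to a slow path
+    // (illustrative names):
+    //     Jump overflowed = branchAdd32(Overflow, regT1, regT0);
+    //     ... continue with the integer fast path ...
+    //     overflowed.link(this); // fall back to e.g. a double path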
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchAdd32(cond, src2, dest);
+ move(src2, dest);
+ return branchAdd32(cond, src1, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(src, dest);
+ return branchAdd32(cond, imm, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ mul32(imm, src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchMul32(cond, src2, dest);
+ move(src2, dest);
+ return branchMul32(cond, src1, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+        // B := A - B is invalid: the move below would clobber src2 before the subtraction.
+ ASSERT(src1 == dest || src2 != dest);
+
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ neg32(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.int3();
+ }
+
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ m_assembler.call_m(address.offset, address.base);
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_rr(right, left);
+ set32(x86Condition(cond), dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ set32(x86Condition(cond), dest);
+ }
+
+ // FIXME:
+ // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
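+    // This relies on the x86 condition codes (to which RelationalCondition maps)
+    // coming in complementary pairs that differ only in the low bit,
+    // e.g. E (0x4) / NE (0x5), L (0xC) / GE (0xD).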
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return X86Assembler::maxJumpReplacementSize();
+ }
+
+protected:
+ X86Assembler::Condition x86Condition(RelationalCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ X86Assembler::Condition x86Condition(ResultCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ void set32(X86Assembler::Condition cond, RegisterID dest)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only set the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (dest >= 4) {
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ m_assembler.setCC_r(cond, X86Registers::eax);
+ m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ return;
+ }
+#endif
+ m_assembler.setCC_r(cond, dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+private:
+ // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+ // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+ friend class MacroAssemblerX86;
+
+#if CPU(X86)
+#if OS(MAC_OS_X)
+
+    // All X86 Macs are guaranteed to support at least SSE2.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#else // OS(MAC_OS_X)
+
+ enum SSE2CheckState {
+ NotCheckedSSE2,
+ HasSSE2,
+ NoSSE2
+ };
+
+ static bool isSSE2Present()
+ {
+ if (s_sse2CheckState == NotCheckedSSE2) {
+ // Default the flags value to zero; if the compiler is
+ // not MSVC or GCC we will read this as SSE2 not present.
+ int flags = 0;
+#if COMPILER(MSVC)
+ _asm {
+ mov eax, 1 // cpuid function 1 gives us the standard feature set
+ cpuid;
+ mov flags, edx;
+ }
+#elif COMPILER(GCC)
+ asm (
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ "movl %%edx, %0;"
+ : "=g" (flags)
+ :
+ : "%eax", "%ecx", "%edx"
+ );
+#endif
+ static const int SSE2FeatureBit = 1 << 26;
+ s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+ }
+ // Only check once.
+ ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+ return s_sse2CheckState == HasSSE2;
+ }
+
+ static SSE2CheckState s_sse2CheckState;
+
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
+
+    // On x86-64 we should never be checking for SSE2 in a non-debug build,
+    // but in debug builds this method is provided to keep the asserts above happy.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86Common_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h b/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
new file mode 100644
index 0000000000..c711e6f8da
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_64_h
+#define MacroAssemblerX86_64_h
+
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
+
+#include "MacroAssemblerX86Common.h"
+
+#define REPTACH_OFFSET_CALL_R11 3
+
+namespace JSC {
+
+class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
+public:
+ static const Scale ScalePtr = TimesEight;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::branchAdd32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::jump;
+ using MacroAssemblerX86Common::addDouble;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+
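+    // Note: x86-64 ALU instructions cannot take a 64-bit absolute address, so
+    // the AbsoluteAddress forms below first materialize the pointer in the
+    // scratch register (r11) and then operate through a register-indirect access.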
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(imm, Address(scratchRegister));
+ }
+
+ void and32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ and32(imm, Address(scratchRegister));
+ }
+
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(Address(scratchRegister), dest);
+ }
+
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(imm, Address(scratchRegister));
+ }
+
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(reg, Address(scratchRegister));
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ sub32(imm, Address(scratchRegister));
+ }
+
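+    // An absolute-address load has a compact encoding only when the destination
+    // is eax/rax (the moffs form), hence the special case below.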
+ void load32(const void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movl_mEAX(address);
+ else {
+ move(TrustedImmPtr(address), dest);
+ load32(dest, dest);
+ }
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ m_assembler.addsd_mr(0, scratchRegister, dest);
+ }
+
+ void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+ {
+ move(imm, scratchRegister);
+ m_assembler.cvtsi2sd_rr(scratchRegister, dest);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store32(imm, scratchRegister);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store8(imm, Address(scratchRegister));
+ }
+
+ Call call()
+ {
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
+ ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
+ return result;
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ jump(Address(scratchRegister));
+ }
+
+ Call tailRecursiveCall()
+ {
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+ ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+ ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), scratchRegister);
+ add32(src, Address(scratchRegister));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void add64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addq_rr(src, dest);
+ }
+
+ void add64(Address src, RegisterID dest)
+ {
+ m_assembler.addq_mr(src.offset, src.base, dest);
+ }
+
+ void add64(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), scratchRegister);
+ add64(Address(scratchRegister), dest);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.addq_ir(imm.m_value, srcDest);
+ }
+
+ void add64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ add64(scratchRegister, dest);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leaq_mr(imm.m_value, src, dest);
+ }
+
+ void add64(TrustedImm32 imm, Address address)
+ {
+ m_assembler.addq_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add64(imm, Address(scratchRegister));
+ }
+
+ void and64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andq_rr(src, dest);
+ }
+
+ void and64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.andq_ir(imm.m_value, srcDest);
+ }
+
+ void neg64(RegisterID dest)
+ {
+ m_assembler.negq_r(dest);
+ }
+
+ void or64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orq_rr(src, dest);
+ }
+
+ void or64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ or64(scratchRegister, dest);
+ }
+
+ void or64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orq_ir(imm.m_value, dest);
+ }
+
+ void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(op1, dest);
+ else if (op1 == dest)
+ or64(op2, dest);
+ else {
+ move(op2, dest);
+ or64(op1, dest);
+ }
+ }
+
+ void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or64(imm, dest);
+ }
+
+ void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+ {
+ m_assembler.rorq_i8r(imm.m_value, srcDst);
+ }
+
+ void sub64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subq_rr(src, dest);
+ }
+
+ void sub64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subq_ir(imm.m_value, dest);
+ }
+
+ void sub64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ sub64(scratchRegister, dest);
+ }
+
+ void xor64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorq_rr(src, dest);
+ }
+
+ void xor64(RegisterID src, Address dest)
+ {
+ m_assembler.xorq_rm(src, dest.offset, dest.base);
+ }
+
+ void xor64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.xorq_ir(imm.m_value, srcDest);
+ }
+
+ void load64(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ }
+
+ void load64(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load64(const void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movq_mEAX(address);
+ else {
+ move(TrustedImmPtr(address), dest);
+ load64(dest, dest);
+ }
+ }
+
+ DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ void store64(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base);
+ }
+
+ void store64(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store64(RegisterID src, void* address)
+ {
+ if (src == X86Registers::eax)
+ m_assembler.movq_EAXm(address);
+ else {
+ move(TrustedImmPtr(address), scratchRegister);
+ store64(src, scratchRegister);
+ }
+ }
+
+ void store64(TrustedImm64 imm, ImplicitAddress address)
+ {
+ move(imm, scratchRegister);
+ store64(scratchRegister, address);
+ }
+
+ void store64(TrustedImm64 imm, BaseIndex address)
+ {
+ move(imm, scratchRegister);
+ m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
+ }
+
+ DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ padBeforePatch();
+ m_assembler.movq_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void move64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testq_rr(left, left);
+ else
+ m_assembler.cmpq_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpq_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
+ m_assembler.testq_rr(left, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+ move(right, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpq_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ move(TrustedImmPtr(left.m_ptr), scratchRegister);
+ return branch64(cond, Address(scratchRegister), right);
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpq_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+ {
+ move(right, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testq_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // If we are only interested in the low seven bits, this can be tested with a testb.
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ set32(x86Condition(cond), dest);
+ }
+
+ void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ m_assembler.testq_rr(reg, mask);
+ set32(x86Condition(cond), dest);
+ }
+
+ Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load64(address.m_ptr, scratchRegister);
+ return branchTest64(cond, scratchRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
+ {
+ m_assembler.testq_rm(reg, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ add64(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub64(cond, src2, dest);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ return result;
+ }
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_i64r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
+ store64(scratchRegister, address);
+ return label;
+ }
+
+ using MacroAssemblerX86Common::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
+ MacroAssemblerX86Common::move(addr, scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
+ }
+
+ static bool supportsFloatingPoint() { return true; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
+ }
+
+ static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+
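+    // The patchable sequence starts with the 10-byte movq (REX.W prefix, one
+    // opcode byte, eight immediate bytes) emitted by moveWithPatch; the label
+    // passed in points just past that instruction, so the start of the sequence
+    // is totalBytes earlier.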
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ const int immediateBytes = 8;
+ const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ return startOfBranchPtrWithPatchOnRegister(label);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (!call.isFlagSet(Call::Near))
+ X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
+ else
+ X86Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_64_h
diff --git a/src/3rdparty/masm/assembler/RepatchBuffer.h b/src/3rdparty/masm/assembler/RepatchBuffer.h
new file mode 100644
index 0000000000..dbb56f9ad5
--- /dev/null
+++ b/src/3rdparty/masm/assembler/RepatchBuffer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RepatchBuffer_h
+#define RepatchBuffer_h
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// RepatchBuffer:
+//
+// This class is used to modify code after code generation has been completed,
+// and after the code has potentially already been executed. This mechanism is
+// used to apply optimizations to the code.
+//
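+// A typical use constructs a RepatchBuffer over a CodeBlock (making the code
+// writable for the buffer's lifetime) and relinks a patch point, e.g.
+// (illustrative names):
+//
+//     RepatchBuffer repatchBuffer(codeBlock);
+//     repatchBuffer.relink(callSite, newTarget);
+//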
+class RepatchBuffer {
+ typedef MacroAssemblerCodePtr CodePtr;
+
+public:
+ RepatchBuffer(CodeBlock* codeBlock)
+ {
+ JITCode& code = codeBlock->getJITCode();
+ m_start = code.start();
+ m_size = code.size();
+
+ ExecutableAllocator::makeWritable(m_start, m_size);
+ }
+
+ ~RepatchBuffer()
+ {
+ ExecutableAllocator::makeExecutable(m_start, m_size);
+ }
+
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchJump(jump, destination);
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, destination);
+ }
+
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ MacroAssembler::repatchInt32(dataLabel32, value);
+ }
+
+ void repatch(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ MacroAssembler::repatchCompact(dataLabelCompact, value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ MacroAssembler::repatchPointer(dataLabelPtr, value);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ MacroAssembler::replaceWithLoad(label);
+ }
+
+ void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ MacroAssembler::replaceWithAddressComputation(label);
+ }
+
+ void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive)
+ {
+ if (isActive)
+ replaceWithLoad(label);
+ else
+ replaceWithAddressComputation(label);
+ }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return MacroAssembler::startOfBranchPtrWithPatchOnRegister(label);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label);
+ }
+
+ void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ MacroAssembler::replaceWithJump(instructionStart, destination);
+ }
+
+ // This is a *bit* of a silly API, since we currently always also repatch the
+ // immediate after calling this. But I'm fine with that, since this just feels
+ // less yucky.
+ void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::RegisterID reg, void* value)
+ {
+ MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart, reg, value);
+ }
+
+ void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, void* value)
+ {
+ MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value);
+ }
+
+private:
+ void* m_start;
+ size_t m_size;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // RepatchBuffer_h
diff --git a/src/3rdparty/masm/assembler/SH4Assembler.h b/src/3rdparty/masm/assembler/SH4Assembler.h
new file mode 100644
index 0000000000..b7a166ea99
--- /dev/null
+++ b/src/3rdparty/masm/assembler/SH4Assembler.h
@@ -0,0 +1,2152 @@
+/*
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SH4Assembler_h
+#define SH4Assembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "AssemblerBuffer.h"
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <wtf/Assertions.h>
+#include <wtf/DataLog.h>
+#include <wtf/Vector.h>
+
+#ifndef NDEBUG
+#define SH4_ASSEMBLER_TRACING
+#endif
+
+namespace JSC {
+typedef uint16_t SH4Word;
+
+enum {
+ INVALID_OPCODE = 0xffff,
+ ADD_OPCODE = 0x300c,
+ ADDIMM_OPCODE = 0x7000,
+ ADDC_OPCODE = 0x300e,
+ ADDV_OPCODE = 0x300f,
+ AND_OPCODE = 0x2009,
+ ANDIMM_OPCODE = 0xc900,
+ DIV0_OPCODE = 0x2007,
+ DIV1_OPCODE = 0x3004,
+ BF_OPCODE = 0x8b00,
+ BFS_OPCODE = 0x8f00,
+ BRA_OPCODE = 0xa000,
+ BRAF_OPCODE = 0x0023,
+ NOP_OPCODE = 0x0009,
+ BSR_OPCODE = 0xb000,
+ RTS_OPCODE = 0x000b,
+ BT_OPCODE = 0x8900,
+ BTS_OPCODE = 0x8d00,
+ BSRF_OPCODE = 0x0003,
+ BRK_OPCODE = 0x003b,
+ FTRC_OPCODE = 0xf03d,
+ CMPEQ_OPCODE = 0x3000,
+ CMPEQIMM_OPCODE = 0x8800,
+ CMPGE_OPCODE = 0x3003,
+ CMPGT_OPCODE = 0x3007,
+ CMPHI_OPCODE = 0x3006,
+ CMPHS_OPCODE = 0x3002,
+ CMPPL_OPCODE = 0x4015,
+ CMPPZ_OPCODE = 0x4011,
+ CMPSTR_OPCODE = 0x200c,
+ DT_OPCODE = 0x4010,
+ FCMPEQ_OPCODE = 0xf004,
+ FCMPGT_OPCODE = 0xf005,
+ FMOV_OPCODE = 0xf00c,
+ FADD_OPCODE = 0xf000,
+ FMUL_OPCODE = 0xf002,
+ FSUB_OPCODE = 0xf001,
+ FDIV_OPCODE = 0xf003,
+ FNEG_OPCODE = 0xf04d,
+ JMP_OPCODE = 0x402b,
+ JSR_OPCODE = 0x400b,
+ LDSPR_OPCODE = 0x402a,
+ LDSLPR_OPCODE = 0x4026,
+ MOV_OPCODE = 0x6003,
+ MOVIMM_OPCODE = 0xe000,
+ MOVB_WRITE_RN_OPCODE = 0x2000,
+ MOVB_WRITE_RNDEC_OPCODE = 0x2004,
+ MOVB_WRITE_R0RN_OPCODE = 0x0004,
+ MOVB_WRITE_OFFGBR_OPCODE = 0xc000,
+ MOVB_WRITE_OFFRN_OPCODE = 0x8000,
+ MOVB_READ_RM_OPCODE = 0x6000,
+ MOVB_READ_RMINC_OPCODE = 0x6004,
+ MOVB_READ_R0RM_OPCODE = 0x000c,
+ MOVB_READ_OFFGBR_OPCODE = 0xc400,
+ MOVB_READ_OFFRM_OPCODE = 0x8400,
+ MOVL_WRITE_RN_OPCODE = 0x2002,
+ MOVL_WRITE_RNDEC_OPCODE = 0x2006,
+ MOVL_WRITE_R0RN_OPCODE = 0x0006,
+ MOVL_WRITE_OFFGBR_OPCODE = 0xc200,
+ MOVL_WRITE_OFFRN_OPCODE = 0x1000,
+ MOVL_READ_RM_OPCODE = 0x6002,
+ MOVL_READ_RMINC_OPCODE = 0x6006,
+ MOVL_READ_R0RM_OPCODE = 0x000e,
+ MOVL_READ_OFFGBR_OPCODE = 0xc600,
+ MOVL_READ_OFFPC_OPCODE = 0xd000,
+ MOVL_READ_OFFRM_OPCODE = 0x5000,
+ MOVW_WRITE_RN_OPCODE = 0x2001,
+ MOVW_READ_RM_OPCODE = 0x6001,
+ MOVW_READ_R0RM_OPCODE = 0x000d,
+ MOVW_READ_OFFRM_OPCODE = 0x8500,
+ MOVW_READ_OFFPC_OPCODE = 0x9000,
+ MOVA_READ_OFFPC_OPCODE = 0xc700,
+ MOVT_OPCODE = 0x0029,
+ MULL_OPCODE = 0x0007,
+ DMULL_L_OPCODE = 0x3005,
+ STSMACL_OPCODE = 0x001a,
+ STSMACH_OPCODE = 0x000a,
+ DMULSL_OPCODE = 0x300d,
+ NEG_OPCODE = 0x600b,
+ NEGC_OPCODE = 0x600a,
+ NOT_OPCODE = 0x6007,
+ OR_OPCODE = 0x200b,
+ ORIMM_OPCODE = 0xcb00,
+ ORBIMM_OPCODE = 0xcf00,
+ SETS_OPCODE = 0x0058,
+ SETT_OPCODE = 0x0018,
+ SHAD_OPCODE = 0x400c,
+ SHAL_OPCODE = 0x4020,
+ SHAR_OPCODE = 0x4021,
+ SHLD_OPCODE = 0x400d,
+ SHLL_OPCODE = 0x4000,
+ SHLL2_OPCODE = 0x4008,
+ SHLL8_OPCODE = 0x4018,
+ SHLL16_OPCODE = 0x4028,
+ SHLR_OPCODE = 0x4001,
+ SHLR2_OPCODE = 0x4009,
+ SHLR8_OPCODE = 0x4019,
+ SHLR16_OPCODE = 0x4029,
+ STSPR_OPCODE = 0x002a,
+ STSLPR_OPCODE = 0x4022,
+ FLOAT_OPCODE = 0xf02d,
+ SUB_OPCODE = 0x3008,
+ SUBC_OPCODE = 0x300a,
+ SUBV_OPCODE = 0x300b,
+ TST_OPCODE = 0x2008,
+ TSTIMM_OPCODE = 0xc800,
+ TSTB_OPCODE = 0xcc00,
+ EXTUB_OPCODE = 0x600c,
+ EXTUW_OPCODE = 0x600d,
+ XOR_OPCODE = 0x200a,
+ XORIMM_OPCODE = 0xca00,
+ XORB_OPCODE = 0xce00,
+ FMOVS_READ_RM_INC_OPCODE = 0xf009,
+ FMOVS_READ_RM_OPCODE = 0xf008,
+ FMOVS_READ_R0RM_OPCODE = 0xf006,
+ FMOVS_WRITE_RN_OPCODE = 0xf00a,
+ FMOVS_WRITE_RN_DEC_OPCODE = 0xf00b,
+ FMOVS_WRITE_R0RN_OPCODE = 0xf007,
+ FCNVDS_DRM_FPUL_OPCODE = 0xf0bd,
+ FCNVSD_FPUL_DRN_OPCODE = 0xf0ad,
+ LDS_RM_FPUL_OPCODE = 0x405a,
+ FLDS_FRM_FPUL_OPCODE = 0xf01d,
+ STS_FPUL_RN_OPCODE = 0x005a,
+    FSTS_FPUL_FRN_OPCODE = 0xf00d,
+ LDSFPSCR_OPCODE = 0x406a,
+ STSFPSCR_OPCODE = 0x006a,
+ LDSRMFPUL_OPCODE = 0x405a,
+ FSTSFPULFRN_OPCODE = 0xf00d,
+ FSQRT_OPCODE = 0xf06d,
+ FSCHG_OPCODE = 0xf3fd,
+    CLRT_OPCODE = 0x0008,
+};
+
+namespace SH4Registers {
+typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14, fp = r14,
+ r15, sp = r15,
+ pc,
+ pr,
+} RegisterID;
+
+typedef enum {
+ fr0, dr0 = fr0,
+ fr1,
+ fr2, dr2 = fr2,
+ fr3,
+ fr4, dr4 = fr4,
+ fr5,
+ fr6, dr6 = fr6,
+ fr7,
+ fr8, dr8 = fr8,
+ fr9,
+ fr10, dr10 = fr10,
+ fr11,
+ fr12, dr12 = fr12,
+ fr13,
+ fr14, dr14 = fr14,
+ fr15,
+} FPRegisterID;
+}
+
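+// SH4 instructions are a fixed 16 bits wide. The helpers below splice register
+// numbers and immediates into the fixed opcode bits; for instance,
+// getOpcodeGroup1 places its first register argument in bits 8..11 and its
+// second in bits 4..7 of the instruction word.
+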
+inline uint16_t getOpcodeGroup1(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4));
+}
+
+inline uint16_t getOpcodeGroup2(uint16_t opc, int rm)
+{
+ return (opc | ((rm & 0xf) << 8));
+}
+
+inline uint16_t getOpcodeGroup3(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | (rn & 0xff));
+}
+
+inline uint16_t getOpcodeGroup4(uint16_t opc, int rm, int rn, int offset)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4) | (offset & 0xf));
+}
+
+inline uint16_t getOpcodeGroup5(uint16_t opc, int rm)
+{
+ return (opc | (rm & 0xff));
+}
+
+inline uint16_t getOpcodeGroup6(uint16_t opc, int rm)
+{
+ return (opc | (rm & 0xfff));
+}
+
+inline uint16_t getOpcodeGroup7(uint16_t opc, int rm)
+{
+ return (opc | ((rm & 0x7) << 9));
+}
+
+inline uint16_t getOpcodeGroup8(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0x7) << 9) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup9(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup10(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0x7) << 9) | ((rn & 0xf) << 4));
+}
+
+inline uint16_t getOpcodeGroup11(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 4) | (rn & 0xf));
+}
+
+inline uint16_t getRn(uint16_t x)
+{
+ return ((x & 0xf00) >> 8);
+}
+
+inline uint16_t getRm(uint16_t x)
+{
+ return ((x & 0xf0) >> 4);
+}
+
+inline uint16_t getDisp(uint16_t x)
+{
+ return (x & 0xf);
+}
+
+inline uint16_t getImm8(uint16_t x)
+{
+ return (x & 0xff);
+}
+
+inline uint16_t getImm12(uint16_t x)
+{
+ return (x & 0xfff);
+}
+
+inline uint16_t getDRn(uint16_t x)
+{
+ return ((x & 0xe00) >> 9);
+}
+
+inline uint16_t getDRm(uint16_t x)
+{
+ return ((x & 0xe0) >> 5);
+}
+
+class SH4Assembler {
+public:
+ typedef SH4Registers::RegisterID RegisterID;
+ typedef SH4Registers::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<512, 4, 2, SH4Assembler> SH4Buffer;
+ static const RegisterID scratchReg1 = SH4Registers::r3;
+ static const RegisterID scratchReg2 = SH4Registers::r11;
+ static const uint32_t maxInstructionSize = 16;
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0009,
+ padForAlign32 = 0x00090009,
+ };
+
+ enum JumpType {
+ JumpFar,
+ JumpNear
+ };
+
+ SH4Assembler()
+ {
+ m_claimscratchReg = 0x0;
+ }
+
+ // SH4 condition codes
+ typedef enum {
+ EQ = 0x0, // Equal
+ NE = 0x1, // Not Equal
+        HS = 0x2, // Unsigned Greater Than or Equal
+        HI = 0x3, // Unsigned Greater Than
+        LS = 0x4, // Unsigned Lower or Same
+        LI = 0x5, // Unsigned Lower
+        GE = 0x6, // Greater or Equal
+        LT = 0x7, // Less Than
+        GT = 0x8, // Greater Than
+        LE = 0x9, // Less or Equal
+        OF = 0xa, // Overflow
+        SI = 0xb, // Signed
+        EQU= 0xc, // Equal or unordered (NaN)
+ NEU= 0xd,
+ GTU= 0xe,
+ GEU= 0xf,
+ LTU= 0x10,
+ LEU= 0x11,
+ } Condition;
+
+ // Opaque label types
+public:
+ bool isImmediate(int constant)
+ {
+ return ((constant <= 127) && (constant >= -128));
+ }
+
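+    // m_claimscratchReg is a two-bit mask recording which of the two dedicated
+    // scratch registers (r3 and r11) are currently claimed; requesting a scratch
+    // register while both are already claimed is caught by the ASSERT below.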
+ RegisterID claimScratch()
+ {
+ ASSERT((m_claimscratchReg != 0x3));
+
+ if (!(m_claimscratchReg & 0x1)) {
+ m_claimscratchReg = (m_claimscratchReg | 0x1);
+ return scratchReg1;
+ }
+
+ m_claimscratchReg = (m_claimscratchReg | 0x2);
+ return scratchReg2;
+ }
+
+ void releaseScratch(RegisterID scratchR)
+ {
+ if (scratchR == scratchReg1)
+ m_claimscratchReg = (m_claimscratchReg & 0x2);
+ else
+ m_claimscratchReg = (m_claimscratchReg & 0x1);
+ }
+
+ // Stack operations
+
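+    // SH4 has no push/pop instructions: a push is a pre-decrement store
+    // (mov.l Rm, @-r15) and a pop a post-increment load (mov.l @r15+, Rn).
+    // The procedure register pr uses the dedicated sts.l/lds.l forms.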
+ void pushReg(RegisterID reg)
+ {
+ if (reg == SH4Registers::pr) {
+ oneShortOp(getOpcodeGroup2(STSLPR_OPCODE, SH4Registers::sp));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup1(MOVL_WRITE_RNDEC_OPCODE, SH4Registers::sp, reg));
+ }
+
+ void popReg(RegisterID reg)
+ {
+ if (reg == SH4Registers::pr) {
+ oneShortOp(getOpcodeGroup2(LDSLPR_OPCODE, SH4Registers::sp));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, reg, SH4Registers::sp));
+ }
+
+ void movt(RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(MOVT_OPCODE, dst);
+ oneShortOp(opc);
+ }
+
+ // Arithmetic operations
+
+ void addlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADD_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addclRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADDC_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addvlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADDV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(ADDIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void andlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(AND_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void andlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void div1lRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DIV1_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void div0lRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DIV0_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void notlReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(NOT_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void orlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(OR_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void orlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void sublRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(SUB_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void subvlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(SUBV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void xorlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(XOR_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void xorlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void shllImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHLL_OPCODE, dst));
+ break;
+ case 2:
+ oneShortOp(getOpcodeGroup2(SHLL2_OPCODE, dst));
+ break;
+ case 8:
+ oneShortOp(getOpcodeGroup2(SHLL8_OPCODE, dst));
+ break;
+ case 16:
+ oneShortOp(getOpcodeGroup2(SHLL16_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void neg(RegisterID dst, RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup1(NEG_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void shllRegReg(RegisterID dst, RegisterID rShift)
+ {
+ uint16_t opc = getOpcodeGroup1(SHLD_OPCODE, dst, rShift);
+ oneShortOp(opc);
+ }
+
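+    // SHLD/SHAD shift left for a positive count and right for a negative one,
+    // so the right-shift helpers below negate the count first (note that this
+    // clobbers the shift-count register).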
+ void shlrRegReg(RegisterID dst, RegisterID rShift)
+ {
+ neg(rShift, rShift);
+ shllRegReg(dst, rShift);
+ }
+
+ void sharRegReg(RegisterID dst, RegisterID rShift)
+ {
+ neg(rShift, rShift);
+ shaRegReg(dst, rShift);
+ }
+
+ void shaRegReg(RegisterID dst, RegisterID rShift)
+ {
+ uint16_t opc = getOpcodeGroup1(SHAD_OPCODE, dst, rShift);
+ oneShortOp(opc);
+ }
+
+ void shlrImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHLR_OPCODE, dst));
+ break;
+ case 2:
+ oneShortOp(getOpcodeGroup2(SHLR2_OPCODE, dst));
+ break;
+ case 8:
+ oneShortOp(getOpcodeGroup2(SHLR8_OPCODE, dst));
+ break;
+ case 16:
+ oneShortOp(getOpcodeGroup2(SHLR16_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void imullRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MULL_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void dmullRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DMULL_L_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void dmulslRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DMULSL_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void stsmacl(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSMACL_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stsmach(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSMACH_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // Comparisons
+
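+    // SH4 compares set the T bit and only exist in "greater than" flavours
+    // (cmp/eq, cmp/gt, cmp/ge, cmp/hi, cmp/hs), so the "less than" style
+    // conditions are encoded by swapping the operands; NE reuses cmp/eq and is
+    // distinguished by the sense of the subsequent branch (bt vs. bf).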
+ void cmplRegReg(RegisterID left, RegisterID right, Condition cond)
+ {
+ switch (cond) {
+ case NE:
+ oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+ break;
+ case GT:
+ oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, right, left));
+ break;
+ case EQ:
+ oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+ break;
+ case GE:
+ oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, right, left));
+ break;
+ case HS:
+ oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, right, left));
+ break;
+ case HI:
+ oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, right, left));
+ break;
+ case LI:
+ oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, left, right));
+ break;
+ case LS:
+ oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, left, right));
+ break;
+ case LE:
+ oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, left, right));
+ break;
+ case LT:
+ oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, left, right));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void cmppl(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(CMPPL_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void cmppz(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(CMPPZ_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void cmpEqImmR0(int imm, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm);
+ oneShortOp(opc);
+ }
+
+ void testlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(TST_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void testlImm8r(int imm, RegisterID dst)
+ {
+ ASSERT((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0));
+
+ uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm);
+ oneShortOp(opc);
+ }
+
+ void nop()
+ {
+ oneShortOp(NOP_OPCODE, false);
+ }
+
+ void sett()
+ {
+ oneShortOp(SETT_OPCODE);
+ }
+
+ void clrt()
+ {
+ oneShortOp(CLRT_OPCODE);
+ }
+
+ void fschg()
+ {
+ oneShortOp(FSCHG_OPCODE);
+ }
+
+ void bkpt()
+ {
+ oneShortOp(BRK_OPCODE, false);
+ }
+
+ void branch(uint16_t opc, int label)
+ {
+ switch (opc) {
+ case BT_OPCODE:
+ ASSERT((label <= 127) && (label >= -128));
+ oneShortOp(getOpcodeGroup5(BT_OPCODE, label));
+ break;
+ case BRA_OPCODE:
+ ASSERT((label <= 2047) && (label >= -2048));
+ oneShortOp(getOpcodeGroup6(BRA_OPCODE, label));
+ break;
+ case BF_OPCODE:
+ ASSERT((label <= 127) && (label >= -128));
+ oneShortOp(getOpcodeGroup5(BF_OPCODE, label));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void branch(uint16_t opc, RegisterID reg)
+ {
+ switch (opc) {
+ case BRAF_OPCODE:
+ oneShortOp(getOpcodeGroup2(BRAF_OPCODE, reg));
+ break;
+ case JMP_OPCODE:
+ oneShortOp(getOpcodeGroup2(JMP_OPCODE, reg));
+ break;
+ case JSR_OPCODE:
+ oneShortOp(getOpcodeGroup2(JSR_OPCODE, reg));
+ break;
+ case BSRF_OPCODE:
+ oneShortOp(getOpcodeGroup2(BSRF_OPCODE, reg));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void ldspr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(LDSPR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stspr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSPR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void extub(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(EXTUB_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void extuw(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(EXTUW_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ // Float operations
+
+ void ldsrmfpul(RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(LDS_RM_FPUL_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void fneg(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(FNEG_OPCODE, dst);
+ oneShortOp(opc, true, false);
+ }
+
+ void fsqrt(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(FSQRT_OPCODE, dst);
+ oneShortOp(opc, true, false);
+ }
+
+ void stsfpulReg(RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(STS_FPUL_RN_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void floatfpulfrn(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FLOAT_OPCODE, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmull(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMUL_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadrm(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriterm(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriter0r(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadr0r(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadrminc(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_INC_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriterndec(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_DEC_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void ftrcRegfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FTRC_OPCODE, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fldsfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FLDS_FRM_FPUL_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void fstsfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FSTS_FPUL_FRN_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void ldsfpscr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(LDSFPSCR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stsfpscr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSFPSCR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // Double operations
+
+ void dcnvds(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FCNVDS_DRM_FPUL_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcnvsd(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FCNVSD_FPUL_DRN_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcmppeq(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FCMPEQ_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcmppgt(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FCMPGT_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dmulRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FMUL_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dsubRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FSUB_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void daddRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FADD_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dmovRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FMOV_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void ddivRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FDIV_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dsqrt(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FSQRT_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dneg(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FNEG_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovReadrm(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovWriterm(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovWriter0r(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_R0RN_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovReadr0r(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_R0RM_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovReadrminc(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_INC_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovWriterndec(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_DEC_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void floatfpulDreg(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FLOAT_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void ftrcdrmfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FTRC_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ // Various move ops
+
+ void movImm8(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void movlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwRegMem(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_WRITE_RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwMemReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwPCReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT(base == SH4Registers::pc);
+ ASSERT((offset <= 255) && (offset >= 0));
+
+ uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset);
+ oneShortOp(opc);
+ }
+
+ void movwMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset);
+ oneShortOp(opc);
+ }
+
+ void movwR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlRegMem(RegisterID src, int offset, RegisterID base)
+ {
+ ASSERT((offset <= 15) && (offset >= 0));
+
+ if (!offset) {
+ oneShortOp(getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup4(MOVL_WRITE_OFFRN_OPCODE, base, src, offset));
+ }
+
+ void movlRegMem(RegisterID src, RegisterID base)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src);
+ oneShortOp(opc);
+ }
+
+ void movlMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ if (base == SH4Registers::pc) {
+ ASSERT((offset <= 255) && (offset >= 0));
+ oneShortOp(getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, dst, offset));
+ return;
+ }
+
+ ASSERT((offset <= 15) && (offset >= 0));
+ if (!offset) {
+ oneShortOp(getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+ }
+
+ void movlMemRegCompact(int offset, RegisterID base, RegisterID dst)
+ {
+ oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+ }
+
+ void movbRegMem(RegisterID src, RegisterID base)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_WRITE_RN_OPCODE, base, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset);
+ oneShortOp(opc);
+ }
+
+ void movbR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlMemReg(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movlMemRegIn(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movlR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlRegMemr0(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void loadConstant(uint32_t constant, RegisterID dst)
+ {
+ if (((int)constant <= 0x7f) && ((int)constant >= -0x80)) {
+ movImm8(constant, dst);
+ return;
+ }
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+ m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+ printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+ m_buffer.putShortWithConstantInt(opc, constant, true);
+ }
+
+ void loadConstantUnReusable(uint32_t constant, RegisterID dst, bool ensureSpace = false)
+ {
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+ if (ensureSpace)
+ m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+
+ printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+ m_buffer.putShortWithConstantInt(opc, constant);
+ }
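+
+ // Constants outside the signed 8-bit MOV immediate range are spilled to
+ // the constant pool: putShortWithConstantInt() emits a MOV placeholder
+ // that the pool flush rewrites into a PC-relative MOV.L load. The
+ // trailing "true" in loadConstant() marks the pool slot as reusable, so
+ // later identical constants can share it. An illustrative sketch
+ // (register and values chosen for the example):
+ //
+ //     loadConstant(0x7f, SH4Registers::r1);       // "mov #127, r1"
+ //     loadConstant(0x12345678, SH4Registers::r1); // "mov.l @(disp, pc), r1"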
+
+ // Flow control
+
+ AssemblerLabel call()
+ {
+ RegisterID scr = claimScratch();
+ m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+ loadConstantUnReusable(0x0, scr);
+ branch(JSR_OPCODE, scr);
+ nop();
+ releaseScratch(scr);
+ return m_buffer.label();
+ }
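+
+ // The sequence above loads the not-yet-known callee address into a
+ // scratch register, branches with JSR and fills the mandatory delay slot
+ // with a NOP; the returned label marks the return address, from which
+ // linkCall() steps back three halfwords to find the constant load to
+ // patch.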
+
+ AssemblerLabel call(RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ branch(JSR_OPCODE, dst);
+ nop();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp()
+ {
+ RegisterID scr = claimScratch();
+ m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+ AssemblerLabel label = m_buffer.label();
+ loadConstantUnReusable(0x0, scr);
+ branch(BRAF_OPCODE, scr);
+ nop();
+ releaseScratch(scr);
+ return label;
+ }
+
+ void extraInstrForBranch(RegisterID dst)
+ {
+ loadConstantUnReusable(0x0, dst);
+ nop();
+ nop();
+ }
+
+ AssemblerLabel jmp(RegisterID dst)
+ {
+ jmpReg(dst);
+ return m_buffer.label();
+ }
+
+ void jmpReg(RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ branch(JMP_OPCODE, dst);
+ nop();
+ }
+
+ AssemblerLabel jne()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BF_OPCODE, 0);
+ return label;
+ }
+
+ AssemblerLabel je()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BT_OPCODE, 0);
+ return label;
+ }
+
+ AssemblerLabel bra()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BRA_OPCODE, 0);
+ return label;
+ }
+
+ void ret()
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ oneShortOp(RTS_OPCODE, false);
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel label()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ while (!m_buffer.isAligned(alignment)) {
+ nop();
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ }
+ return label();
+ }
+
+ static void changePCrelativeAddress(int offset, uint16_t* instructionPtr, uint32_t newAddress)
+ {
+ uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & ~0x3);
+ *reinterpret_cast<uint32_t*>(address) = newAddress;
+ }
+
+ static uint32_t readPCrelativeAddress(int offset, uint16_t* instructionPtr)
+ {
+ uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & ~0x3);
+ return *reinterpret_cast<uint32_t*>(address);
+ }
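+
+ // MOV.L @(disp, PC) addresses its literal at ((insn + 4) & ~3) + disp * 4:
+ // the 8-bit displacement counts longwords from the alignment-rounded PC.
+ // As an illustrative example, a load at 0x1002 with disp = 3 targets
+ // ((0x1002 + 4) & ~3) + 12 = 0x1010.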
+
+ static uint16_t* getInstructionPtr(void* code, int offset)
+ {
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uint32_t>(code) + offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
+ uint16_t instruction = *instructionPtr;
+ int offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(code)) - from.m_offset;
+
+ if (((instruction & 0xff00) == BT_OPCODE) || ((instruction & 0xff00) == BF_OPCODE)) {
+ /* BT label ==> BF 2
+    nop           LDR reg
+    nop           braf @reg
+    nop           nop
+ */
+ offsetBits -= 8;
+ instruction ^= 0x0202;
+ *instructionPtr++ = instruction;
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+ instruction = (BRAF_OPCODE | (*instructionPtr++ & 0xf00));
+ *instructionPtr = instruction;
+ printBlockInstr(instructionPtr - 2, from.m_offset, 3);
+ return;
+ }
+
+ /* MOV #imm, reg => LDR reg
+    braf @reg        braf @reg
+    nop              nop
+ */
+ ASSERT((*(instructionPtr + 1) & BRAF_OPCODE) == BRAF_OPCODE);
+
+ offsetBits -= 4;
+ if (offsetBits >= -4096 && offsetBits <= 4094) {
+ *instructionPtr = getOpcodeGroup6(BRA_OPCODE, offsetBits >> 1);
+ *(++instructionPtr) = NOP_OPCODE;
+ printBlockInstr(instructionPtr - 1, from.m_offset, 2);
+ return;
+ }
+
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits - 2);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ }
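+
+ // linkJump() patches previously emitted branch sequences in place. A
+ // conditional BT/BF is inverted (the XOR with 0x0202 flips BT<->BF and
+ // gives it a displacement of 2) so that it hops over a constant load plus
+ // BRAF pair; an unconditional jump whose target is within the 12-bit BRA
+ // range (about +/-4 KB) is shrunk to a single BRA/NOP; otherwise the
+ // pooled constant is repointed at the new offset.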
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
+ instructionPtr -= 3;
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ uint16_t* instructionPtr = getInstructionPtr(code, where.m_offset);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(value));
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static uint32_t* getLdrImmAddressOnPool(SH4Word* insn, uint32_t* constPool)
+ {
+ return (constPool + (*insn & 0xff));
+ }
+
+ static SH4Word patchConstantPoolLoad(SH4Word load, int value)
+ {
+ return ((load & ~0xff) | value);
+ }
+
+ static SH4Buffer::TwoShorts placeConstantPoolBarrier(int offset)
+ {
+ ASSERT(((offset >> 1) <= 2047) && ((offset >> 1) >= -2048));
+
+ SH4Buffer::TwoShorts m_barrier;
+ m_barrier.high = (BRA_OPCODE | (offset >> 1));
+ m_barrier.low = NOP_OPCODE;
+ printInstr(((BRA_OPCODE | (offset >> 1))), 0);
+ printInstr(NOP_OPCODE, 0);
+ return m_barrier;
+ }
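+
+ // When the constant pool is dumped into the middle of the instruction
+ // stream, execution must not fall through into the literals: the barrier
+ // is an unconditional BRA over the pool (hence the 12-bit displacement
+ // assertion) followed by the mandatory delay-slot NOP.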
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+ {
+ SH4Word* instructionPtr = reinterpret_cast<SH4Word*>(loadAddr);
+ SH4Word instruction = *instructionPtr;
+ SH4Word index = instruction & 0xff;
+
+ if ((instruction & 0xf000) != MOVIMM_OPCODE)
+ return;
+
+ ASSERT((((reinterpret_cast<uint32_t>(constPoolAddr) - reinterpret_cast<uint32_t>(loadAddr)) + index * 4)) < 1024);
+
+ int offset = reinterpret_cast<uint32_t>(constPoolAddr) + (index * 4) - ((reinterpret_cast<uint32_t>(instructionPtr) & ~0x03) + 4);
+ instruction &= 0xf00;
+ instruction |= 0xd000;
+ offset &= 0x03ff;
+ instruction |= (offset >> 2);
+ *instructionPtr = instruction;
+ printInstr(instruction, reinterpret_cast<uint32_t>(loadAddr));
+ }
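+
+ // Rewrites a recorded "MOV #imm, Rn" placeholder (0xeNxx) into
+ // "MOV.L @(disp, PC), Rn" (0xdNxx): the destination-register nibble is
+ // preserved and the displacement becomes the longword distance from the
+ // alignment-rounded PC to the constant's slot in the dumped pool.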
+
+ static void repatchPointer(void* where, void* value)
+ {
+ patchPointer(where, value);
+ }
+
+ static void* readPointer(void* code)
+ {
+ return reinterpret_cast<void*>(readInt32(code));
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, value);
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(value >= 0);
+ ASSERT(value <= 60);
+ *reinterpret_cast<uint16_t*>(where) = ((*reinterpret_cast<uint16_t*>(where) & 0xfff0) | (value >> 2));
+ cacheFlush(reinterpret_cast<uint16_t*>(where), sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+ uint16_t instruction = *instructionPtr;
+ int32_t offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(from));
+
+ if (((*instructionPtr & 0xff00) == BT_OPCODE) || ((*instructionPtr & 0xff00) == BF_OPCODE)) {
+ offsetBits -= 8;
+ instructionPtr++;
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+ instruction = (BRAF_OPCODE | (*instructionPtr++ & 0xf00));
+ *instructionPtr = instruction;
+ printBlockInstr(instructionPtr - 2, reinterpret_cast<uint32_t>(from), 3);
+ return;
+ }
+
+ ASSERT((*(instructionPtr + 1) & BRAF_OPCODE) == BRAF_OPCODE);
+ offsetBits -= 4;
+ if (offsetBits >= -4096 && offsetBits <= 4094) {
+ *instructionPtr = getOpcodeGroup6(BRA_OPCODE, offsetBits >> 1);
+ *(++instructionPtr) = NOP_OPCODE;
+ printBlockInstr(instructionPtr - 1, reinterpret_cast<uint32_t>(from), 2);
+ return;
+ }
+
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits - 2);
+ printInstr(*instructionPtr, reinterpret_cast<uint32_t>(from));
+ }
+
+ // Linking & patching
+
+ static void revertJump(void* instructionStart, SH4Word imm)
+ {
+ SH4Word *insn = reinterpret_cast<SH4Word*>(instructionStart);
+ SH4Word disp;
+
+ ASSERT((insn[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+
+ disp = insn[0] & 0x00ff;
+ insn += 2 + (disp << 1); // PC += 4 + (disp*4)
+ insn = reinterpret_cast<SH4Word*>(reinterpret_cast<unsigned>(insn) & ~3);
+ insn[0] = imm;
+ cacheFlush(insn, sizeof(SH4Word));
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+
+ uint16_t* instructionPtr = getInstructionPtr(data(), from.m_offset);
+ uint16_t instruction = *instructionPtr;
+ int offsetBits;
+
+ if (type == JumpNear) {
+ ASSERT((instruction == BT_OPCODE) || (instruction == BF_OPCODE) || (instruction == BRA_OPCODE));
+ int offset = (codeSize() - from.m_offset) - 4;
+ *instructionPtr = instruction | (offset >> 1);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
+ if (((instruction & 0xff00) == BT_OPCODE) || ((instruction & 0xff00) == BF_OPCODE)) {
+ /* BT label => BF 2
+    nop          LDR reg
+    nop          braf @reg
+    nop          nop
+ */
+ offsetBits = (to.m_offset - from.m_offset) - 8;
+ instruction ^= 0x0202;
+ *instructionPtr++ = instruction;
+ if ((*instructionPtr & 0xf000) == 0xe000) {
+ uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
+ *addr = offsetBits;
+ } else
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+ instruction = (BRAF_OPCODE | (*instructionPtr++ & 0xf00));
+ *instructionPtr = instruction;
+ printBlockInstr(instructionPtr - 2, from.m_offset, 3);
+ return;
+ }
+
+ /* MOV #imm, reg => LDR reg
+    braf @reg       braf @reg
+    nop             nop
+ */
+ ASSERT((*(instructionPtr + 1) & BRAF_OPCODE) == BRAF_OPCODE);
+ offsetBits = (to.m_offset - from.m_offset) - 4;
+ if (offsetBits >= -4096 && offsetBits <= 4094) {
+ *instructionPtr = getOpcodeGroup6(BRA_OPCODE, offsetBits >> 1);
+ *(++instructionPtr) = NOP_OPCODE;
+ printBlockInstr(instructionPtr - 1, from.m_offset, 2);
+ return;
+ }
+
+ instruction = *instructionPtr;
+ if ((instruction & 0xf000) == 0xe000) {
+ uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
+ *addr = offsetBits - 2;
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits - 2);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static void patchPointer(void* code, AssemblerLabel where, void* value)
+ {
+ patchPointer(reinterpret_cast<uint32_t*>(code) + where.m_offset, value);
+ }
+
+ static void patchPointer(void* code, void* value)
+ {
+ patchInt32(code, reinterpret_cast<uint32_t>(value));
+ }
+
+ static void patchInt32(void* code, uint32_t value)
+ {
+ changePCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code), value);
+ }
+
+ static uint32_t readInt32(void* code)
+ {
+ return readPCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ uint16_t* instructionPtr = static_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ return m_buffer.executableCopy(globalData, ownerUID, effort);
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if !OS(LINUX)
+#error "The cacheFlush support is missing on this platform."
+#elif defined CACHEFLUSH_D_L2
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
+#else
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
+#endif
+ }
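+
+ // Cache maintenance goes through the SH-specific Linux cacheflush
+ // syscall, taking (start, size, flags): the flags request a data-cache
+ // write-back plus an instruction-cache invalidation (and an L2 flush
+ // where the kernel headers provide CACHEFLUSH_D_L2). Anything other than
+ // Linux is deliberately a compile-time error.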
+
+ void prefix(uint16_t pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ void oneShortOp(uint16_t opcode, bool checksize = true, bool isDouble = true)
+ {
+ printInstr(opcode, m_buffer.codeSize(), isDouble);
+ if (checksize)
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putShortUnchecked(opcode);
+ }
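+
+ // Every SH4 instruction is a single 16-bit halfword, and oneShortOp() is
+ // the one emission point: it traces the opcode when JIT dumping is
+ // enabled, reserves buffer space unless the caller already did (those
+ // callers pass checksize = false), and writes the halfword. isDouble only
+ // affects how the trace decodes FP register fields.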
+
+ void ensureSpace(int space)
+ {
+ m_buffer.ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ // Administrative methods
+
+ void* data() const { return m_buffer.data(); }
+ size_t codeSize() const { return m_buffer.codeSize(); }
+
+#ifdef SH4_ASSEMBLER_TRACING
+ static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true)
+ {
+ if (!getenv("JavaScriptCoreDumpJIT"))
+ return;
+
+ const char *format = 0;
+ printfStdoutInstr("offset: 0x%8.8x\t", size);
+ switch (opc) {
+ case BRK_OPCODE:
+ format = " BRK\n";
+ break;
+ case NOP_OPCODE:
+ format = " NOP\n";
+ break;
+ case RTS_OPCODE:
+ format =" *RTS\n";
+ break;
+ case SETS_OPCODE:
+ format = " SETS\n";
+ break;
+ case SETT_OPCODE:
+ format = " SETT\n";
+ break;
+ case CLRT_OPCODE:
+ format = " CLRT\n";
+ break;
+ case FSCHG_OPCODE:
+ format = " FSCHG\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format);
+ return;
+ }
+ switch (opc & 0xf0ff) {
+ case BRAF_OPCODE:
+ format = " *BRAF R%d\n";
+ break;
+ case DT_OPCODE:
+ format = " DT R%d\n";
+ break;
+ case CMPPL_OPCODE:
+ format = " CMP/PL R%d\n";
+ break;
+ case CMPPZ_OPCODE:
+ format = " CMP/PZ R%d\n";
+ break;
+ case JMP_OPCODE:
+ format = " *JMP @R%d\n";
+ break;
+ case JSR_OPCODE:
+ format = " *JSR @R%d\n";
+ break;
+ case LDSPR_OPCODE:
+ format = " LDS R%d, PR\n";
+ break;
+ case LDSLPR_OPCODE:
+ format = " LDS.L @R%d+, PR\n";
+ break;
+ case MOVT_OPCODE:
+ format = " MOVT R%d\n";
+ break;
+ case SHAL_OPCODE:
+ format = " SHAL R%d\n";
+ break;
+ case SHAR_OPCODE:
+ format = " SHAR R%d\n";
+ break;
+ case SHLL_OPCODE:
+ format = " SHLL R%d\n";
+ break;
+ case SHLL2_OPCODE:
+ format = " SHLL2 R%d\n";
+ break;
+ case SHLL8_OPCODE:
+ format = " SHLL8 R%d\n";
+ break;
+ case SHLL16_OPCODE:
+ format = " SHLL16 R%d\n";
+ break;
+ case SHLR_OPCODE:
+ format = " SHLR R%d\n";
+ break;
+ case SHLR2_OPCODE:
+ format = " SHLR2 R%d\n";
+ break;
+ case SHLR8_OPCODE:
+ format = " SHLR8 R%d\n";
+ break;
+ case SHLR16_OPCODE:
+ format = " SHLR16 R%d\n";
+ break;
+ case STSPR_OPCODE:
+ format = " STS PR, R%d\n";
+ break;
+ case STSLPR_OPCODE:
+ format = " STS.L PR, @-R%d\n";
+ break;
+ case LDS_RM_FPUL_OPCODE:
+ format = " LDS R%d, FPUL\n";
+ break;
+ case STS_FPUL_RN_OPCODE:
+ format = " STS FPUL, R%d \n";
+ break;
+ case FLDS_FRM_FPUL_OPCODE:
+ format = " FLDS FR%d, FPUL\n";
+ break;
+ case FSTS_FPUL_FRN_OPCODE:
+ format = " FSTS FPUL, R%d \n";
+ break;
+ case LDSFPSCR_OPCODE:
+ format = " LDS R%d, FPSCR \n";
+ break;
+ case STSFPSCR_OPCODE:
+ format = " STS FPSCR, R%d \n";
+ break;
+ case STSMACL_OPCODE:
+ format = " STS MACL, R%d \n";
+ break;
+ case STSMACH_OPCODE:
+ format = " STS MACH, R%d \n";
+ break;
+ case BSRF_OPCODE:
+ format = " *BSRF R%d";
+ break;
+ case FTRC_OPCODE:
+ format = " FTRC FR%d, FPUL\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getRn(opc));
+ return;
+ }
+ switch (opc & 0xf0ff) {
+ case FNEG_OPCODE:
+ format = " FNEG DR%d\n";
+ break;
+ case FLOAT_OPCODE:
+ format = " FLOAT DR%d\n";
+ break;
+ case FTRC_OPCODE:
+ format = " FTRC FR%d, FPUL\n";
+ break;
+ case FSQRT_OPCODE:
+ format = " FSQRT FR%d\n";
+ break;
+ case FCNVDS_DRM_FPUL_OPCODE:
+ format = " FCNVDS FR%d, FPUL\n";
+ break;
+ case FCNVSD_FPUL_DRN_OPCODE:
+ format = " FCNVSD FPUL, FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case ADD_OPCODE:
+ format = " ADD R%d, R%d\n";
+ break;
+ case ADDC_OPCODE:
+ format = " ADDC R%d, R%d\n";
+ break;
+ case ADDV_OPCODE:
+ format = " ADDV R%d, R%d\n";
+ break;
+ case AND_OPCODE:
+ format = " AND R%d, R%d\n";
+ break;
+ case DIV1_OPCODE:
+ format = " DIV1 R%d, R%d\n";
+ break;
+ case CMPEQ_OPCODE:
+ format = " CMP/EQ R%d, R%d\n";
+ break;
+ case CMPGE_OPCODE:
+ format = " CMP/GE R%d, R%d\n";
+ break;
+ case CMPGT_OPCODE:
+ format = " CMP/GT R%d, R%d\n";
+ break;
+ case CMPHI_OPCODE:
+ format = " CMP/HI R%d, R%d\n";
+ break;
+ case CMPHS_OPCODE:
+ format = " CMP/HS R%d, R%d\n";
+ break;
+ case MOV_OPCODE:
+ format = " MOV R%d, R%d\n";
+ break;
+ case MOVB_WRITE_RN_OPCODE:
+ format = " MOV.B R%d, @R%d\n";
+ break;
+ case MOVB_WRITE_RNDEC_OPCODE:
+ format = " MOV.B R%d, @-R%d\n";
+ break;
+ case MOVB_WRITE_R0RN_OPCODE:
+ format = " MOV.B R%d, @(R0, R%d)\n";
+ break;
+ case MOVB_READ_RM_OPCODE:
+ format = " MOV.B @R%d, R%d\n";
+ break;
+ case MOVB_READ_RMINC_OPCODE:
+ format = " MOV.B @R%d+, R%d\n";
+ break;
+ case MOVB_READ_R0RM_OPCODE:
+ format = " MOV.B @(R0, R%d), R%d\n";
+ break;
+ case MOVL_WRITE_RN_OPCODE:
+ format = " MOV.L R%d, @R%d\n";
+ break;
+ case MOVL_WRITE_RNDEC_OPCODE:
+ format = " MOV.L R%d, @-R%d\n";
+ break;
+ case MOVL_WRITE_R0RN_OPCODE:
+ format = " MOV.L R%d, @(R0, R%d)\n";
+ break;
+ case MOVL_READ_RM_OPCODE:
+ format = " MOV.L @R%d, R%d\n";
+ break;
+ case MOVL_READ_RMINC_OPCODE:
+ format = " MOV.L @R%d+, R%d\n";
+ break;
+ case MOVL_READ_R0RM_OPCODE:
+ format = " MOV.L @(R0, R%d), R%d\n";
+ break;
+ case MULL_OPCODE:
+ format = " MUL.L R%d, R%d\n";
+ break;
+ case DMULL_L_OPCODE:
+ format = " DMULU.L R%d, R%d\n";
+ break;
+ case DMULSL_OPCODE:
+ format = " DMULS.L R%d, R%d\n";
+ break;
+ case NEG_OPCODE:
+ format = " NEG R%d, R%d\n";
+ break;
+ case NEGC_OPCODE:
+ format = " NEGC R%d, R%d\n";
+ break;
+ case NOT_OPCODE:
+ format = " NOT R%d, R%d\n";
+ break;
+ case OR_OPCODE:
+ format = " OR R%d, R%d\n";
+ break;
+ case SHAD_OPCODE:
+ format = " SHAD R%d, R%d\n";
+ break;
+ case SHLD_OPCODE:
+ format = " SHLD R%d, R%d\n";
+ break;
+ case SUB_OPCODE:
+ format = " SUB R%d, R%d\n";
+ break;
+ case SUBC_OPCODE:
+ format = " SUBC R%d, R%d\n";
+ break;
+ case SUBV_OPCODE:
+ format = " SUBV R%d, R%d\n";
+ break;
+ case TST_OPCODE:
+ format = " TST R%d, R%d\n";
+ break;
+ case XOR_OPCODE:
+ format = " XOR R%d, R%d\n";break;
+ case MOVW_WRITE_RN_OPCODE:
+ format = " MOV.W R%d, @R%d\n";
+ break;
+ case MOVW_READ_RM_OPCODE:
+ format = " MOV.W @R%d, R%d\n";
+ break;
+ case MOVW_READ_R0RM_OPCODE:
+ format = " MOV.W @(R0, R%d), R%d\n";
+ break;
+ case EXTUB_OPCODE:
+ format = " EXTU.B R%d, R%d\n";
+ break;
+ case EXTUW_OPCODE:
+ format = " EXTU.W R%d, R%d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FSUB_OPCODE:
+ format = " FSUB FR%d, FR%d\n";
+ break;
+ case FADD_OPCODE:
+ format = " FADD FR%d, FR%d\n";
+ break;
+ case FDIV_OPCODE:
+ format = " FDIV FR%d, FR%d\n";
+ break;
+ case FMUL_OPCODE:
+ format = " DMULL FR%d, FR%d\n";
+ break;
+ case FMOV_OPCODE:
+ format = " FMOV FR%d, FR%d\n";
+ break;
+ case FCMPEQ_OPCODE:
+ format = " FCMP/EQ FR%d, FR%d\n";
+ break;
+ case FCMPGT_OPCODE:
+ format = " FCMP/GT FR%d, FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, getDRm(opc) << 1, getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FMOVS_WRITE_RN_DEC_OPCODE:
+ format = " %s FR%d, @-R%d\n";
+ break;
+ case FMOVS_WRITE_RN_OPCODE:
+ format = " %s FR%d, @R%d\n";
+ break;
+ case FMOVS_WRITE_R0RN_OPCODE:
+ format = " %s FR%d, @(R0, R%d)\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, "FMOV", getDRm(opc) << 1, getDRn(opc));
+ else
+ printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FMOVS_READ_RM_OPCODE:
+ format = " %s @R%d, FR%d\n";
+ break;
+ case FMOVS_READ_RM_INC_OPCODE:
+ format = " %s @R%d+, FR%d\n";
+ break;
+ case FMOVS_READ_R0RM_OPCODE:
+ format = " %s @(R0, R%d), FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, "FMOV", getDRm(opc), getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xff00) {
+ case BF_OPCODE:
+ format = " BF %d\n";
+ break;
+ case BFS_OPCODE:
+ format = " *BF/S %d\n";
+ break;
+ case ANDIMM_OPCODE:
+ format = " AND #%d, R0\n";
+ break;
+ case BT_OPCODE:
+ format = " BT %d\n";
+ break;
+ case BTS_OPCODE:
+ format = " *BT/S %d\n";
+ break;
+ case CMPEQIMM_OPCODE:
+ format = " CMP/EQ #%d, R0\n";
+ break;
+ case MOVB_WRITE_OFFGBR_OPCODE:
+ format = " MOV.B R0, @(%d, GBR)\n";
+ break;
+ case MOVB_READ_OFFGBR_OPCODE:
+ format = " MOV.B @(%d, GBR), R0\n";
+ break;
+ case MOVL_WRITE_OFFGBR_OPCODE:
+ format = " MOV.L R0, @(%d, GBR)\n";
+ break;
+ case MOVL_READ_OFFGBR_OPCODE:
+ format = " MOV.L @(%d, GBR), R0\n";
+ break;
+ case MOVA_READ_OFFPC_OPCODE:
+ format = " MOVA @(%d, PC), R0\n";
+ break;
+ case ORIMM_OPCODE:
+ format = " OR #%d, R0\n";
+ break;
+ case ORBIMM_OPCODE:
+ format = " OR.B #%d, @(R0, GBR)\n";
+ break;
+ case TSTIMM_OPCODE:
+ format = " TST #%d, R0\n";
+ break;
+ case TSTB_OPCODE:
+ format = " TST.B %d, @(R0, GBR)\n";
+ break;
+ case XORIMM_OPCODE:
+ format = " XOR #%d, R0\n";
+ break;
+ case XORB_OPCODE:
+ format = " XOR.B %d, @(R0, GBR)\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm8(opc));
+ return;
+ }
+ switch (opc & 0xff00) {
+ case MOVB_WRITE_OFFRN_OPCODE:
+ format = " MOV.B R0, @(%d, R%d)\n";
+ break;
+ case MOVB_READ_OFFRM_OPCODE:
+ format = " MOV.B @(%d, R%d), R0\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getDisp(opc), getRm(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case BRA_OPCODE:
+ format = " *BRA %d\n";
+ break;
+ case BSR_OPCODE:
+ format = " *BSR %d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm12(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case MOVL_READ_OFFPC_OPCODE:
+ format = " MOV.L @(%d, PC), R%d\n";
+ break;
+ case ADDIMM_OPCODE:
+ format = " ADD #%d, R%d\n";
+ break;
+ case MOVIMM_OPCODE:
+ format = " MOV #%d, R%d\n";
+ break;
+ case MOVW_READ_OFFPC_OPCODE:
+ format = " MOV.W @(%d, PC), R%d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm8(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case MOVL_WRITE_OFFRN_OPCODE:
+ format = " MOV.L R%d, @(%d, R%d)\n";
+ printfStdoutInstr(format, getRm(opc), getDisp(opc), getRn(opc));
+ break;
+ case MOVL_READ_OFFRM_OPCODE:
+ format = " MOV.L @(%d, R%d), R%d\n";
+ printfStdoutInstr(format, getDisp(opc), getRm(opc), getRn(opc));
+ break;
+ }
+ }
+
+ static void printfStdoutInstr(const char* format, ...)
+ {
+ if (getenv("JavaScriptCoreDumpJIT")) {
+ va_list args;
+ va_start(args, format);
+ vprintfStdoutInstr(format, args);
+ va_end(args);
+ }
+ }
+
+ static void vprintfStdoutInstr(const char* format, va_list args)
+ {
+ if (getenv("JavaScriptCoreDumpJIT"))
+ WTF::dataLogFV(format, args);
+ }
+
+ static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr)
+ {
+ printfStdoutInstr(">> repatch instructions after link\n");
+ for (int i = 0; i < nbInstr; i++)
+ printInstr(*(first + i), offset + i * 2);
+ printfStdoutInstr(">> end repatch\n");
+ }
+#else
+ static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true) { }
+ static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr) { }
+#endif
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+ insPtr += 2; // skip MOV and ADD opcodes
+
+ if (((*insPtr) & 0xf00f) != MOVL_READ_RM_OPCODE) {
+ *insPtr = MOVL_READ_RM_OPCODE | (*insPtr & 0x0ff0);
+ cacheFlush(insPtr, sizeof(SH4Word));
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+ insPtr += 2; // skip MOV and ADD opcodes
+
+ if (((*insPtr) & 0xf00f) != MOV_OPCODE) {
+ *insPtr = MOV_OPCODE | (*insPtr & 0x0ff0);
+ cacheFlush(insPtr, sizeof(SH4Word));
+ }
+ }
+
+private:
+ SH4Buffer m_buffer;
+ int m_claimscratchReg;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(SH4)
+
+#endif // SH4Assembler_h
diff --git a/src/3rdparty/masm/assembler/X86Assembler.h b/src/3rdparty/masm/assembler/X86Assembler.h
new file mode 100644
index 0000000000..092e775ab5
--- /dev/null
+++ b/src/3rdparty/masm/assembler/X86Assembler.h
@@ -0,0 +1,2540 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef X86Assembler_h
+#define X86Assembler_h
+
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
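+// CAN_SIGN_EXTEND_8_32 is true exactly for values in [-128, 127], i.e. those
+// that survive a round trip through a signed byte; the emitters below use it
+// to pick the short sign-extended imm8 encodings over the 4-byte imm32 ones
+// (e.g. it holds for -1 but not for 128).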
+inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
+
+namespace X86Registers {
+ typedef enum {
+ eax,
+ ecx,
+ edx,
+ ebx,
+ esp,
+ ebp,
+ esi,
+ edi,
+
+#if CPU(X86_64)
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+#endif
+ } RegisterID;
+
+ typedef enum {
+ xmm0,
+ xmm1,
+ xmm2,
+ xmm3,
+ xmm4,
+ xmm5,
+ xmm6,
+ xmm7,
+ } XMMRegisterID;
+}
+
+class X86Assembler {
+public:
+ typedef X86Registers::RegisterID RegisterID;
+ typedef X86Registers::XMMRegisterID XMMRegisterID;
+ typedef XMMRegisterID FPRegisterID;
+
+ typedef enum {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE,
+ } Condition;
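+
+ // The enumerators above are declared in x86 condition-code order (nibble
+ // values 0x0-0xF), which lets jccRel32() and setccOpcode() below form the
+ // Jcc (0x0F 0x80+cc) and SETcc (0x0F 0x90+cc) opcodes by simple addition.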
+
+private:
+ typedef enum {
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GvEv = 0x3B,
+#if CPU(X86_64)
+ PRE_REX = 0x40,
+#endif
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#if CPU(X86_64)
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EbIb = 0x80,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_EvGv = 0x87,
+ OP_MOV_EbGb = 0x88,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_ESCAPE_DD = 0xDD,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ PRE_SSE_F2 = 0xF2,
+ PRE_SSE_F3 = 0xF3,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+ OP_GROUP3_EvIz = 0xF7, // Same as OP_GROUP3_Ev; carries an immediate when the instruction is a TEST.
+ OP_GROUP5_Ev = 0xFF,
+ } OneByteOpcodeID;
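+
+ // The suffixes follow Intel's opcode-map notation: E is a ModRM-addressed
+ // operand (register or memory), G is the register in the ModRM reg field,
+ // I is an immediate; b, v and z give the operand width (byte, full
+ // operand size, and at most 32 bits). OP_ADD_EvGv is thus "ADD r/m, reg"
+ // while OP_ADD_GvEv is "ADD reg, r/m".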
+
+ typedef enum {
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_MOVSS_VsdWsd = 0x10,
+ OP2_MOVSS_WsdVsd = 0x11,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_CVTSD2SS_VsdWsd = 0x5A,
+ OP2_CVTSS2SD_VsdWsd = 0x5A,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_SQRTSD_VsdWsd = 0x51,
+ OP2_ANDNPD_VpdWpd = 0x55,
+ OP2_XORPD_VpdWpd = 0x57,
+ OP2_MOVD_VdEd = 0x6E,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVSX_GvEb = 0xBE,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_MOVSX_GvEw = 0xBF,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ OP2_PSLLQ_UdqIb = 0x73,
+ OP2_PSRLQ_UdqIb = 0x73,
+ OP2_POR_VdqWdq = 0xEB,
+ } TwoByteOpcodeID;
+
+ TwoByteOpcodeID jccRel32(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+ }
+
+ TwoByteOpcodeID setccOpcode(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP_SETCC + cond);
+ }
+
+ typedef enum {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_RCL = 2,
+ GROUP2_OP_RCR = 3,
+
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ GROUP11_MOV = 0,
+
+ GROUP14_OP_PSLLQ = 6,
+ GROUP14_OP_PSRLQ = 2,
+
+ ESCAPE_DD_FSTP_doubleReal = 3,
+ } GroupOpcodeID;
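+
+ // "Group" opcodes pack several instructions behind one opcode byte and
+ // select between them with the ModRM reg field (Intel's /digit notation);
+ // e.g. 0x81 with /0 is ADD while 0x81 with /7 is CMP. These values are
+ // what the formatter writes into that field.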
+
+ class X86InstructionFormatter;
+public:
+
+ X86Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ // Stack operations:
+
+ void push_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i32(int imm)
+ {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
+ }
+
+ void pop_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
+ }
+
+ // Arithmetic operations:
+
+#if !CPU(X86_64)
+ void adcl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void addl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
+ }
+
+ void addl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void addl_mr(const void* addr, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
+ }
+#endif
+
+ void addl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+ }
+
+ void addl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
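+
+ // The Ib/Iz split above is the usual x86 immediate-size optimization. As
+ // an illustrative encoding (values chosen for the example),
+ // addl_ir(5, eax) assembles to 83 C0 05 (sign-extended imm8), whereas
+ // addl_ir(0x1000, eax) needs the full 81 C0 00 10 00 00 imm32 form; the
+ // same pattern recurs throughout this file.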
+
+ void addl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
+ }
+
+ void addq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
+ }
+
+ void addq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void addl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void andl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
+ }
+
+ void andl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+ }
+
+ void andl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+ }
+
+ void andl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
+ }
+
+ void andq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void andl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void negl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+
+#if CPU(X86_64)
+ void negq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+#endif
+
+ void negl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+ }
+
+ void notl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+ }
+
+ void notl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
+ }
+
+ void orl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
+ }
+
+ void orl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+ }
+
+ void orl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
+ }
+
+ void orq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void orl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
+ }
+#endif
+
+ void subl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
+ }
+
+ void subl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
+ }
+
+ void subl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+ }
+
+ void subl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void subq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
+ }
+
+ void subq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void subl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void xorl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+ }
+
+ void xorl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void xorl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void rorq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+#endif
+
+ void sarl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void sarl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void shrl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+ }
+
+ void shll_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+ }
+
+#if CPU(X86_64)
+ void sarq_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void sarq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+#endif
+
+ void imull_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
+ }
+
+ void imull_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+ }
+
+ void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
+ m_formatter.immediate32(value);
+ }
+
+ void idivl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+ }
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpl_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_ir_force32(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void cmpl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void cmpb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im_force32(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if CPU(X86_64)
+ void cmpq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpq_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void cmpl_rm(RegisterID reg, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
+ }
+
+ void cmpl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void cmpw_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
+ void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void testl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testb_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void testb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if CPU(X86_64)
+ void testq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
+ }
+
+ void testq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
+ }
+
+ void testq_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void testw_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testb_i8r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void setCC_r(Condition cond, RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
+ }
+
+ void sete_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
+ }
+
+ void setz_r(RegisterID dst)
+ {
+ sete_r(dst);
+ }
+
+ void setne_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
+ }
+
+ void setnz_r(RegisterID dst)
+ {
+ setne_r(dst);
+ }
+
+ // Various move ops:
+
+ void cdq()
+ {
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void fstpl(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ }
+
+#if CPU(X86_64)
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ }
+#endif
+
+ void movl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_mEAX(const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#if CPU(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if !CPU(X86_64)
+ void movb_i8m(int imm, const void* addr)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void movb_i8m(int imm, int offset, RegisterID base)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
+ }
+
+ void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_EAXm(const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#if CPU(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+#if CPU(X86_64)
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movq_mEAX(const void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(const void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsxd_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
+ }
+
+
+#else
+ void movl_rm(RegisterID src, const void* addr)
+ {
+ if (src == X86Registers::eax)
+ movl_EAXm(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+ }
+
+ void movl_mr(const void* addr, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ movl_mEAX(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
+ }
+
+ void movl_i32m(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
+ m_formatter.immediate32(imm);
+ }
+#endif
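+
+    // Note (illustrative, not part of the original source): OP_MOV_EAXOv and
+    // OP_MOV_OvEAX are the A1/A3 'moffs' forms, which take a full-width absolute
+    // address immediately after the opcode and exist only for eax/rax; the
+    // movl_rm/movl_mr overloads above use them to save the ModRM byte, e.g. on
+    // 32-bit x86:
+    //   movl_mr(addr, eax) emits A1 <addr32>     (5 bytes)
+    //   movl_mr(addr, ecx) emits 8B 0D <addr32>  (6 bytes)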
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
+ }
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
+ }
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
+ }
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst)
+ {
+ // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
+ // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
+ // REX prefixes are defined to be silently ignored by the processor.
+ m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
+ }
+
+ void leal_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_LEA, dst, base, offset);
+ }
+#if CPU(X86_64)
+ void leaq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+ }
+#endif
+
+ // Flow control:
+
+ AssemblerLabel call()
+ {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel call(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
+ return m_formatter.label();
+ }
+
+ void call_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+ }
+
+ AssemblerLabel jmp()
+ {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+    // Return an AssemblerLabel so we have a label to the jump, which we can use
+    // to make a tail-recursive call on x86-64. The MacroAssembler really
+    // shouldn't wrap this as a Jump, since it can't be linked. :-/
+ AssemblerLabel jmp_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+ return m_formatter.label();
+ }
+
+ void jmp_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void jmp_m(const void* address)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
+ }
+#endif
+
+ AssemblerLabel jne()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jnz()
+ {
+ return jne();
+ }
+
+ AssemblerLabel je()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jz()
+ {
+ return je();
+ }
+
+ AssemblerLabel jl()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionL));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jb()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionB));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jle()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionLE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jbe()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionBE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jge()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionGE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jg()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionG));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel ja()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionA));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jae()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionAE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jo()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionO));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jnp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel js()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionS));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jCC(Condition cond)
+ {
+ m_formatter.twoByteOp(jccRel32(cond));
+ return m_formatter.immediateRel32();
+ }
+
+ // SSE operations:
+
+ void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void addsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+
+ void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
+ }
+
+#if CPU(X86_64)
+ void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+#endif
+
+ void movd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+
+#if CPU(X86_64)
+ void movq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+#endif
+
+ void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
+
+ void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
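+
+    // Note (illustrative, not part of the original source): movss_rm/movss_mr
+    // reuse the OP2_MOVSD_* opcode bytes on purpose - with the F3 prefix
+    // (PRE_SSE_F3) instead of F2, the same 0F 10/0F 11 opcodes encode movss
+    // rather than movsd.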
+
+#if !CPU(X86_64)
+ void movsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+ }
+ void movsd_rm(XMMRegisterID src, const void* address)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+ }
+#endif
+
+ void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
+ m_formatter.immediate8(whichWord);
+ }
+
+ void psllq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void psrlq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void por_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ // Misc instructions:
+
+ void int3()
+ {
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ret()
+ {
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void predictNotTaken()
+ {
+ m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_formatter.codeSize();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
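+
+    // Note (illustrative, not part of the original source): label() pads with
+    // NOPs until it is past the tail of the last watchpoint, so no label can
+    // fall inside the maxJumpReplacementSize() (5 byte) window that a
+    // watchpoint jump may later be patched over.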
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ m_formatter.oneByteOp(OP_HLT);
+
+ return label();
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
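+    //
+    // A minimal usage sketch (illustrative, not part of the original source;
+    // 'jumpLocation' and 'newTarget' are hypothetical):
+    //
+    //   X86Assembler masm;
+    //   AssemblerLabel target = masm.label();
+    //   ...                                    // emit code
+    //   AssemblerLabel jump = masm.jmp();
+    //   masm.linkJump(jump, target);           // buffer is still writable
+    //   ...                                    // copy to executable memory
+    //   X86Assembler::relinkJump(jumpLocation, newTarget);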
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(from.isSet());
+ ASSERT(to.isSet());
+
+ char* code = reinterpret_cast<char*>(m_formatter.data());
+ ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
+ setRel32(code + from.m_offset, code + to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ ASSERT(where.isSet());
+
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(value >= std::numeric_limits<int8_t>::min());
+ ASSERT(value <= std::numeric_limits<int8_t>::max());
+ setInt8(where, value);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ setInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ setPointer(where, value);
+ }
+
+ static void* readPointer(void* where)
+ {
+ return reinterpret_cast<void**>(where)[-1];
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
+ intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
+ ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
+ *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
+ }
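+
+    // Note (illustrative, not part of the original source): this overwrites the
+    // first five bytes at instructionStart with E9 <rel32>, a jump whose 32-bit
+    // displacement is relative to the end of the 5-byte instruction - hence
+    // maxJumpReplacementSize() below returns 5.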
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return 5;
+ }
+
+#if CPU(X86_64)
+ static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
+ {
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
+ ptr[1] = OP_MOV_EAXIv | (dst & 7);
+
+ union {
+ uint64_t asWord;
+ uint8_t asBytes[8];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+ }
+#endif
+
+ static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
+ {
+ ASSERT_UNUSED(offset, !offset);
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ break;
+ case OP_LEA:
+ *ptr = OP_MOV_GvEv;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ *ptr = OP_LEA;
+ break;
+ case OP_LEA:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ return m_formatter.executableCopy(globalData, ownerUID, effort);
+ }
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+ void nop()
+ {
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ // This is a no-op on x86
+ ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
+
+private:
+
+ static void setPointer(void* where, void* value)
+ {
+ reinterpret_cast<void**>(where)[-1] = value;
+ }
+
+ static void setInt32(void* where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void setInt8(void* where, int8_t value)
+ {
+ reinterpret_cast<int8_t*>(where)[-1] = value;
+ }
+
+ static void setRel32(void* from, void* to)
+ {
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ ASSERT(offset == static_cast<int32_t>(offset));
+
+ setInt32(from, offset);
+ }
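+
+    // Note (illustrative, not part of the original source): the setters above
+    // take 'where'/'from' pointing just past the field being written (hence the
+    // [-1] indexing). This matches X86InstructionFormatter::immediateRel32(),
+    // which returns a label taken after the 4-byte placeholder has been emitted.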
+
+ class X86InstructionFormatter {
+
+ static const int maxInstructionSize = 16;
+
+ public:
+
+ enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister,
+ };
+
+ // Legacy prefix bytes:
+ //
+        // These are emitted prior to the instruction.
+
+ void prefix(OneByteOpcodeID pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+        //   * Two registers - encode a register-form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+        //   * Three argument ModRM - a register, and a base register plus an offset describing a memory operand.
+ // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a void*.
+        // On 64-bit targets REX prefixes will be planted as necessary, where high-numbered registers are used.
+        //
+        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
+
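+        // Worked example (illustrative, not part of the original source), with
+        // OP_MOV_GvEv == 0x8B, edx == 2, ecx == 1:
+        //   oneByteOp(OP_MOV_GvEv, edx, ecx, 8)
+        // emits 8B 51 08, i.e. movl 8(%ecx), %edx:
+        //   8B  opcode
+        //   51  ModRM: mod=01 (disp8), reg=edx, rm=ecx
+        //   08  8-bit displacement
+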
+ void oneByteOp(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+#if CPU(X86_64)
+ // Quad-word-sized operands:
+ //
+        // Used to format 64-bit operations, planting a REX.w prefix.
+        // When planting d64 or f64 instructions, which do not require a REX.w prefix,
+        // the normal (non-'64'-postfixed) formatters should be used.
+
+ void oneByteOp64(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+#endif
+
+ // Byte-operands:
+ //
+ // These methods format byte operations. Byte operations differ from the normal
+ // formatters in the circumstances under which they will decide to emit REX prefixes.
+ // These should be used where any register operand signifies a byte register.
+ //
+        // The distinction is due to the handling of register numbers in the range 4..7 on
+ // x86-64. These register numbers may either represent the second byte of the first
+ // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
+ //
+ // Since ah..bh cannot be used in all permutations of operands (specifically cannot
+ // be accessed where a REX prefix is present), these are likely best treated as
+ // deprecated. In order to ensure the correct registers spl..dil are selected a
+ // REX prefix will be emitted for any byte register operand in the range 4..15.
+ //
+        // These formatters may be used in instructions that mix operand sizes, in which
+        // case an unnecessary REX will be emitted, for example:
+ // movzbl %al, %edi
+ // In this case a REX will be planted since edi is 7 (and were this a byte operand
+ // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
+ // be silently ignored by the processor.
+ //
+ // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
+ // is provided to check byte register operands.
+
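+        // Worked example (illustrative, not part of the original source): on
+        // x86-64,
+        //   oneByteOp8(OP_TEST_EbGb, edi, edi)   // register number 7
+        // emits 40 84 FF, i.e. testb %dil, %dil - the otherwise-empty REX
+        // prefix (40) selects dil rather than bh for register number 7.
+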
+ void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ // Immediates:
+ //
+        // An immediate should be appended where appropriate after an op has been emitted.
+ // The writes are unchecked since the opcode formatters above will have ensured space.
+
+ void immediate8(int imm)
+ {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ void immediate16(int imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ void immediate32(int imm)
+ {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
+ AssemblerLabel immediateRel32()
+ {
+ m_buffer.putIntUnchecked(0);
+ return label();
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ return m_buffer.executableCopy(globalData, ownerUID, effort);
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ private:
+
+ // Internals; ModRm and REX formatters.
+
+ static const RegisterID noBase = X86Registers::ebp;
+ static const RegisterID hasSib = X86Registers::esp;
+ static const RegisterID noIndex = X86Registers::esp;
+#if CPU(X86_64)
+ static const RegisterID noBase2 = X86Registers::r13;
+ static const RegisterID hasSib2 = X86Registers::r12;
+
+        // Registers r8 & above require a REX prefix.
+ inline bool regRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::r8);
+ }
+
+        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
+ inline bool byteRegRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::esp);
+ }
+
+ // Format a REX prefix byte.
+ inline void emitRex(bool w, int r, int x, int b)
+ {
+ ASSERT(r >= 0);
+ ASSERT(x >= 0);
+ ASSERT(b >= 0);
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+ }
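+
+        // Worked example (illustrative, not part of the original source):
+        //   movq_rr(r9, rax)   // oneByteOp64(OP_MOV_EvGv, r9, rax)
+        // calls emitRexW(9, 0, 0), planting 4C (0100 W=1 R=1 X=0 B=0), so the
+        // full encoding is 4C 89 C8, i.e. movq %r9, %rax.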
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ inline void emitRexW(int r, int x, int b)
+ {
+ emitRex(true, r, x, b);
+ }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+ // regRequiresRex() to check other registers (i.e. address base & index).
+ inline void emitRexIf(bool condition, int r, int x, int b)
+ {
+ if (condition) emitRex(false, r, x, b);
+ }
+
+        // Used for word-sized operations; will plant a REX prefix if necessary (if any register is r8 or above).
+ inline void emitRexIfNeeded(int r, int x, int b)
+ {
+ emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
+ }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ inline bool regRequiresRex(int) { return false; }
+ inline bool byteRegRequiresRex(int) { return false; }
+ inline void emitRexIf(bool, int, int, int) {}
+ inline void emitRexIfNeeded(int, int, int) {}
+#endif
+
+ void putModRm(ModRmMode mode, int reg, RegisterID rm)
+ {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
+ void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(mode != ModRmRegister);
+
+ putModRm(mode, reg, hasSib);
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+ }
+
+ void registerModRM(int reg, RegisterID rm)
+ {
+ putModRm(ModRmRegister, reg, rm);
+ }
+
+ void memoryModRM(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#if CPU(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRm(ModRmMemoryNoDisp, reg, base);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
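+
+        // Worked example (illustrative, not part of the original source): a base
+        // of esp (rm field 100) selects the SIB path above, so
+        //   movl_mr(0, esp, eax)
+        // emits 8B 04 24 - ModRM 04 (mod=00, reg=eax, rm=SIB) followed by
+        // SIB 24 (scale=0, index=none, base=esp).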
+
+ void memoryModRM_disp8(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+ ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ }
+ }
+
+ void memoryModRM_disp32(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ ASSERT(index != noIndex);
+
+#if CPU(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+#if !CPU(X86_64)
+ void memoryModRM(int reg, const void* address)
+ {
+ // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
+ putModRm(ModRmMemoryNoDisp, reg, noBase);
+ m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
+ }
+#endif
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
+
+#endif // X86Assembler_h
diff --git a/src/3rdparty/masm/config.h b/src/3rdparty/masm/config.h
new file mode 100644
index 0000000000..5f59f311e3
--- /dev/null
+++ b/src/3rdparty/masm/config.h
@@ -0,0 +1,56 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef MASM_CONFIG_H
+#define MASM_CONFIG_H
+
+#include <wtf/Platform.h>
+#ifdef __cplusplus
+#include <wtf/Vector.h>
+#include <wtf/FastAllocBase.h>
+#include <wtf/RefPtr.h>
+#include <cmath>
+#else
+#include <math.h>
+#endif
+#include <limits.h>
+
+#endif // MASM_CONFIG_H
diff --git a/src/3rdparty/masm/create_regex_tables b/src/3rdparty/masm/create_regex_tables
new file mode 100644
index 0000000000..7544b75cd9
--- /dev/null
+++ b/src/3rdparty/masm/create_regex_tables
@@ -0,0 +1,121 @@
+# Copyright (C) 2010, 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
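+
+# Usage sketch (an assumption inferred from the argument handling below):
+#   python create_regex_tables [--no-tables] [output-file]
+# When no output file is given, the generated arrays and functions are
+# printed to stdout.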
+
+types = {
+ "wordchar": { "UseTable" : True, "data": ['_', ('0','9'), ('A', 'Z'), ('a','z')]},
+ "nonwordchar": { "UseTable" : True, "Inverse": "wordchar", "data": ['`', (0, ord('0') - 1), (ord('9') + 1, ord('A') - 1), (ord('Z') + 1, ord('_') - 1), (ord('z') + 1, 0xffff)]},
+ "newline": { "UseTable" : False, "data": ['\n', '\r', 0x2028, 0x2029]},
+ "spaces": { "UseTable" : True, "data": [' ', ('\t', '\r'), 0xa0, 0x1680, 0x180e, 0x2028, 0x2029, 0x202f, 0x205f, 0x3000, (0x2000, 0x200a), 0xfeff]},
+ "nonspaces": { "UseTable" : True, "Inverse": "spaces", "data": [(0, ord('\t') - 1), (ord('\r') + 1, ord(' ') - 1), (ord(' ') + 1, 0x009f), (0x00a1, 0x167f), (0x1681, 0x180d), (0x180f, 0x1fff), (0x200b, 0x2027), (0x202a, 0x202e), (0x2030, 0x205e), (0x2060, 0x2fff), (0x3001, 0xfefe), (0xff00, 0xffff)]},
+ "digits": { "UseTable" : False, "data": [('0', '9')]},
+ "nondigits": { "UseTable" : False, "Inverse": "digits", "data": [(0, ord('0') - 1), (ord('9') + 1, 0xffff)] }
+}
+entriesPerLine = 50
+arrays = "";
+functions = "";
+emitTables = (len(sys.argv) < 2 or sys.argv[1] != "--no-tables")
+
+for name, classes in types.items():
+ ranges = [];
+ size = 0;
+ for _class in classes["data"]:
+ if type(_class) == str:
+ ranges.append((ord(_class), ord(_class)))
+ elif type(_class) == int:
+ ranges.append((_class, _class))
+ else:
+ (min, max) = _class;
+ if type(min) == str:
+ min = ord(min)
+ if type(max) == str:
+ max = ord(max)
+ if max > 0x7f and min <= 0x7f:
+ ranges.append((min, 0x7f))
+ min = 0x80
+ ranges.append((min,max))
+ ranges.sort();
+
+ if emitTables and classes["UseTable"] and (not "Inverse" in classes):
+ array = ("static const char _%sData[65536] = {\n" % name);
+ i = 0
+ for (min,max) in ranges:
+ while i < min:
+ i = i + 1
+ array += ('0,')
+ if (i % entriesPerLine == 0) and (i != 0):
+ array += ('\n')
+ while i <= max:
+ i = i + 1
+ if (i == 65536):
+ array += ("1")
+ else:
+ array += ('1,')
+ if (i % entriesPerLine == 0) and (i != 0):
+ array += ('\n')
+ while i < 0xffff:
+ array += ("0,")
+ i = i + 1;
+ if (i % entriesPerLine == 0) and (i != 0):
+ array += ('\n')
+ if i == 0xffff:
+ array += ("0")
+ array += ("\n};\n\n");
+ arrays += array
+
+ # Generate createFunction:
+ function = "";
+ function += ("CharacterClass* %sCreate()\n" % name)
+ function += ("{\n")
+ if emitTables and classes["UseTable"]:
+ if "Inverse" in classes:
+ function += (" CharacterClass* characterClass = new CharacterClass(_%sData, true);\n" % (classes["Inverse"]))
+ else:
+ function += (" CharacterClass* characterClass = new CharacterClass(_%sData, false);\n" % (name))
+ else:
+ function += (" CharacterClass* characterClass = new CharacterClass;\n")
+ for (min, max) in ranges:
+ if (min == max):
+ if (min > 127):
+ function += (" characterClass->m_matchesUnicode.append(0x%04x);\n" % min)
+ else:
+ function += (" characterClass->m_matches.append(0x%02x);\n" % min)
+ continue
+ if (min > 127) or (max > 127):
+ function += (" characterClass->m_rangesUnicode.append(CharacterRange(0x%04x, 0x%04x));\n" % (min, max))
+ else:
+ function += (" characterClass->m_ranges.append(CharacterRange(0x%02x, 0x%02x));\n" % (min, max))
+ function += (" return characterClass;\n")
+ function += ("}\n\n")
+ functions += function
+
+if (len(sys.argv) > 1):
+ f = open(sys.argv[-1], "w")
+ f.write(arrays)
+ f.write(functions)
+ f.close()
+else:
+ print(arrays)
+ print(functions)
+
diff --git a/src/3rdparty/masm/disassembler/Disassembler.cpp b/src/3rdparty/masm/disassembler/Disassembler.cpp
new file mode 100644
index 0000000000..3fed2cdab8
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/Disassembler.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Disassembler.h"
+
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/DataLog.h>
+
+namespace JSC {
+
+void disassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out)
+{
+ if (tryToDisassemble(codePtr, size, prefix, out))
+ return;
+
+ out.printf("%sdisassembly not available for range %p...%p\n", prefix, codePtr.executableAddress(), static_cast<char*>(codePtr.executableAddress()) + size);
+}
+
+} // namespace JSC
+
diff --git a/src/3rdparty/masm/disassembler/Disassembler.h b/src/3rdparty/masm/disassembler/Disassembler.h
new file mode 100644
index 0000000000..a087a657b3
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/Disassembler.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Disassembler_h
+#define Disassembler_h
+
+#include <wtf/Platform.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class MacroAssemblerCodePtr;
+
+#if ENABLE(DISASSEMBLER)
+bool tryToDisassemble(const MacroAssemblerCodePtr&, size_t, const char* prefix, PrintStream&);
+#else
+inline bool tryToDisassemble(const MacroAssemblerCodePtr&, size_t, const char*, PrintStream&)
+{
+ return false;
+}
+#endif
+
+// Prints either the disassembly, or a line of text indicating that disassembly failed and
+// the range of machine code addresses.
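+// Illustrative use (assuming a MacroAssemblerCodePtr 'codePtr' covering 'size'
+// bytes of generated code), dumping it to WTF's data log with an indent prefix:
+//     disassemble(codePtr, size, "    ", WTF::dataFile());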
+void disassemble(const MacroAssemblerCodePtr&, size_t, const char* prefix, PrintStream& out);
+
+} // namespace JSC
+
+#endif // Disassembler_h
+
diff --git a/src/3rdparty/masm/disassembler/UDis86Disassembler.cpp b/src/3rdparty/masm/disassembler/UDis86Disassembler.cpp
new file mode 100644
index 0000000000..63c235b920
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/UDis86Disassembler.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Disassembler.h"
+
+#if USE(UDIS86)
+
+#include "MacroAssemblerCodeRef.h"
+#include "udis86.h"
+
+namespace JSC {
+
+bool tryToDisassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out)
+{
+ ud_t disassembler;
+ ud_init(&disassembler);
+ ud_set_input_buffer(&disassembler, static_cast<unsigned char*>(codePtr.executableAddress()), size);
+#if CPU(X86_64)
+ ud_set_mode(&disassembler, 64);
+#else
+ ud_set_mode(&disassembler, 32);
+#endif
+ ud_set_pc(&disassembler, bitwise_cast<uintptr_t>(codePtr.executableAddress()));
+ ud_set_syntax(&disassembler, UD_SYN_ATT);
+
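+ // ud_disassemble() advances disassembler.pc past each instruction it
+ // decodes, so capture the pre-decode PC used to label each output line.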
+ uint64_t currentPC = disassembler.pc;
+ while (ud_disassemble(&disassembler)) {
+ char pcString[20];
+ snprintf(pcString, sizeof(pcString), "0x%lx", static_cast<unsigned long>(currentPC));
+ out.printf("%s%16s: %s\n", prefix, pcString, ud_insn_asm(&disassembler));
+ currentPC = disassembler.pc;
+ }
+
+ return true;
+}
+
+} // namespace JSC
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/differences.txt b/src/3rdparty/masm/disassembler/udis86/differences.txt
new file mode 100644
index 0000000000..dc225b6ffe
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/differences.txt
@@ -0,0 +1,24 @@
+This documents the differences between the stock version of udis86 and the one found
+here:
+
+- All files not already named "udis86" were renamed with a "udis86" prefix.
+
+- assert() has been changed to ASSERT().
+
+- Mass rename of udis86_input.h inp_ prefixed functions and macros to ud_inp_ to
+ avoid namespace pollution.
+
+- Removal of KERNEL checks.
+
+- Added #include of udis86_extern.h in udis86_decode.c.
+
+- Removed s_ie__pause and s_ie__nop from udis86_decode.c, since they weren't used.
+
+- Made udis86_syn.h use WTF_ATTRIBUTE_PRINTF. This required a number of small
+  fixes to silence the compiler's format string warnings.
+
+- Made the code in udis86_syn.h use vsnprintf() instead of vsprintf().
+
+- Fixed udis86_syn-att.c's jump destination printing to work correctly in 64-bit mode.
+
+- Added --outputDir option to itab.py.
diff --git a/src/3rdparty/masm/disassembler/udis86/itab.py b/src/3rdparty/masm/disassembler/udis86/itab.py
new file mode 100644
index 0000000000..07e20a6e10
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/itab.py
@@ -0,0 +1,360 @@
+# udis86 - scripts/itab.py
+#
+# Copyright (c) 2009 Vivek Thampi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import OptionParser
+import os
+import sys
+
+sys.path.append( '../scripts' );
+
+import ud_optable
+import ud_opcode
+
+class UdItabGenerator( ud_opcode.UdOpcodeTables ):
+
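+ #
+ # operand dictionary: maps an optable.xml operand tag to its
+ # ( operand type, operand size ) pair in the generated tables
+ #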
+ OperandDict = {
+ "Ap" : [ "OP_A" , "SZ_P" ],
+ "E" : [ "OP_E" , "SZ_NA" ],
+ "Eb" : [ "OP_E" , "SZ_B" ],
+ "Ew" : [ "OP_E" , "SZ_W" ],
+ "Ev" : [ "OP_E" , "SZ_V" ],
+ "Ed" : [ "OP_E" , "SZ_D" ],
+ "Eq" : [ "OP_E" , "SZ_Q" ],
+ "Ez" : [ "OP_E" , "SZ_Z" ],
+ "Ex" : [ "OP_E" , "SZ_MDQ" ],
+ "Ep" : [ "OP_E" , "SZ_P" ],
+ "G" : [ "OP_G" , "SZ_NA" ],
+ "Gb" : [ "OP_G" , "SZ_B" ],
+ "Gw" : [ "OP_G" , "SZ_W" ],
+ "Gv" : [ "OP_G" , "SZ_V" ],
+ "Gy" : [ "OP_G" , "SZ_MDQ" ],
+ "Gy" : [ "OP_G" , "SZ_MDQ" ],
+ "Gd" : [ "OP_G" , "SZ_D" ],
+ "Gq" : [ "OP_G" , "SZ_Q" ],
+ "Gx" : [ "OP_G" , "SZ_MDQ" ],
+ "Gz" : [ "OP_G" , "SZ_Z" ],
+ "M" : [ "OP_M" , "SZ_NA" ],
+ "Mb" : [ "OP_M" , "SZ_B" ],
+ "Mw" : [ "OP_M" , "SZ_W" ],
+ "Ms" : [ "OP_M" , "SZ_W" ],
+ "Md" : [ "OP_M" , "SZ_D" ],
+ "Mq" : [ "OP_M" , "SZ_Q" ],
+ "Mt" : [ "OP_M" , "SZ_T" ],
+ "Mo" : [ "OP_M" , "SZ_O" ],
+ "MwRv" : [ "OP_MR" , "SZ_WV" ],
+ "MdRy" : [ "OP_MR" , "SZ_DY" ],
+ "MbRv" : [ "OP_MR" , "SZ_BV" ],
+ "I1" : [ "OP_I1" , "SZ_NA" ],
+ "I3" : [ "OP_I3" , "SZ_NA" ],
+ "Ib" : [ "OP_I" , "SZ_B" ],
+ "Isb" : [ "OP_I" , "SZ_SB" ],
+ "Iw" : [ "OP_I" , "SZ_W" ],
+ "Iv" : [ "OP_I" , "SZ_V" ],
+ "Iz" : [ "OP_I" , "SZ_Z" ],
+ "Jv" : [ "OP_J" , "SZ_V" ],
+ "Jz" : [ "OP_J" , "SZ_Z" ],
+ "Jb" : [ "OP_J" , "SZ_B" ],
+ "R" : [ "OP_R" , "SZ_RDQ" ],
+ "C" : [ "OP_C" , "SZ_NA" ],
+ "D" : [ "OP_D" , "SZ_NA" ],
+ "S" : [ "OP_S" , "SZ_NA" ],
+ "Ob" : [ "OP_O" , "SZ_B" ],
+ "Ow" : [ "OP_O" , "SZ_W" ],
+ "Ov" : [ "OP_O" , "SZ_V" ],
+ "V" : [ "OP_V" , "SZ_O" ],
+ "W" : [ "OP_W" , "SZ_O" ],
+ "Wsd" : [ "OP_W" , "SZ_O" ],
+ "Wss" : [ "OP_W" , "SZ_O" ],
+ "P" : [ "OP_P" , "SZ_Q" ],
+ "Q" : [ "OP_Q" , "SZ_Q" ],
+ "VR" : [ "OP_VR" , "SZ_O" ],
+ "PR" : [ "OP_PR" , "SZ_Q" ],
+ "AL" : [ "OP_AL" , "SZ_NA" ],
+ "CL" : [ "OP_CL" , "SZ_NA" ],
+ "DL" : [ "OP_DL" , "SZ_NA" ],
+ "BL" : [ "OP_BL" , "SZ_NA" ],
+ "AH" : [ "OP_AH" , "SZ_NA" ],
+ "CH" : [ "OP_CH" , "SZ_NA" ],
+ "DH" : [ "OP_DH" , "SZ_NA" ],
+ "BH" : [ "OP_BH" , "SZ_NA" ],
+ "AX" : [ "OP_AX" , "SZ_NA" ],
+ "CX" : [ "OP_CX" , "SZ_NA" ],
+ "DX" : [ "OP_DX" , "SZ_NA" ],
+ "BX" : [ "OP_BX" , "SZ_NA" ],
+ "SI" : [ "OP_SI" , "SZ_NA" ],
+ "DI" : [ "OP_DI" , "SZ_NA" ],
+ "SP" : [ "OP_SP" , "SZ_NA" ],
+ "BP" : [ "OP_BP" , "SZ_NA" ],
+ "eAX" : [ "OP_eAX" , "SZ_NA" ],
+ "eCX" : [ "OP_eCX" , "SZ_NA" ],
+ "eDX" : [ "OP_eDX" , "SZ_NA" ],
+ "eBX" : [ "OP_eBX" , "SZ_NA" ],
+ "eSI" : [ "OP_eSI" , "SZ_NA" ],
+ "eDI" : [ "OP_eDI" , "SZ_NA" ],
+ "eSP" : [ "OP_eSP" , "SZ_NA" ],
+ "eBP" : [ "OP_eBP" , "SZ_NA" ],
+ "rAX" : [ "OP_rAX" , "SZ_NA" ],
+ "rCX" : [ "OP_rCX" , "SZ_NA" ],
+ "rBX" : [ "OP_rBX" , "SZ_NA" ],
+ "rDX" : [ "OP_rDX" , "SZ_NA" ],
+ "rSI" : [ "OP_rSI" , "SZ_NA" ],
+ "rDI" : [ "OP_rDI" , "SZ_NA" ],
+ "rSP" : [ "OP_rSP" , "SZ_NA" ],
+ "rBP" : [ "OP_rBP" , "SZ_NA" ],
+ "ES" : [ "OP_ES" , "SZ_NA" ],
+ "CS" : [ "OP_CS" , "SZ_NA" ],
+ "DS" : [ "OP_DS" , "SZ_NA" ],
+ "SS" : [ "OP_SS" , "SZ_NA" ],
+ "GS" : [ "OP_GS" , "SZ_NA" ],
+ "FS" : [ "OP_FS" , "SZ_NA" ],
+ "ST0" : [ "OP_ST0" , "SZ_NA" ],
+ "ST1" : [ "OP_ST1" , "SZ_NA" ],
+ "ST2" : [ "OP_ST2" , "SZ_NA" ],
+ "ST3" : [ "OP_ST3" , "SZ_NA" ],
+ "ST4" : [ "OP_ST4" , "SZ_NA" ],
+ "ST5" : [ "OP_ST5" , "SZ_NA" ],
+ "ST6" : [ "OP_ST6" , "SZ_NA" ],
+ "ST7" : [ "OP_ST7" , "SZ_NA" ],
+ "NONE" : [ "OP_NONE" , "SZ_NA" ],
+ "ALr8b" : [ "OP_ALr8b" , "SZ_NA" ],
+ "CLr9b" : [ "OP_CLr9b" , "SZ_NA" ],
+ "DLr10b" : [ "OP_DLr10b" , "SZ_NA" ],
+ "BLr11b" : [ "OP_BLr11b" , "SZ_NA" ],
+ "AHr12b" : [ "OP_AHr12b" , "SZ_NA" ],
+ "CHr13b" : [ "OP_CHr13b" , "SZ_NA" ],
+ "DHr14b" : [ "OP_DHr14b" , "SZ_NA" ],
+ "BHr15b" : [ "OP_BHr15b" , "SZ_NA" ],
+ "rAXr8" : [ "OP_rAXr8" , "SZ_NA" ],
+ "rCXr9" : [ "OP_rCXr9" , "SZ_NA" ],
+ "rDXr10" : [ "OP_rDXr10" , "SZ_NA" ],
+ "rBXr11" : [ "OP_rBXr11" , "SZ_NA" ],
+ "rSPr12" : [ "OP_rSPr12" , "SZ_NA" ],
+ "rBPr13" : [ "OP_rBPr13" , "SZ_NA" ],
+ "rSIr14" : [ "OP_rSIr14" , "SZ_NA" ],
+ "rDIr15" : [ "OP_rDIr15" , "SZ_NA" ],
+ "jWP" : [ "OP_J" , "SZ_WP" ],
+ "jDP" : [ "OP_J" , "SZ_DP" ],
+
+ }
+
+ #
+ # opcode prefix dictionary
+ #
+ PrefixDict = {
+ "aso" : "P_aso",
+ "oso" : "P_oso",
+ "rexw" : "P_rexw",
+ "rexb" : "P_rexb",
+ "rexx" : "P_rexx",
+ "rexr" : "P_rexr",
+ "seg" : "P_seg",
+ "inv64" : "P_inv64",
+ "def64" : "P_def64",
+ "depM" : "P_depM",
+ "cast1" : "P_c1",
+ "cast2" : "P_c2",
+ "cast3" : "P_c3",
+ "cast" : "P_cast",
+ "sext" : "P_sext"
+ }
+
+ InvalidEntryIdx = 0
+ InvalidEntry = { 'type' : 'invalid',
+ 'mnemonic' : 'invalid',
+ 'operands' : '',
+ 'prefixes' : '',
+ 'meta' : '' }
+
+ Itab = [] # instruction table
+ ItabIdx = 1 # instruction table index
+ GtabIdx = 0 # group table index
+ GtabMeta = []
+
+ ItabLookup = {}
+
+ MnemonicAliases = ( "invalid", "3dnow", "none", "db", "pause" )
+
+ def __init__( self, outputDir ):
+ # first itab entry (0) is Invalid
+ self.Itab.append( self.InvalidEntry )
+ self.MnemonicsTable.extend( self.MnemonicAliases )
+ self.outputDir = outputDir
+
+ def toGroupId( self, id ):
+ return 0x8000 | id
+
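+ # Recursively emits one decode table: leaf 'insn' entries are appended to
+ # Itab and indexed directly, while nested tables are emitted as their own
+ # uint16_t lookup arrays and referenced through a group id, i.e. the table
+ # index with bit 15 set (see toGroupId() and the GROUP(n) macro).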
+ def genLookupTable( self, table, scope = '' ):
+ idxArray = [ ]
+ ( tabIdx, self.GtabIdx ) = ( self.GtabIdx, self.GtabIdx + 1 )
+ self.GtabMeta.append( { 'type' : table[ 'type' ], 'meta' : table[ 'meta' ] } )
+
+ for _idx in range( self.sizeOfTable( table[ 'type' ] ) ):
+ idx = "%02x" % _idx
+
+ e = self.InvalidEntry
+ i = self.InvalidEntryIdx
+
+ if idx in table[ 'entries' ].keys():
+ e = table[ 'entries' ][ idx ]
+
+ # leaf node (insn)
+ if e[ 'type' ] == 'insn':
+ ( i, self.ItabIdx ) = ( self.ItabIdx, self.ItabIdx + 1 )
+ self.Itab.append( e )
+ elif e[ 'type' ] != 'invalid':
+ i = self.genLookupTable( e, 'static' )
+
+ idxArray.append( i )
+
+ name = "ud_itab__%s" % tabIdx
+ self.ItabLookup[ tabIdx ] = name
+
+ self.ItabC.write( "\n" );
+ if len( scope ):
+ self.ItabC.write( scope + ' ' )
+ self.ItabC.write( "const uint16_t %s[] = {\n" % name )
+ for i in range( len( idxArray ) ):
+ if i > 0 and i % 4 == 0:
+ self.ItabC.write( "\n" )
+ if ( i%4 == 0 ):
+ self.ItabC.write( " /* %2x */" % i)
+ if idxArray[ i ] >= 0x8000:
+ self.ItabC.write( "%12s," % ("GROUP(%d)" % ( ~0x8000 & idxArray[ i ] )))
+ else:
+ self.ItabC.write( "%12d," % ( idxArray[ i ] ))
+ self.ItabC.write( "\n" )
+ self.ItabC.write( "};\n" )
+
+ return self.toGroupId( tabIdx )
+
+ def genLookupTableList( self ):
+ self.ItabC.write( "\n\n" );
+ self.ItabC.write( "struct ud_lookup_table_list_entry ud_lookup_table_list[] = {\n" )
+ for i in range( len( self.GtabMeta ) ):
+ f0 = self.ItabLookup[ i ] + ","
+ f1 = ( self.nameOfTable( self.GtabMeta[ i ][ 'type' ] ) ) + ","
+ f2 = "\"%s\"" % self.GtabMeta[ i ][ 'meta' ]
+ self.ItabC.write( " /* %03d */ { %s %s %s },\n" % ( i, f0, f1, f2 ) )
+ self.ItabC.write( "};" )
+
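+ # Emits ud_itab[]: one { mnemonic, operands, prefix-flags } entry per
+ # instruction definition, followed by the mnemonic string table.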
+ def genInsnTable( self ):
+ self.ItabC.write( "struct ud_itab_entry ud_itab[] = {\n" );
+ idx = 0
+ for e in self.Itab:
+ opr_c = [ "O_NONE", "O_NONE", "O_NONE" ]
+ pfx_c = []
+ opr = e[ 'operands' ]
+ for i in range(len(opr)):
+ if not (opr[i] in self.OperandDict.keys()):
+ print "error: invalid operand declaration: %s\n" % opr[i]
+ opr_c[i] = "O_" + opr[i]
+ opr = "%s %s %s" % (opr_c[0] + ",", opr_c[1] + ",", opr_c[2])
+
+ for p in e['prefixes']:
+ if not ( p in self.PrefixDict.keys() ):
+ print "error: invalid prefix specification: %s \n" % pfx
+ pfx_c.append( self.PrefixDict[p] )
+ if len(e['prefixes']) == 0:
+ pfx_c.append( "P_none" )
+ pfx = "|".join( pfx_c )
+
+ self.ItabC.write( " /* %04d */ { UD_I%s %s, %s },\n" \
+ % ( idx, e[ 'mnemonic' ] + ',', opr, pfx ) )
+ idx += 1
+ self.ItabC.write( "};\n" )
+
+ self.ItabC.write( "\n\n" );
+ self.ItabC.write( "const char * ud_mnemonics_str[] = {\n" )
+ self.ItabC.write( ",\n ".join( [ "\"%s\"" % m for m in self.MnemonicsTable ] ) )
+ self.ItabC.write( "\n};\n" )
+
+
+ def genItabH( self ):
+ self.ItabH = open( os.path.join(self.outputDir, "udis86_itab.h"), "w" )
+
+ # Generate Table Type Enumeration
+ self.ItabH.write( "#ifndef UD_ITAB_H\n" )
+ self.ItabH.write( "#define UD_ITAB_H\n\n" )
+
+ # table type enumeration
+ self.ItabH.write( "/* ud_table_type -- lookup table types (see lookup.c) */\n" )
+ self.ItabH.write( "enum ud_table_type {\n " )
+ enum = [ self.TableInfo[ k ][ 'name' ] for k in self.TableInfo.keys() ]
+ self.ItabH.write( ",\n ".join( enum ) )
+ self.ItabH.write( "\n};\n\n" );
+
+ # mnemonic enumeration
+ self.ItabH.write( "/* ud_mnemonic -- mnemonic constants */\n" )
+ enum = "enum ud_mnemonic_code {\n "
+ enum += ",\n ".join( [ "UD_I%s" % m for m in self.MnemonicsTable ] )
+ enum += "\n} UD_ATTR_PACKED;\n"
+ self.ItabH.write( enum )
+ self.ItabH.write( "\n" )
+
+ self.ItabH.write("\n/* itab entry operand definitions */\n");
+ operands = self.OperandDict.keys()
+ operands.sort()
+ for o in operands:
+ self.ItabH.write("#define O_%-7s { %-12s %-8s }\n" %
+ (o, self.OperandDict[o][0] + ",", self.OperandDict[o][1]));
+ self.ItabH.write("\n\n");
+
+ self.ItabH.write( "extern const char * ud_mnemonics_str[];\n" )
+
+ self.ItabH.write( "#define GROUP(n) (0x8000 | (n))" )
+
+ self.ItabH.write( "\n#endif /* UD_ITAB_H */\n" )
+
+ self.ItabH.close()
+
+
+ def genItabC( self ):
+ self.ItabC = open( os.path.join(self.outputDir, "udis86_itab.c"), "w" )
+ self.ItabC.write( "/* itab.c -- generated by itab.py, do no edit" )
+ self.ItabC.write( " */\n" );
+ self.ItabC.write( "#include \"udis86_decode.h\"\n\n" );
+
+ self.genLookupTable( self.OpcodeTable0 )
+ self.genLookupTableList()
+ self.genInsnTable()
+
+ self.ItabC.close()
+
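+ # Generates both outputs: udis86_itab.c (lookup tables, instruction table
+ # and mnemonic strings) and udis86_itab.h (table/mnemonic enums plus the
+ # O_* operand definition macros).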
+ def genItab( self ):
+ self.genItabC()
+ self.genItabH()
+
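+# Usage (per the parser below): itab.py [--outputDir DIR] optable.xml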
+def main():
+ parser = OptionParser()
+ parser.add_option("--outputDir", dest="outputDir", default="")
+ options, args = parser.parse_args()
+ generator = UdItabGenerator(os.path.normpath(options.outputDir))
+ optableXmlParser = ud_optable.UdOptableXmlParser()
+ optableXmlParser.parse( args[ 0 ], generator.addInsnDef )
+
+ generator.genItab()
+
+if __name__ == '__main__':
+ main()
diff --git a/src/3rdparty/masm/disassembler/udis86/optable.xml b/src/3rdparty/masm/disassembler/udis86/optable.xml
new file mode 100644
index 0000000000..14b4ac5935
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/optable.xml
@@ -0,0 +1,8959 @@
+<?xml version="1.0"?>
+<?xml-stylesheet href="optable.xsl" type="text/xsl"?>
+<x86optable>
+
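+ <!--
+   Schema note: each instruction carries one or more def encodings. pfx
+   lists the instruction prefixes an encoding accepts; opc gives the opcode
+   bytes plus selectors such as /reg=, /mod=, /rm=, /o= (operand size),
+   /m= (mode) and /x87=; opr names the operands (keyed to OperandDict in
+   itab.py); mode adds constraints such as inv64 or def64; syn appears to
+   carry extra decode flags such as sext.
+ -->
+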
+ <instruction>
+ <mnemonic>aaa</mnemonic>
+ <def>
+ <opc>37</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>aad</mnemonic>
+ <def>
+ <opc>d5</opc>
+ <opr>Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>aam</mnemonic>
+ <def>
+ <opc>d4</opc>
+ <opr>Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>aas</mnemonic>
+ <def>
+ <opc>3f</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>adc</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>10</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>11</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>12</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>13</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>14</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>15</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=2</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=2</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=2</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=2</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>add</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>00</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>01</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>02</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>03</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>04</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>05</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=0</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=0</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <!--
+ SSE2
+ -->
+
+ <instruction>
+ <mnemonic>addpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>and</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>20</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>21</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>22</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>23</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>24</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>25</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>80 /reg=4</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=4</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=4</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=4</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 54</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 54</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andnpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 55</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andnps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 55</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>arpl</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>63 /m=16</opc>
+ <opr>Ew Gw</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso</pfx>
+ <opc>63 /m=32</opc>
+ <opr>Ew Gw</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsxd</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexx rexr rexb</pfx>
+ <opc>63 /m=64</opc>
+ <opr>Gv Ed</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bound</mnemonic>
+ <def>
+ <pfx>aso oso</pfx>
+ <opc>62</opc>
+ <opr>Gv M</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bsf</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bc</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bsr</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bd</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bswap</mnemonic>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f c8</opc>
+ <opr>rAXr8</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f c9</opc>
+ <opr>rCXr9</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f ca</opc>
+ <opr>rDXr10</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cb</opc>
+ <opr>rBXr11</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cc</opc>
+ <opr>rSPr12</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cd</opc>
+ <opr>rBPr13</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f ce</opc>
+ <opr>rSIr14</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cf</opc>
+ <opr>rDIr15</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bt</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=4</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f a3</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>btc</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bb</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=7</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>btr</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b3</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=6</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bts</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ab</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=5</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>call</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=2</opc>
+ <opr>Ev</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=3</opc>
+ <opr>Ep</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e8</opc>
+ <opr>Jz</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9a</opc>
+ <opr>Ap</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cbw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>98 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cwde</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>98 /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cdqe</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>98 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clc</mnemonic>
+ <def>
+ <opc>f8</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cld</mnemonic>
+ <def>
+ <opc>fc</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clflush</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /reg=7 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clgi</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=5</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cli</mnemonic>
+ <def>
+ <opc>fa</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clts</mnemonic>
+ <def>
+ <opc>0f 06</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmc</mnemonic>
+ <def>
+ <opc>f5</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovo</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 40</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovno</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 41</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovb</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 42</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovae</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 43</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovz</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 44</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovnz</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 45</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovbe</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 46</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmova</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 47</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovs</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 48</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovns</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 49</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovp</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4a</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovnp</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovl</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4c</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovge</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4d</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovle</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4e</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovg</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4f</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmp</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>38</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>39</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>3a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>3b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>3c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>3d</opc>
+ <opr>rAX Iz</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=7</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=7</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=7</opc>
+ <opr>Ev Iz</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=7</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmppd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsb</mnemonic>
+ <def>
+ <opc>a6</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a7 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a7 /o=32</opc>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a7 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpxchg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f b0</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b1</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpxchg8b</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c7 /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>comisd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>comiss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cpuid</mnemonic>
+ <def>
+ <opc>0f a2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtdq2pd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f e6</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtdq2ps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpd2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f e6</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpd2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2d</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpd2ps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpi2ps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2a</opc>
+ <opr>V Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpi2pd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2a</opc>
+ <opr>V Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtps2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtps2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2d</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtps2pd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsd2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef2 0f 2d</opc>
+ <opr>Gy W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsd2ss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsi2ss</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef3 0f 2a</opc>
+ <opr>V Ex</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtss2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef3 0f 2d</opc>
+ <opr>Gy W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtss2sd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttpd2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2c</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttpd2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e6</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttps2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttps2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2c</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttsd2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef2 0f 2c</opc>
+ <opr>Gy Wsd</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsi2sd</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef2 0f 2a</opc>
+ <opr>V Ex</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttss2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef3 0f 2c</opc>
+ <opr>Gy Wsd</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cwd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>99 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cdq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>99 /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cqo</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>99 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>daa</mnemonic>
+ <def>
+ <opc>27</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>das</mnemonic>
+ <def>
+ <opc>2f</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>dec</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>48</opc>
+ <opr>eAX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>49</opc>
+ <opr>eCX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4a</opc>
+ <opr>eDX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4b</opc>
+ <opr>eBX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4c</opc>
+ <opr>eSP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4d</opc>
+ <opr>eBP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4e</opc>
+ <opr>eSI</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4f</opc>
+ <opr>eDI</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>fe /reg=1</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=1</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>div</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=6</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=6</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>emms</mnemonic>
+ <def>
+ <opc>0f 77</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>enter</mnemonic>
+ <def>
+ <opc>c8</opc>
+ <opr>Iw Ib</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>f2xm1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=30</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fabs</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=21</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fadd</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=0</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=01</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=02</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=03</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=04</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=05</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=06</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=07</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=01</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=02</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=03</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=04</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=05</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=06</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=07</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>faddp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=01</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=02</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=03</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=04</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=05</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=06</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=07</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fbld</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=4</opc>
+ <opr>Mt</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fbstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=6</opc>
+ <opr>Mt</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fchs</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=20</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fclex</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=22</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovb</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=01</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=02</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=03</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=04</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=05</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=06</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=07</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmove</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovbe</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=10</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=11</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=12</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=13</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=14</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=15</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=16</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=17</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovu</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=18</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=19</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovnb</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=01</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=02</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=03</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=04</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=05</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=06</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=07</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovne</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovnbe</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=10</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=11</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=12</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=13</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=14</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=15</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=16</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=17</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovnu</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=18</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=19</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucomi</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=29</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcom</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=2</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=10</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=11</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=12</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=13</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=14</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=15</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=16</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=17</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcom2</mnemonic>
+ <class>X87 UNDOC</class>
+ <def>
+ <opc>dc /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomp3</mnemonic>
+ <class>X87 UNDOC</class>
+ <def>
+ <opc>dc /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomi</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=31</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=32</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=33</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=34</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=35</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=36</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=37</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucomip</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=29</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomip</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=31</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=32</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=33</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=34</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=35</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=36</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=37</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=3</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=18</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=19</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomp5</mnemonic>
+ <class>X87 UNDOC</class>
+ <def>
+ <opc>de /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcompp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=19</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcos</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3f</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdecstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=36</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdiv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=38</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=39</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=6</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=31</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=32</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=33</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=34</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=35</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=36</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=37</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdivp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=38</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=39</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdivr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=7</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=31</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=32</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=33</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=34</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=35</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=36</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=37</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=7</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=38</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=39</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdivrp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=31</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=32</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=33</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=34</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=35</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=36</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=37</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>femms</mnemonic>
+ <def>
+ <opc>0f 0e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ffree</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=00</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=01</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=02</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=03</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=04</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=05</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=06</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=07</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ffreep</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=00</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=01</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=02</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=03</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=04</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=05</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=06</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=07</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ficom</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=2</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ficomp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=3</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fild</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=0</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=5</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fncstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=37</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fninit</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=23</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fiadd</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=0</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fidivr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=7</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=7</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fidiv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=6</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=6</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fisub</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=4</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=4</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fisubr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=5</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=5</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fist</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=2</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fistp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=3</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=7</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fisttp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=1</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=1</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=1</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fld</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=5</opc>
+ <opr>Mt</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=0</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=00</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=01</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=02</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=03</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=04</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=05</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=06</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=07</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fld1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=28</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldl2t</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=29</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldl2e</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2a</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldlpi</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldlg2</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2c</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldln2</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2d</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldz</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldcw</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=5</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldenv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=4</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fmul</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=1</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=09</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=1</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fmulp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=09</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fimul</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=1</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=1</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnop</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=10</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fpatan</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=33</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fprem</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=38</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fprem1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=35</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fptan</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>frndint</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3c</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>frstor</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=4</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnsave</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=6</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fscale</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3d</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsin</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsincos</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsqrt</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3a</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=7</opc>
+ <opr>Mt</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=3</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp1</mnemonic>
+ <def>
+ <opc>d9 /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp8</mnemonic>
+ <def>
+ <opc>df /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp9</mnemonic>
+ <def>
+ <opc>df /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fst</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=2</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnstcw</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=7</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnstenv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=6</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnstsw</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=7</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=20</opc>
+ <opr>AX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsub</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=4</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=4</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=20</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=21</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=22</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=23</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=24</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=25</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=26</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=27</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=29</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsubp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=29</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsubr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=5</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=29</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=20</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=21</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=22</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=23</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=24</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=25</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=26</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=27</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=5</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsubrp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=20</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=21</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=22</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=23</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=24</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=25</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=26</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=27</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ftst</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=24</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucom</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=20</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=21</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=22</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=23</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=24</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=25</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=26</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=27</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucomp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=28</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=29</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucompp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=29</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxam</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=25</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxch</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxch4</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=08</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=09</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxch7</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=08</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=09</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxrstor</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /mod=11 /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxsave</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /mod=11 /reg=0</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fpxtract</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=34</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fyl2x</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=31</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fyl2xp1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=39</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>hlt</mnemonic>
+ <def>
+ <opc>f4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>idiv</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=7</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=7</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>in</mnemonic>
+ <def>
+ <opc>e4</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e5</opc>
+ <opr>eAX Ib</opr>
+ </def>
+ <def>
+ <opc>ec</opc>
+ <opr>AL DX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>ed</opc>
+ <opr>eAX DX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>imul</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f af</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=5</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=5</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>69</opc>
+ <opr>Gv Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>6b</opc>
+ <opr>Gv Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>inc</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>40</opc>
+ <opr>eAX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>41</opc>
+ <opr>eCX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>42</opc>
+ <opr>eDX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>43</opc>
+ <opr>eBX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>44</opc>
+ <opr>eSP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>45</opc>
+ <opr>eBP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>46</opc>
+ <opr>eSI</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>47</opc>
+ <opr>eDI</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=0</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>fe /reg=0</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>insb</mnemonic>
+ <def>
+ <opc>6c</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>insw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6d /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>insd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6d /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>int1</mnemonic>
+ <def>
+ <opc>f1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>int3</mnemonic>
+ <def>
+ <opc>cc</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>int</mnemonic>
+ <def>
+ <opc>cd</opc>
+ <opr>Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>into</mnemonic>
+ <def>
+ <opc>ce</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invd</mnemonic>
+ <def>
+ <opc>0f 08</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invept</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>sse66 0f 38 80 /m=32</opc>
+ <opr>Gd Mo</opr>
+ </def>
+ <def>
+ <opc>sse66 0f 38 80 /m=64</opc>
+ <opr>Gq Mo</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invlpg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=7 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invlpga</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invvpid</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>sse66 0f 38 81 /m=32</opc>
+ <opr>Gd Mo</opr>
+ </def>
+ <def>
+ <opc>sse66 0f 38 81 /m=64</opc>
+ <opr>Gq Mo</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>iretw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>cf /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>iretd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>cf /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>iretq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>cf /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jo</mnemonic>
+ <def>
+ <opc>70</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 80</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jno</mnemonic>
+ <def>
+ <opc>71</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 81</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jb</mnemonic>
+ <def>
+ <opc>72</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 82</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jae</mnemonic>
+ <def>
+ <opc>73</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 83</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jz</mnemonic>
+ <def>
+ <opc>74</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 84</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jnz</mnemonic>
+ <def>
+ <opc>75</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 85</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jbe</mnemonic>
+ <def>
+ <opc>76</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 86</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ja</mnemonic>
+ <def>
+ <opc>77</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 87</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>js</mnemonic>
+ <def>
+ <opc>78</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 88</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jns</mnemonic>
+ <def>
+ <opc>79</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 89</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jp</mnemonic>
+ <def>
+ <opc>7a</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8a</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jnp</mnemonic>
+ <def>
+ <opc>7b</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8b</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jl</mnemonic>
+ <def>
+ <opc>7c</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8c</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jge</mnemonic>
+ <def>
+ <opc>7d</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8d</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jle</mnemonic>
+ <def>
+ <opc>7e</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8e</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jg</mnemonic>
+ <def>
+ <opc>7f</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8f</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jcxz</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>e3 /a=16</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jecxz</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>e3 /a=32</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jrcxz</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>e3 /a=64</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jmp</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=4</opc>
+ <opr>Ev</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=5</opc>
+ <opr>Ep</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e9</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <opc>ea</opc>
+ <opr>Ap</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>eb</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lahf</mnemonic>
+ <def>
+ <opc>9f</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lar</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 02</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lddqu</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f f0</opc>
+ <opr>V M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ldmxcsr</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /reg=2 /mod=11</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lds</mnemonic>
+ <def>
+ <pfx>aso oso</pfx>
+ <opc>c5</opc>
+ <opr>Gv M</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lea</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>8d</opc>
+ <opr>Gv M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>les</mnemonic>
+ <def>
+ <pfx>aso oso</pfx>
+ <opc>c4</opc>
+ <opr>Gv M</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lfs</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b4</opc>
+ <opr>Gz M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lgs</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b5</opc>
+ <opr>Gz M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lidt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=3 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lss</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b2</opc>
+ <opr>Gz M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>leave</mnemonic>
+ <def>
+ <opc>c9</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lfence</mnemonic>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=0</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=1</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=2</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=3</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=4</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=5</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=6</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lgdt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=2 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lldt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=2</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lmsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=6 /mod=!11</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lock</mnemonic>
+ <def>
+ <opc>f0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsb</mnemonic>
+ <def>
+ <pfx>seg</pfx>
+ <opc>ac</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsw</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ad /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsd</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ad /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsq</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ad /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>loopnz</mnemonic>
+ <def>
+ <opc>e0</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>loope</mnemonic>
+ <def>
+ <opc>e1</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>loop</mnemonic>
+ <def>
+ <opc>e2</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lsl</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 03</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ltr</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=3</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maskmovq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f7</opc>
+ <opr>P PR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mfence</mnemonic>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=0</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=1</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=2</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=3</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=4</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=5</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=6</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>monitor</mnemonic>
+ <def>
+ <opc>0f 01 /reg=1 /mod=11 /rm=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>montmul</mnemonic>
+ <def>
+ <opc>0f a6 /mod=11 /rm=0 /reg=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mov</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c6 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c7 /reg=0</opc>
+ <opr>Ev Iz</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>88</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>89</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>8a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>8b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>8c</opc>
+ <opr>Ev S</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>8e</opc>
+ <opr>S Ev</opr>
+ </def>
+ <def>
+ <opc>a0</opc>
+ <opr>AL Ob</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw</pfx>
+ <opc>a1</opc>
+ <opr>rAX Ov</opr>
+ </def>
+ <def>
+ <opc>a2</opc>
+ <opr>Ob AL</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw</pfx>
+ <opc>a3</opc>
+ <opr>Ov rAX</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b0</opc>
+ <opr>ALr8b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b1</opc>
+ <opr>CLr9b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b2</opc>
+ <opr>DLr10b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b3</opc>
+ <opr>BLr11b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b4</opc>
+ <opr>AHr12b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b5</opc>
+ <opr>CHr13b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b6</opc>
+ <opr>DHr14b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b7</opc>
+ <opr>BHr15b Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>b8</opc>
+ <opr>rAXr8 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>b9</opc>
+ <opr>rCXr9 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>ba</opc>
+ <opr>rDXr10 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bb</opc>
+ <opr>rBXr11 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bc</opc>
+ <opr>rSPr12 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bd</opc>
+ <opr>rBPr13 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>be</opc>
+ <opr>rSIr14 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bf</opc>
+ <opr>rDIr15 Iv</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 20</opc>
+ <opr>R C</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 21</opc>
+ <opr>R D</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 22</opc>
+ <opr>C R</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 23</opc>
+ <opr>D R</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movapd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 28</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 29</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movaps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 28</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 29</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movd</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>sse66 0f 6e</opc>
+ <opr>V Ex</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6e</opc>
+ <opr>P Ex</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>sse66 0f 7e</opc>
+ <opr>Ex V</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 7e</opc>
+ <opr>Ex P</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movhpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 16 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 17</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movhps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 16 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 17</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movlhps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 16 /mod=11</opc>
+ <opr>V VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movlpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 12 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 13</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movlps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 12 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 13</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movhlps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 12 /mod=11</opc>
+ <opr>V VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movmskpd</mnemonic>
+ <def>
+ <pfx>oso rexr rexb</pfx>
+ <opc>sse66 0f 50</opc>
+ <opr>Gd VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movmskps</mnemonic>
+ <def>
+ <pfx>oso rexr rexb</pfx>
+ <opc>0f 50</opc>
+ <opr>Gd VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e7</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movnti</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f c3</opc>
+ <opr>M Gy</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2b</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2b</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntq</mnemonic>
+ <def>
+ <opc>0f e7</opc>
+ <opr>M P</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6f</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d6</opc>
+ <opr>W V</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 7e</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 7f</opc>
+ <opr>Q P</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsb</mnemonic>
+ <def>
+ <pfx>seg</pfx>
+ <opc>a4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsw</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>a5 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsd</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>a5 /o=32</opc>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsq</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>a5 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsx</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f be</opc>
+ <opr>Gv Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bf</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movupd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movups</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movzx</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b6</opc>
+ <opr>Gv Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b7</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mul</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=4</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=4</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mwait</mnemonic>
+ <def>
+ <opc>0f 01 /reg=1 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>neg</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=3</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=3</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>nop</mnemonic>
+ <def>
+ <opc>90</opc>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 19</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1a</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1b</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1c</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1d</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1e</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1f</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>not</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=2</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=2</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>or</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>08</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>09</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>0c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>0d</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=1</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=1</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>orpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 56</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>orps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 56</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>out</mnemonic>
+ <def>
+ <opc>e6</opc>
+ <opr>Ib AL</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e7</opc>
+ <opr>Ib eAX</opr>
+ </def>
+ <def>
+ <opc>ee</opc>
+ <opr>DX AL</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>ef</opc>
+ <opr>DX eAX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsb</mnemonic>
+ <def>
+ <opc>6e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6f /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6f /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsq</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6f /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>packsswb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 63</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 63</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>packssdw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6b</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6b</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>packuswb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 67</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 67</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fc</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fc</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fd</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fd</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fe</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fe</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ec</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ec</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ed</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ed</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddusb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f dc</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f dc</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddusw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f dd</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f dd</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pand</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f db</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f db</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pandn</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f df</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f df</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pavgb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e0</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e0</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pavgw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e3</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e3</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpeqb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 74</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 74</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpeqw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 75</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 75</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpeqd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 76</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 76</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpgtb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 64</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 64</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpgtw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 65</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 65</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpgtd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 66</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 66</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrb</mnemonic>
+ <def>
+ <pfx>aso rexr rexb</pfx>
+ <opc>sse66 0f 3a 14</opc>
+ <opr>MbRv V Ib</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrd</mnemonic>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 16 /o=16</opc>
+ <opr>Ev V Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 16 /o=32</opc>
+ <opr>Ev V Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrq</mnemonic>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 16 /o=64</opc>
+ <opr>Ev V Ib</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrw</mnemonic>
+ <def>
+ <pfx>aso rexr rexb</pfx>
+ <opc>sse66 0f c5</opc>
+ <opr>Gd VR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f c5</opc>
+ <opr>Gd PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pinsrw</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f c4</opc>
+ <opr>P Ew Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>sse66 0f c4</opc>
+ <opr>V Ew Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaddwd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f5</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f5</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ee</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ee</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxub</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f de</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f de</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ea</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ea</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminub</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f da</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f da</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmovmskb</mnemonic>
+ <def>
+ <pfx>rexr rexb</pfx>
+ <opc>sse66 0f d7</opc>
+ <opr>Gd VR</opr>
+ </def>
+ <def>
+ <pfx>oso rexr rexb</pfx>
+ <opc>0f d7</opc>
+ <opr>Gd PR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhuw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e4</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e4</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e5</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e5</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmullw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d5</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d5</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pop</mnemonic>
+ <def>
+ <opc>07</opc>
+ <opr>ES</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>17</opc>
+ <opr>SS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>1f</opc>
+ <opr>DS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>0f a9</opc>
+ <opr>GS</opr>
+ </def>
+ <def>
+ <opc>0f a1</opc>
+ <opr>FS</opr>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>58</opc>
+ <opr>rAXr8</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>59</opc>
+ <opr>rCXr9</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5a</opc>
+ <opr>rDXr10</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5b</opc>
+ <opr>rBXr11</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5c</opc>
+ <opr>rSPr12</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5d</opc>
+ <opr>rBPr13</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5e</opc>
+ <opr>rSIr14</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5f</opc>
+ <opr>rDIr15</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>8f /reg=0</opc>
+ <opr>Ev</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popa</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>61 /o=16</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popad</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>61 /o=32</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popfw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=32 /o=16</opc>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=16 /o=16</opc>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popfd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=16 /o=32</opc>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=32 /o=32</opc>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popfq</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=64 /o=64</opc>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>por</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f eb</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f eb</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetch</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=0</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=2</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=3</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=4</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=5</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=6</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=7</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetchnta</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=0</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetcht0</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetcht1</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=2</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetcht2</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=3</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psadbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f6</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f6</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 70</opc>
+ <opr>P Q Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psllw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f1</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f1</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 71 /reg=6</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 71 /reg=6</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pslld</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f2</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f2</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 72 /reg=6</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 72 /reg=6</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psllq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f3</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f3</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=6</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 73 /reg=6</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psraw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e1</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e1</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 71 /reg=4</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 71 /reg=4</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrad</mnemonic>
+ <def>
+ <opc>0f 72 /reg=4</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e2</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e2</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 72 /reg=4</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrlw</mnemonic>
+ <def>
+ <opc>0f 71 /reg=2</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d1</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d1</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 71 /reg=2</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrld</mnemonic>
+ <def>
+ <opc>0f 72 /reg=2</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d2</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d2</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 72 /reg=2</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrlq</mnemonic>
+ <def>
+ <opc>0f 73 /reg=2</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d3</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d3</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=2</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f8</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f8</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f9</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f9</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fa</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fa</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e8</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e8</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e9</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e9</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubusb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d8</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d8</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubusw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d9</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d9</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 68</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 68</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhwd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 69</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 69</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6a</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6a</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpcklbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 60</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 60</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpcklwd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 61</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 61</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckldq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 62</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 62</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
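+ <!--
+ 3DNow! (opcode 0f 0f, operation selected by the trailing suffix byte /3dnow=xx)
+ -->
+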
+ <instruction>
+ <mnemonic>pi2fw</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=0c</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pi2fd</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=0d</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pf2iw</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=1c</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pf2id</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=1d</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfnacc</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=8a</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfpnacc</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=8e</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfcmpge</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=90</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfmin</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=94</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrcp</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=96</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrsqrt</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=97</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfsub</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=9a</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfadd</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=9e</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfcmpgt</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a0</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfmax</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a4</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrcpit1</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a6</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrsqit1</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a7</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfsubr</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=aa</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfacc</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=ae</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfcmpeq</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b0</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfmul</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b4</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrcpit2</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b6</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhrw</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b7</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pswapd</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=bb</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pavgusb</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=bf</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>push</mnemonic>
+ <def>
+ <opc>06</opc>
+ <opr>ES</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>0e</opc>
+ <opr>CS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>16</opc>
+ <opr>SS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>1e</opc>
+ <opr>DS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>0f a8</opc>
+ <opr>GS</opr>
+ </def>
+ <def>
+ <opc>0f a0</opc>
+ <opr>FS</opr>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>50</opc>
+ <opr>rAXr8</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>51</opc>
+ <opr>rCXr9</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>52</opc>
+ <opr>rDXr10</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>53</opc>
+ <opr>rBXr11</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>54</opc>
+ <opr>rSPr12</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>55</opc>
+ <opr>rBPr13</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>56</opc>
+ <opr>rSIr14</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>57</opc>
+ <opr>rDIr15</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>68</opc>
+ <opr>Iz</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=6</opc>
+ <opr>Ev</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <opc>6a</opc>
+ <opr>Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pusha</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>60 /o=16</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushad</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>60 /o=32</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushfw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=32 /o=16</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=16 /o=16</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>9c /m=64 /o=16</opc>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushfd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=16 /o=32</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=32 /o=32</opc>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushfq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>9c /m=64 /o=32</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>9c /m=64 /o=64</opc>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pxor</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ef</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ef</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcl</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=2</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=2</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=2</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=2</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=2</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=2</opc>
+ <opr>Ev I1</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcr</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=3</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=3</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=3</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=3</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=3</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=3</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rol</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=0</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=0</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=0</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=0</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=0</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ror</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=1</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=1</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=1</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=1</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=1</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcpps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 53</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcpss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 53</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdmsr</mnemonic>
+ <def>
+ <opc>0f 32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdpmc</mnemonic>
+ <def>
+ <opc>0f 33</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdtsc</mnemonic>
+ <def>
+ <opc>0f 31</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdtscp</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=7 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>repne</mnemonic>
+ <def>
+ <opc>f2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rep</mnemonic>
+ <def>
+ <opc>f3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ret</mnemonic>
+ <def>
+ <opc>c2</opc>
+ <opr>Iw</opr>
+ </def>
+ <def>
+ <opc>c3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>retf</mnemonic>
+ <def>
+ <opc>ca</opc>
+ <opr>Iw</opr>
+ </def>
+ <def>
+ <opc>cb</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rsm</mnemonic>
+ <def>
+ <opc>0f aa</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rsqrtps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 52</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rsqrtss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 52</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sahf</mnemonic>
+ <def>
+ <opc>9e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sal</mnemonic>
+ </instruction>
+
+ <instruction>
+ <mnemonic>salc</mnemonic>
+ <def>
+ <opc>d6</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sar</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=7</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=7</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=7</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=7</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=7</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=7</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shl</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=6</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=6</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=6</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=6</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=6</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=4</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d2 /reg=4</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=4</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=4</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=4</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=4</opc>
+ <opr>Ev CL</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=6</opc>
+ <opr>Ev I1</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shr</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=5</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=5</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=5</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=5</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=5</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=5</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sbb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>18</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>19</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>1a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>1b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>1c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>1d</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=3</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=3</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=3</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=3</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasb</mnemonic>
+ <def>
+ <opc>ae</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>af /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>af /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>af /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>seto</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 90</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setno</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 91</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 92</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setnb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 93</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setz</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 94</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setnz</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 95</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setbe</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 96</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>seta</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 97</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sets</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 98</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setns</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 99</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setp</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9a</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setnp</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9b</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setl</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9c</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setge</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9d</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setle</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9e</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9f</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sfence</mnemonic>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=0</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=1</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=2</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=3</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=4</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=5</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=6</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sgdt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=0 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shld</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f a4</opc>
+ <opr>Ev Gv Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f a5</opc>
+ <opr>Ev Gv CL</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shrd</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ac</opc>
+ <opr>Ev Gv Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ad</opc>
+ <opr>Ev Gv CL</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shufpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f c6</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shufps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c6</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sidt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=1 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sldt</mnemonic>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=0</opc>
+ <opr>MwRv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>smsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=4 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stc</mnemonic>
+ <def>
+ <opc>f9</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>std</mnemonic>
+ <def>
+ <opc>fd</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stgi</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sti</mnemonic>
+ <def>
+ <opc>fb</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>skinit</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=6</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stmxcsr</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosb</mnemonic>
+ <def>
+ <pfx>seg</pfx>
+ <opc>aa</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosw</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ab /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosd</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ab /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosq</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ab /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>str</mnemonic>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=1</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sub</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>28</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>29</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>2a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>2b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>2c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>2d</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=5</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=5</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=5</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=5</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>swapgs</mnemonic>
+ <def>
+ <opc>0f 01 /reg=7 /mod=11 /rm=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>syscall</mnemonic>
+ <def>
+ <opc>0f 05</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sysenter</mnemonic>
+ <def>
+ <opc>0f 34</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sysexit</mnemonic>
+ <def>
+ <opc>0f 35</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sysret</mnemonic>
+ <def>
+ <opc>0f 07</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>test</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>84</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>85</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <opc>a8</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a9</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=0</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=1</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ucomisd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ucomiss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ud2</mnemonic>
+ <def>
+ <opc>0f 0b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpckhpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 15</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpckhps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 15</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpcklps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 14</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpcklpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 14</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>verr</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=4</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>verw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=5</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmcall</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmclear</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f c7 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmxon</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f c7 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmptrld</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c7 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmptrst</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c7 /reg=7</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmlaunch</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmresume</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmxoff</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmread</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 78 /m=16</opc>
+ <opr>Ed Gd</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 78 /m=32</opc>
+ <opr>Ed Gd</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 78 /m=64</opc>
+ <opr>Eq Gq</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmwrite</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 79 /m=16</opc>
+ <opr>Gd Ed</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 79 /m=32</opc>
+ <opr>Gd Ed</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 79 /m=64</opc>
+ <opr>Gq Eq</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmrun</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmmcall</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmload</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmsave</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>wait</mnemonic>
+ <def>
+ <opc>9b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>wbinvd</mnemonic>
+ <def>
+ <opc>0f 09</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>wrmsr</mnemonic>
+ <def>
+ <opc>0f 30</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xadd</mnemonic>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>0f c0</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f c1</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xchg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>86</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>87</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>90</opc>
+ <opr>rAXr8 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>91</opc>
+ <opr>rCXr9 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>92</opc>
+ <opr>rDXr10 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>93</opc>
+ <opr>rBXr11 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>94</opc>
+ <opr>rSPr12 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>95</opc>
+ <opr>rBPr13 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>96</opc>
+ <opr>rSIr14 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>97</opc>
+ <opr>rDIr15 rAX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xlatb</mnemonic>
+ <def>
+ <pfx>rexw</pfx>
+ <opc>d7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xor</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>30</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>31</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>32</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>33</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>34</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>35</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=6</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=6</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=6</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=6</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xorpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 57</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xorps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 57</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptecb</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptcbc</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptctr</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptcfb</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptofb</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=5</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xsha1</mnemonic>
+ <def>
+ <opc>0f a6 /mod=11 /rm=0 /reg=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xsha256</mnemonic>
+ <def>
+ <opc>0f a6 /mod=11 /rm=0 /reg=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xstore</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>db</mnemonic>
+ </instruction>
+
+ <!--
+ SSE 2
+ -->
+
+ <instruction>
+ <mnemonic>movdqa</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 7f</opc>
+ <opr>W V</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movdq2q</mnemonic>
+ <def>
+ <pfx>aso rexb</pfx>
+ <opc>ssef2 0f d6</opc>
+ <opr>P VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movdqu</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 6f</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 7f</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movq2dq</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>ssef3 0f d6</opc>
+ <opr>V PR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d4</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d4</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fb</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fb</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmuludq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f4</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f4</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufhw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 70</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshuflw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 70</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 70</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pslldq</mnemonic>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=7</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrldq</mnemonic>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=3</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhqdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpcklqdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <!--
+ SSE 3
+ -->
+
+ <instruction>
+ <mnemonic>addsubpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d0</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addsubps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f d0</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>haddpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 7c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>haddps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 7c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>hsubpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 7d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>hsubps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 7d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movddup</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 12 /mod=11</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 12 /mod=!11</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movshdup</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 16 /mod=11</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 16 /mod=!11</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsldup</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 12 /mod=11</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 12 /mod=!11</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <!--
+ SSSE 3
+ -->
+
+ <instruction>
+ <mnemonic>pabsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 1c</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 1c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pabsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 1d</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 1d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pabsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 1e</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 1e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 00</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 00</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phaddw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 01</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 01</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phaddd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 02</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 02</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phaddsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 03</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 03</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaddubsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 04</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 04</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phsubw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 05</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 05</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phsubd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 06</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 06</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phsubsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 07</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 07</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 08</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 08</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 0a</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 0a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 09</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 09</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhrsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 0b</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 0b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>palignr</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 3a 0f</opc>
+ <opr>P Q Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0f</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <!--
+ SSE 4.1
+ -->
+
+ <instruction>
+ <mnemonic>pblendvb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 10</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmuldq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 28</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 38</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 39</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminuw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminud</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxud</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulld</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 40</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phminposuw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 41</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 08</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 09</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0a</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0b</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0d</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pblendw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0e</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0c</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendvpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 15</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendvps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 14</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>dpps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 40</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>dppd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 41</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mpsadbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 42</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>extractps</mnemonic>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 17</opc>
+ <opr>MdRy V Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invalid</mnemonic>
+ </instruction>
+
+</x86optable>
diff --git a/src/3rdparty/masm/disassembler/udis86/ud_opcode.py b/src/3rdparty/masm/disassembler/udis86/ud_opcode.py
new file mode 100644
index 0000000000..f301b52461
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/ud_opcode.py
@@ -0,0 +1,235 @@
+# udis86 - scripts/ud_opcode.py
+#
+# Copyright (c) 2009 Vivek Thampi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+class UdOpcodeTables:
+
+ TableInfo = {
+ 'opctbl' : { 'name' : 'UD_TAB__OPC_TABLE', 'size' : 256 },
+ '/sse' : { 'name' : 'UD_TAB__OPC_SSE', 'size' : 4 },
+ '/reg' : { 'name' : 'UD_TAB__OPC_REG', 'size' : 8 },
+ '/rm' : { 'name' : 'UD_TAB__OPC_RM', 'size' : 8 },
+ '/mod' : { 'name' : 'UD_TAB__OPC_MOD', 'size' : 2 },
+ '/m' : { 'name' : 'UD_TAB__OPC_MODE', 'size' : 3 },
+ '/x87' : { 'name' : 'UD_TAB__OPC_X87', 'size' : 64 },
+ '/a' : { 'name' : 'UD_TAB__OPC_ASIZE', 'size' : 3 },
+ '/o' : { 'name' : 'UD_TAB__OPC_OSIZE', 'size' : 3 },
+ '/3dnow' : { 'name' : 'UD_TAB__OPC_3DNOW', 'size' : 256 },
+ 'vendor' : { 'name' : 'UD_TAB__OPC_VENDOR', 'size' : 3 },
+ }
+
+ OpcodeTable0 = {
+ 'type' : 'opctbl',
+ 'entries' : {},
+ 'meta' : 'table0'
+ }
+
+ OpcExtIndex = {
+
+ # ssef2, ssef3, sse66
+ 'sse': {
+ 'none' : '00',
+ 'f2' : '01',
+ 'f3' : '02',
+ '66' : '03'
+ },
+
+ # /mod=
+ 'mod': {
+ '!11' : '00',
+ '11' : '01'
+ },
+
+ # /m=, /o=, /a=
+ 'mode': {
+ '16' : '00',
+ '32' : '01',
+ '64' : '02'
+ },
+
+ 'vendor' : {
+ 'amd' : '00',
+ 'intel' : '01',
+ 'any' : '02'
+ }
+ }
+
+ InsnTable = []
+ MnemonicsTable = []
+
+ ThreeDNowTable = {}
+
+ def sizeOfTable( self, t ):
+ return self.TableInfo[ t ][ 'size' ]
+
+ def nameOfTable( self, t ):
+ return self.TableInfo[ t ][ 'name' ]
+
+ #
+    # Updates a table entry: if the entry doesn't exist it is
+    # created; otherwise the existing entry is validated to be
+    # of the expected type before walking further down the path.
+ #
+ def updateTable( self, table, index, type, meta ):
+        if index not in table[ 'entries' ]:
+ table[ 'entries' ][ index ] = { 'type' : type, 'entries' : {}, 'meta' : meta }
+ if table[ 'entries' ][ index ][ 'type' ] != type:
+ raise NameError( "error: violation in opcode mapping (overwrite) %s with %s." %
+ ( table[ 'entries' ][ index ][ 'type' ], type) )
+ return table[ 'entries' ][ index ]
+
+ class Insn:
+ """An abstract type representing an instruction in the opcode map.
+ """
+
+ # A mapping of opcode extensions to their representational
+ # values used in the opcode map.
+ OpcExtMap = {
+ '/rm' : lambda v: "%02x" % int(v, 16),
+ '/x87' : lambda v: "%02x" % int(v, 16),
+ '/3dnow' : lambda v: "%02x" % int(v, 16),
+ '/reg' : lambda v: "%02x" % int(v, 16),
+ # modrm.mod
+ # (!11, 11) => (00, 01)
+ '/mod' : lambda v: '00' if v == '!11' else '01',
+ # Mode extensions:
+ # (16, 32, 64) => (00, 01, 02)
+ '/o' : lambda v: "%02x" % (int(v) / 32),
+ '/a' : lambda v: "%02x" % (int(v) / 32),
+ '/m' : lambda v: "%02x" % (int(v) / 32),
+ '/sse' : lambda v: UdOpcodeTables.OpcExtIndex['sse'][v]
+ }
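+
+        # Illustrative note (editor's comment, not in the original udis86
+        # source): the mode-extension lambdas divide by 32 so that the
+        # strings '16', '32' and '64' map to the indices '00', '01' and
+        # '02'; e.g. OpcExtMap['/o']('64') == '02'.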
+
+ def __init__(self, prefixes, mnemonic, opcodes, operands, vendor):
+ self.opcodes = opcodes
+ self.prefixes = prefixes
+ self.mnemonic = mnemonic
+ self.operands = operands
+ self.vendor = vendor
+ self.opcext = {}
+
+ ssePrefix = None
+ if self.opcodes[0] in ('ssef2', 'ssef3', 'sse66'):
+ ssePrefix = self.opcodes[0][3:]
+ self.opcodes.pop(0)
+
+ # do some preliminary decoding of the instruction type
+ # 1byte, 2byte or 3byte instruction?
+ self.nByteInsn = 1
+ if self.opcodes[0] == '0f': # 2byte
+ # 2+ byte opcodes are always disambiguated by an
+ # sse prefix, unless it is a 3d now instruction
+ # which is 0f 0f ...
+ if self.opcodes[1] != '0f' and ssePrefix is None:
+ ssePrefix = 'none'
+ if self.opcodes[1] in ('38', '3a'): # 3byte
+ self.nByteInsn = 3
+ else:
+ self.nByteInsn = 2
+
+ # The opcode that indexes into the opcode table.
+ self.opcode = self.opcodes[self.nByteInsn - 1]
+
+ # Record opcode extensions
+ for opcode in self.opcodes[self.nByteInsn:]:
+ arg, val = opcode.split('=')
+ self.opcext[arg] = self.OpcExtMap[arg](val)
+
+ # Record sse extension: the reason sse extension is handled
+ # separately is that historically sse was handled as a first
+ # class opcode, not as an extension. Now that sse is handled
+ # as an extension, we do the manual conversion here, as opposed
+ # to modifying the opcode xml file.
+ if ssePrefix is not None:
+ self.opcext['/sse'] = self.OpcExtMap['/sse'](ssePrefix)
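+
+    # Worked example (editor's comment, not part of the original script):
+    # for the optable entry 'blendpd' with opc = ['sse66', '0f', '3a', '0d'],
+    # __init__ strips the sse66 prefix, classifies the instruction as a
+    # 3-byte opcode (nByteInsn == 3), sets self.opcode = '0d' and records
+    # the prefix as opcext['/sse'] == '03'.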
+
+ def parse(self, table, insn):
+        index = insn.opcodes[0]
+ if insn.nByteInsn > 1:
+ assert index == '0f'
+ table = self.updateTable(table, index, 'opctbl', '0f')
+ index = insn.opcodes[1]
+
+ if insn.nByteInsn == 3:
+ table = self.updateTable(table, index, 'opctbl', index)
+ index = insn.opcodes[2]
+
+        # Walk down the tree, creating levels as needed for opcode
+        # extensions. The order is important, and determines how
+        # well the opcode table is packed. Also note, /sse must come
+        # before /o, because /sse may consume the operand-size prefix
+        # and thereby affect the outcome of /o.
+ for ext in ('/mod', '/x87', '/reg', '/rm', '/sse',
+ '/o', '/a', '/m', '/3dnow'):
+ if ext in insn.opcext:
+ table = self.updateTable(table, index, ext, ext)
+ index = insn.opcext[ext]
+
+ # additional table for disambiguating vendor
+ if len(insn.vendor):
+ table = self.updateTable(table, index, 'vendor', insn.vendor)
+ index = self.OpcExtIndex['vendor'][insn.vendor]
+
+ # make leaf node entries
+ leaf = self.updateTable(table, index, 'insn', '')
+
+ leaf['mnemonic'] = insn.mnemonic
+ leaf['prefixes'] = insn.prefixes
+ leaf['operands'] = insn.operands
+
+ # add instruction to linear table of instruction forms
+ self.InsnTable.append({ 'prefixes' : insn.prefixes,
+ 'mnemonic' : insn.mnemonic,
+ 'operands' : insn.operands })
+
+ # add mnemonic to mnemonic table
+        if insn.mnemonic not in self.MnemonicsTable:
+ self.MnemonicsTable.append(insn.mnemonic)
+
+
+ # Adds an instruction definition to the opcode tables
+ def addInsnDef( self, prefixes, mnemonic, opcodes, operands, vendor ):
+ insn = self.Insn(prefixes=prefixes,
+ mnemonic=mnemonic,
+ opcodes=opcodes,
+ operands=operands,
+ vendor=vendor)
+ self.parse(self.OpcodeTable0, insn)
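+
+    # Typical use (editor's sketch; assumes the companion ud_optable
+    # module found alongside this script):
+    #   import ud_optable
+    #   tables = UdOpcodeTables()
+    #   ud_optable.UdOptableXmlParser().parse( 'optable.xml', tables.addInsnDef )
+    #   tables.print_tree()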
+
+ def print_table( self, table, pfxs ):
+ print "%s |" % pfxs
+ keys = table[ 'entries' ].keys()
+ if ( len( keys ) ):
+ keys.sort()
+ for idx in keys:
+ e = table[ 'entries' ][ idx ]
+ if e[ 'type' ] == 'insn':
+ print "%s |-<%s>" % ( pfxs, idx ),
+ print "%s %s" % ( e[ 'mnemonic' ], ' '.join( e[ 'operands'] ) )
+ else:
+ print "%s |-<%s> %s" % ( pfxs, idx, e['type'] )
+ self.print_table( e, pfxs + ' |' )
+
+ def print_tree( self ):
+ self.print_table( self.OpcodeTable0, '' )
diff --git a/src/3rdparty/masm/disassembler/udis86/ud_optable.py b/src/3rdparty/masm/disassembler/udis86/ud_optable.py
new file mode 100644
index 0000000000..5b5c55d3b8
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/ud_optable.py
@@ -0,0 +1,103 @@
+# udis86 - scripts/ud_optable.py (optable.xml parser)
+#
+# Copyright (c) 2009 Vivek Thampi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+from xml.dom import minidom
+
+class UdOptableXmlParser:
+
+ def parseDef( self, node ):
+ ven = ''
+ pfx = []
+ opc = []
+ opr = []
+ for def_node in node.childNodes:
+ if not def_node.localName:
+ continue
+            if def_node.localName == 'pfx':
+                pfx = def_node.firstChild.data.split()
+            elif def_node.localName == 'opc':
+                opc = def_node.firstChild.data.split()
+            elif def_node.localName == 'opr':
+                opr = def_node.firstChild.data.split()
+            elif def_node.localName == 'mode':
+                pfx.extend( def_node.firstChild.data.split() )
+            elif def_node.localName == 'syn':
+                pfx.extend( def_node.firstChild.data.split() )
+            elif def_node.localName == 'vendor':
+                ven = def_node.firstChild.data
+            else:
+                print "warning: invalid node - %s" % def_node.localName
+                continue
+ return ( pfx, opc, opr, ven )
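+
+    # Example (editor's comment): for the blendpd <def> node in optable.xml
+    # this returns ( ['aso', 'rexr', 'rexx', 'rexb'],
+    #                ['sse66', '0f', '3a', '0d'], ['V', 'W', 'Ib'], '' )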
+
+ def parse( self, xml, fn ):
+ xmlDoc = minidom.parse( xml )
+ self.TlNode = xmlDoc.firstChild
+
+ while self.TlNode and self.TlNode.localName != "x86optable":
+ self.TlNode = self.TlNode.nextSibling
+
+ for insnNode in self.TlNode.childNodes:
+ if not insnNode.localName:
+ continue
+ if insnNode.localName != "instruction":
+ print "warning: invalid insn node - %s" % insnNode.localName
+ continue
+
+ mnemonic = insnNode.getElementsByTagName( 'mnemonic' )[ 0 ].firstChild.data
+ vendor = ''
+
+ for node in insnNode.childNodes:
+ if node.localName == 'vendor':
+ vendor = node.firstChild.data
+ elif node.localName == 'def':
+ ( prefixes, opcodes, operands, local_vendor ) = \
+ self.parseDef( node )
+ if ( len( local_vendor ) ):
+ vendor = local_vendor
+ # callback
+ fn( prefixes, mnemonic, opcodes, operands, vendor )
+
+
+def printFn( pfx, mnm, opc, opr, ven ):
+ print 'def: ',
+ if len( pfx ):
+ print ' '.join( pfx ),
+ print "%s %s %s %s" % \
+ ( mnm, ' '.join( opc ), ' '.join( opr ), ven )
+
+
+def parse( xml, callback ):
+ parser = UdOptableXmlParser()
+ parser.parse( xml, callback )
+
+def main():
+ parser = UdOptableXmlParser()
+ parser.parse( sys.argv[ 1 ], printFn )
+
+if __name__ == "__main__":
+ main()
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86.c b/src/3rdparty/masm/disassembler/udis86/udis86.c
new file mode 100644
index 0000000000..2641034232
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86.c
@@ -0,0 +1,182 @@
+/* udis86 - libudis86/udis86.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_input.h"
+#include "udis86_extern.h"
+
+#ifndef __UD_STANDALONE__
+# include <stdlib.h>
+# include <string.h>
+#endif /* __UD_STANDALONE__ */
+
+/* =============================================================================
+ * ud_init() - Initializes ud_t object.
+ * =============================================================================
+ */
+extern void
+ud_init(struct ud* u)
+{
+ memset((void*)u, 0, sizeof(struct ud));
+ ud_set_mode(u, 16);
+ u->mnemonic = UD_Iinvalid;
+ ud_set_pc(u, 0);
+#ifndef __UD_STANDALONE__
+ ud_set_input_file(u, stdin);
+#endif /* __UD_STANDALONE__ */
+}
+
+/* =============================================================================
+ * ud_disassemble() - disassembles one instruction and returns the number of
+ * bytes disassembled. A zero means end of disassembly.
+ * =============================================================================
+ */
+extern unsigned int
+ud_disassemble(struct ud* u)
+{
+ if (ud_input_end(u))
+ return 0;
+
+
+ u->insn_buffer[0] = u->insn_hexcode[0] = 0;
+
+
+ if (ud_decode(u) == 0)
+ return 0;
+ if (u->translator)
+ u->translator(u);
+ return ud_insn_len(u);
+}
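+
+/* Typical driver loop (editor's sketch, not part of the original source;
+ * `code' and `len' stand for a caller-supplied byte buffer and its size):
+ *
+ *   struct ud u;
+ *   ud_init(&u);
+ *   ud_set_input_buffer(&u, code, len);
+ *   ud_set_mode(&u, 64);
+ *   ud_set_syntax(&u, ud_translate_att);
+ *   while (ud_disassemble(&u))
+ *       printf("%s\n", ud_insn_asm(&u));
+ */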
+
+/* =============================================================================
+ * ud_set_mode() - Sets the disassembly mode.
+ * =============================================================================
+ */
+extern void
+ud_set_mode(struct ud* u, uint8_t m)
+{
+ switch(m) {
+ case 16:
+ case 32:
+ case 64: u->dis_mode = m ; return;
+ default: u->dis_mode = 16; return;
+ }
+}
+
+/* =============================================================================
+ * ud_set_vendor() - Set vendor.
+ * =============================================================================
+ */
+extern void
+ud_set_vendor(struct ud* u, unsigned v)
+{
+ switch(v) {
+ case UD_VENDOR_INTEL:
+ u->vendor = v;
+ break;
+ case UD_VENDOR_ANY:
+ u->vendor = v;
+ break;
+ default:
+ u->vendor = UD_VENDOR_AMD;
+ }
+}
+
+/* =============================================================================
+ * ud_set_pc() - Sets code origin.
+ * =============================================================================
+ */
+extern void
+ud_set_pc(struct ud* u, uint64_t o)
+{
+ u->pc = o;
+}
+
+/* =============================================================================
+ * ud_set_syntax() - Sets the output syntax.
+ * =============================================================================
+ */
+extern void
+ud_set_syntax(struct ud* u, void (*t)(struct ud*))
+{
+ u->translator = t;
+}
+
+/* =============================================================================
+ * ud_insn_asm() - Returns the disassembled instruction in assembly text form.
+ * =============================================================================
+ */
+extern char*
+ud_insn_asm(struct ud* u)
+{
+ return u->insn_buffer;
+}
+
+/* =============================================================================
+ * ud_insn_off() - Returns the offset of the disassembled instruction.
+ * =============================================================================
+ */
+extern uint64_t
+ud_insn_off(struct ud* u)
+{
+ return u->insn_offset;
+}
+
+
+/* =============================================================================
+ * ud_insn_hex() - Returns hex form of disassembled instruction.
+ * =============================================================================
+ */
+extern char*
+ud_insn_hex(struct ud* u)
+{
+ return u->insn_hexcode;
+}
+
+/* =============================================================================
+ * ud_insn_ptr() - Returns a pointer to the bytes that were disassembled.
+ * =============================================================================
+ */
+extern uint8_t*
+ud_insn_ptr(struct ud* u)
+{
+ return u->inp_sess;
+}
+
+/* =============================================================================
+ * ud_insn_len() - Returns the count of bytes disassembled.
+ * =============================================================================
+ */
+extern unsigned int
+ud_insn_len(struct ud* u)
+{
+ return u->inp_ctr;
+}
+
+#endif // USE(UDIS86)
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86.h b/src/3rdparty/masm/disassembler/udis86/udis86.h
new file mode 100644
index 0000000000..baaf495e04
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86.h
@@ -0,0 +1,33 @@
+/* udis86 - udis86.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UDIS86_H
+#define UDIS86_H
+
+#include "udis86_types.h"
+#include "udis86_extern.h"
+#include "udis86_itab.h"
+
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_decode.c b/src/3rdparty/masm/disassembler/udis86/udis86_decode.c
new file mode 100644
index 0000000000..3d567b6df2
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_decode.c
@@ -0,0 +1,1141 @@
+/* udis86 - libudis86/decode.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_extern.h"
+#include "udis86_types.h"
+#include "udis86_input.h"
+#include "udis86_decode.h"
+#include <wtf/Assertions.h>
+
+#define dbg(x, n...)
+/* #define dbg printf */
+
+#ifndef __UD_STANDALONE__
+# include <string.h>
+#endif /* __UD_STANDALONE__ */
+
+/* The max number of prefixes to an instruction */
+#define MAX_PREFIXES 15
+
+/* instruction aliases and special cases */
+static struct ud_itab_entry s_ie__invalid =
+ { UD_Iinvalid, O_NONE, O_NONE, O_NONE, P_none };
+
+static int
+decode_ext(struct ud *u, uint16_t ptr);
+
+
+static inline int
+eff_opr_mode(int dis_mode, int rex_w, int pfx_opr)
+{
+ if (dis_mode == 64) {
+ return rex_w ? 64 : (pfx_opr ? 16 : 32);
+ } else if (dis_mode == 32) {
+ return pfx_opr ? 16 : 32;
+ } else {
+ ASSERT(dis_mode == 16);
+ return pfx_opr ? 32 : 16;
+ }
+}
+
+
+static inline int
+eff_adr_mode(int dis_mode, int pfx_adr)
+{
+ if (dis_mode == 64) {
+ return pfx_adr ? 32 : 64;
+ } else if (dis_mode == 32) {
+ return pfx_adr ? 16 : 32;
+ } else {
+ ASSERT(dis_mode == 16);
+ return pfx_adr ? 32 : 16;
+ }
+}
+
+
+/* Looks up a mnemonic code in the mnemonic string table.
+ * Returns NULL if the mnemonic code is invalid.
+ */
+const char * ud_lookup_mnemonic( enum ud_mnemonic_code c )
+{
+ return ud_mnemonics_str[ c ];
+}
+
+
+/*
+ * decode_prefixes
+ *
+ * Extracts instruction prefixes.
+ */
+static int
+decode_prefixes(struct ud *u)
+{
+ unsigned int have_pfx = 1;
+ unsigned int i;
+ uint8_t curr;
+
+ /* if in error state, bail out */
+ if ( u->error )
+ return -1;
+
+ /* keep going as long as there are prefixes available */
+ for ( i = 0; have_pfx ; ++i ) {
+
+ /* Get next byte. */
+ ud_inp_next(u);
+ if ( u->error )
+ return -1;
+ curr = ud_inp_curr( u );
+
+ /* rex prefixes in 64bit mode */
+ if ( u->dis_mode == 64 && ( curr & 0xF0 ) == 0x40 ) {
+ u->pfx_rex = curr;
+ } else {
+ switch ( curr )
+ {
+ case 0x2E :
+ u->pfx_seg = UD_R_CS;
+ u->pfx_rex = 0;
+ break;
+ case 0x36 :
+ u->pfx_seg = UD_R_SS;
+ u->pfx_rex = 0;
+ break;
+ case 0x3E :
+ u->pfx_seg = UD_R_DS;
+ u->pfx_rex = 0;
+ break;
+ case 0x26 :
+ u->pfx_seg = UD_R_ES;
+ u->pfx_rex = 0;
+ break;
+ case 0x64 :
+ u->pfx_seg = UD_R_FS;
+ u->pfx_rex = 0;
+ break;
+ case 0x65 :
+ u->pfx_seg = UD_R_GS;
+ u->pfx_rex = 0;
+ break;
+      case 0x67 : /* address-size override prefix */
+ u->pfx_adr = 0x67;
+ u->pfx_rex = 0;
+ break;
+ case 0xF0 :
+ u->pfx_lock = 0xF0;
+ u->pfx_rex = 0;
+ break;
+ case 0x66:
+ /* the 0x66 sse prefix is only effective if no other sse prefix
+ * has already been specified.
+ */
+ if ( !u->pfx_insn ) u->pfx_insn = 0x66;
+ u->pfx_opr = 0x66;
+ u->pfx_rex = 0;
+ break;
+ case 0xF2:
+ u->pfx_insn = 0xF2;
+ u->pfx_repne = 0xF2;
+ u->pfx_rex = 0;
+ break;
+ case 0xF3:
+ u->pfx_insn = 0xF3;
+ u->pfx_rep = 0xF3;
+ u->pfx_repe = 0xF3;
+ u->pfx_rex = 0;
+ break;
+ default :
+ /* No more prefixes */
+ have_pfx = 0;
+ break;
+ }
+ }
+
+ /* check if we reached max instruction length */
+ if ( i + 1 == MAX_INSN_LENGTH ) {
+ u->error = 1;
+ break;
+ }
+ }
+
+ /* return status */
+ if ( u->error )
+ return -1;
+
+ /* rewind back one byte in stream, since the above loop
+ * stops with a non-prefix byte.
+ */
+ ud_inp_back(u);
+ return 0;
+}
+
+
+static inline unsigned int modrm( struct ud * u )
+{
+ if ( !u->have_modrm ) {
+ u->modrm = ud_inp_next( u );
+ u->have_modrm = 1;
+ }
+ return u->modrm;
+}
+
+
+static unsigned int resolve_operand_size( const struct ud * u, unsigned int s )
+{
+ switch ( s )
+ {
+ case SZ_V:
+ return ( u->opr_mode );
+ case SZ_Z:
+ return ( u->opr_mode == 16 ) ? 16 : 32;
+ case SZ_P:
+ return ( u->opr_mode == 16 ) ? SZ_WP : SZ_DP;
+ case SZ_MDQ:
+ return ( u->opr_mode == 16 ) ? 32 : u->opr_mode;
+ case SZ_RDQ:
+ return ( u->dis_mode == 64 ) ? 64 : 32;
+ default:
+ return s;
+ }
+}
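+
+/* Example (editor's comment): with opr_mode == 16, SZ_V and SZ_Z both
+ * resolve to 16; with opr_mode == 32 they both resolve to 32, while in
+ * 64-bit operand mode SZ_V resolves to 64 but SZ_Z stays at 32. */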
+
+
+static int resolve_mnemonic( struct ud* u )
+{
+ /* far/near flags */
+ u->br_far = 0;
+ u->br_near = 0;
+  /* readjust operand sizes for call/jmp instructions */
+ if ( u->mnemonic == UD_Icall || u->mnemonic == UD_Ijmp ) {
+ /* WP: 16:16 pointer */
+ if ( u->operand[ 0 ].size == SZ_WP ) {
+ u->operand[ 0 ].size = 16;
+ u->br_far = 1;
+ u->br_near= 0;
+ /* DP: 32:32 pointer */
+ } else if ( u->operand[ 0 ].size == SZ_DP ) {
+ u->operand[ 0 ].size = 32;
+ u->br_far = 1;
+ u->br_near= 0;
+ } else {
+ u->br_far = 0;
+ u->br_near= 1;
+ }
+ /* resolve 3dnow weirdness. */
+ } else if ( u->mnemonic == UD_I3dnow ) {
+ u->mnemonic = ud_itab[ u->le->table[ ud_inp_curr( u ) ] ].mnemonic;
+ }
+ /* SWAPGS is only valid in 64bits mode */
+ if ( u->mnemonic == UD_Iswapgs && u->dis_mode != 64 ) {
+ u->error = 1;
+ return -1;
+ }
+
+ if (u->mnemonic == UD_Ixchg) {
+ if ((u->operand[0].type == UD_OP_REG && u->operand[0].base == UD_R_AX &&
+ u->operand[1].type == UD_OP_REG && u->operand[1].base == UD_R_AX) ||
+ (u->operand[0].type == UD_OP_REG && u->operand[0].base == UD_R_EAX &&
+ u->operand[1].type == UD_OP_REG && u->operand[1].base == UD_R_EAX)) {
+ u->operand[0].type = UD_NONE;
+ u->operand[1].type = UD_NONE;
+ u->mnemonic = UD_Inop;
+ }
+ }
+
+ if (u->mnemonic == UD_Inop && u->pfx_rep) {
+ u->pfx_rep = 0;
+ u->mnemonic = UD_Ipause;
+ }
+ return 0;
+}
+
+
+/* -----------------------------------------------------------------------------
+ * decode_a()- Decodes operands of the type seg:offset
+ * -----------------------------------------------------------------------------
+ */
+static void
+decode_a(struct ud* u, struct ud_operand *op)
+{
+ if (u->opr_mode == 16) {
+ /* seg16:off16 */
+ op->type = UD_OP_PTR;
+ op->size = 32;
+ op->lval.ptr.off = ud_inp_uint16(u);
+ op->lval.ptr.seg = ud_inp_uint16(u);
+ } else {
+ /* seg16:off32 */
+ op->type = UD_OP_PTR;
+ op->size = 48;
+ op->lval.ptr.off = ud_inp_uint32(u);
+ op->lval.ptr.seg = ud_inp_uint16(u);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_gpr() - Returns decoded General Purpose Register
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+decode_gpr(register struct ud* u, unsigned int s, unsigned char rm)
+{
+ s = resolve_operand_size(u, s);
+
+ switch (s) {
+ case 64:
+ return UD_R_RAX + rm;
+ case SZ_DP:
+ case 32:
+ return UD_R_EAX + rm;
+ case SZ_WP:
+ case 16:
+ return UD_R_AX + rm;
+ case 8:
+ if (u->dis_mode == 64 && u->pfx_rex) {
+ if (rm >= 4)
+ return UD_R_SPL + (rm-4);
+ return UD_R_AL + rm;
+ } else return UD_R_AL + rm;
+ default:
+ return 0;
+ }
+}
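+
+/* Example (editor's comment): in 64-bit mode with any REX prefix present,
+ * an 8-bit register index of 4 decodes to SPL rather than AH; without a
+ * REX prefix the legacy AH..BH encodings apply. */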
+
+/* -----------------------------------------------------------------------------
+ * resolve_gpr64() - 64bit General Purpose Register-Selection.
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+resolve_gpr64(struct ud* u, enum ud_operand_code gpr_op, enum ud_operand_size * size)
+{
+ if (gpr_op >= OP_rAXr8 && gpr_op <= OP_rDIr15)
+ gpr_op = (gpr_op - OP_rAXr8) | (REX_B(u->pfx_rex) << 3);
+ else gpr_op = (gpr_op - OP_rAX);
+
+ if (u->opr_mode == 16) {
+ *size = 16;
+ return gpr_op + UD_R_AX;
+ }
+ if (u->dis_mode == 32 ||
+ (u->opr_mode == 32 && ! (REX_W(u->pfx_rex) || u->default64))) {
+ *size = 32;
+ return gpr_op + UD_R_EAX;
+ }
+
+ *size = 64;
+ return gpr_op + UD_R_RAX;
+}
+
+/* -----------------------------------------------------------------------------
+ * resolve_gpr32 () - 32bit General Purpose Register-Selection.
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+resolve_gpr32(struct ud* u, enum ud_operand_code gpr_op)
+{
+ gpr_op = gpr_op - OP_eAX;
+
+ if (u->opr_mode == 16)
+ return gpr_op + UD_R_AX;
+
+ return gpr_op + UD_R_EAX;
+}
+
+/* -----------------------------------------------------------------------------
+ * resolve_reg() - Resolves the register type
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+resolve_reg(struct ud* u, unsigned int type, unsigned char i)
+{
+ switch (type) {
+ case T_MMX : return UD_R_MM0 + (i & 7);
+ case T_XMM : return UD_R_XMM0 + i;
+ case T_CRG : return UD_R_CR0 + i;
+ case T_DBG : return UD_R_DR0 + i;
+ case T_SEG : {
+ /*
+ * Only 6 segment registers, anything else is an error.
+ */
+ if ((i & 7) > 5) {
+ u->error = 1;
+ } else {
+ return UD_R_ES + (i & 7);
+ }
+ }
+ case T_NONE:
+ default: return UD_NONE;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_imm() - Decodes Immediate values.
+ * -----------------------------------------------------------------------------
+ */
+static void
+decode_imm(struct ud* u, unsigned int s, struct ud_operand *op)
+{
+ op->size = resolve_operand_size(u, s);
+ op->type = UD_OP_IMM;
+
+ switch (op->size) {
+ case 8: op->lval.sbyte = ud_inp_uint8(u); break;
+ case 16: op->lval.uword = ud_inp_uint16(u); break;
+ case 32: op->lval.udword = ud_inp_uint32(u); break;
+ case 64: op->lval.uqword = ud_inp_uint64(u); break;
+ default: return;
+ }
+}
+
+
+/*
+ * decode_modrm_reg
+ *
+ * Decodes reg field of mod/rm byte
+ *
+ */
+static void
+decode_modrm_reg(struct ud *u,
+ struct ud_operand *operand,
+ unsigned int type,
+ unsigned int size)
+{
+ uint8_t reg = (REX_R(u->pfx_rex) << 3) | MODRM_REG(modrm(u));
+ operand->type = UD_OP_REG;
+ operand->size = resolve_operand_size(u, size);
+
+ if (type == T_GPR) {
+ operand->base = decode_gpr(u, operand->size, reg);
+ } else {
+ operand->base = resolve_reg(u, type, reg);
+ }
+}
+
+
+/*
+ * decode_modrm_rm
+ *
+ * Decodes rm field of mod/rm byte
+ *
+ */
+static void
+decode_modrm_rm(struct ud *u,
+ struct ud_operand *op,
+ unsigned char type,
+ unsigned int size)
+
+{
+ unsigned char mod, rm, reg;
+
+ /* get mod, r/m and reg fields */
+ mod = MODRM_MOD(modrm(u));
+ rm = (REX_B(u->pfx_rex) << 3) | MODRM_RM(modrm(u));
+ reg = (REX_R(u->pfx_rex) << 3) | MODRM_REG(modrm(u));
+
+ op->size = resolve_operand_size(u, size);
+
+ /*
+ * If mod is 11b, then the modrm.rm specifies a register.
+ *
+ */
+ if (mod == 3) {
+ op->type = UD_OP_REG;
+ if (type == T_GPR) {
+ op->base = decode_gpr(u, op->size, rm);
+ } else {
+ op->base = resolve_reg(u, type, (REX_B(u->pfx_rex) << 3) | (rm & 7));
+ }
+ return;
+ }
+
+
+ /*
+ * !11 => Memory Address
+ */
+ op->type = UD_OP_MEM;
+
+ if (u->adr_mode == 64) {
+ op->base = UD_R_RAX + rm;
+ if (mod == 1) {
+ op->offset = 8;
+ } else if (mod == 2) {
+ op->offset = 32;
+ } else if (mod == 0 && (rm & 7) == 5) {
+ op->base = UD_R_RIP;
+ op->offset = 32;
+ } else {
+ op->offset = 0;
+ }
+ /*
+ * Scale-Index-Base (SIB)
+ */
+ if ((rm & 7) == 4) {
+ ud_inp_next(u);
+
+ op->scale = (1 << SIB_S(ud_inp_curr(u))) & ~1;
+ op->index = UD_R_RAX + (SIB_I(ud_inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
+ op->base = UD_R_RAX + (SIB_B(ud_inp_curr(u)) | (REX_B(u->pfx_rex) << 3));
+
+ /* special conditions for base reference */
+ if (op->index == UD_R_RSP) {
+ op->index = UD_NONE;
+ op->scale = UD_NONE;
+ }
+
+ if (op->base == UD_R_RBP || op->base == UD_R_R13) {
+ if (mod == 0) {
+ op->base = UD_NONE;
+ }
+ if (mod == 1) {
+ op->offset = 8;
+ } else {
+ op->offset = 32;
+ }
+ }
+ }
+ } else if (u->adr_mode == 32) {
+ op->base = UD_R_EAX + rm;
+ if (mod == 1) {
+ op->offset = 8;
+ } else if (mod == 2) {
+ op->offset = 32;
+ } else if (mod == 0 && rm == 5) {
+ op->base = UD_NONE;
+ op->offset = 32;
+ } else {
+ op->offset = 0;
+ }
+
+ /* Scale-Index-Base (SIB) */
+ if ((rm & 7) == 4) {
+ ud_inp_next(u);
+
+ op->scale = (1 << SIB_S(ud_inp_curr(u))) & ~1;
+ op->index = UD_R_EAX + (SIB_I(ud_inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
+ op->base = UD_R_EAX + (SIB_B(ud_inp_curr(u)) | (REX_B(u->pfx_rex) << 3));
+
+ if (op->index == UD_R_ESP) {
+ op->index = UD_NONE;
+ op->scale = UD_NONE;
+ }
+
+ /* special condition for base reference */
+ if (op->base == UD_R_EBP) {
+ if (mod == 0) {
+ op->base = UD_NONE;
+ }
+ if (mod == 1) {
+ op->offset = 8;
+ } else {
+ op->offset = 32;
+ }
+ }
+ }
+ } else {
+ const unsigned int bases[] = { UD_R_BX, UD_R_BX, UD_R_BP, UD_R_BP,
+ UD_R_SI, UD_R_DI, UD_R_BP, UD_R_BX };
+ const unsigned int indices[] = { UD_R_SI, UD_R_DI, UD_R_SI, UD_R_DI,
+ UD_NONE, UD_NONE, UD_NONE, UD_NONE };
+ op->base = bases[rm & 7];
+ op->index = indices[rm & 7];
+ if (mod == 0 && rm == 6) {
+ op->offset= 16;
+ op->base = UD_NONE;
+ } else if (mod == 1) {
+ op->offset = 8;
+ } else if (mod == 2) {
+ op->offset = 16;
+ }
+ }
+
+ /*
+ * extract offset, if any
+ */
+ switch (op->offset) {
+ case 8 : op->lval.ubyte = ud_inp_uint8(u); break;
+ case 16: op->lval.uword = ud_inp_uint16(u); break;
+ case 32: op->lval.udword = ud_inp_uint32(u); break;
+ case 64: op->lval.uqword = ud_inp_uint64(u); break;
+ default: break;
+ }
+}
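+
+/* Worked example (editor's comment): in 32-bit address mode, the modrm
+ * byte 0x44 (mod=01, rm=100) announces a trailing SIB byte and an 8-bit
+ * displacement, while 0x05 (mod=00, rm=101) selects a bare disp32 with
+ * no base register. */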
+
+/* -----------------------------------------------------------------------------
+ * decode_o() - Decodes offset
+ * -----------------------------------------------------------------------------
+ */
+static void
+decode_o(struct ud* u, unsigned int s, struct ud_operand *op)
+{
+ switch (u->adr_mode) {
+ case 64:
+ op->offset = 64;
+ op->lval.uqword = ud_inp_uint64(u);
+ break;
+ case 32:
+ op->offset = 32;
+ op->lval.udword = ud_inp_uint32(u);
+ break;
+ case 16:
+ op->offset = 16;
+ op->lval.uword = ud_inp_uint16(u);
+ break;
+ default:
+ return;
+ }
+ op->type = UD_OP_MEM;
+ op->size = resolve_operand_size(u, s);
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_operands() - Disassembles Operands.
+ * -----------------------------------------------------------------------------
+ */
+static int
+decode_operand(struct ud *u,
+ struct ud_operand *operand,
+ enum ud_operand_code type,
+ unsigned int size)
+{
+ switch (type) {
+ case OP_A :
+ decode_a(u, operand);
+ break;
+ case OP_MR:
+ if (MODRM_MOD(modrm(u)) == 3) {
+ decode_modrm_rm(u, operand, T_GPR,
+ size == SZ_DY ? SZ_MDQ : SZ_V);
+ } else if (size == SZ_WV) {
+ decode_modrm_rm( u, operand, T_GPR, SZ_W);
+ } else if (size == SZ_BV) {
+ decode_modrm_rm( u, operand, T_GPR, SZ_B);
+ } else if (size == SZ_DY) {
+ decode_modrm_rm( u, operand, T_GPR, SZ_D);
+ } else {
+ ASSERT(!"unexpected size");
+ }
+ break;
+ case OP_M:
+ if (MODRM_MOD(modrm(u)) == 3) {
+ u->error = 1;
+ }
+ /* intended fall through */
+ case OP_E:
+ decode_modrm_rm(u, operand, T_GPR, size);
+      break;
+ case OP_G:
+ decode_modrm_reg(u, operand, T_GPR, size);
+ break;
+ case OP_I:
+ decode_imm(u, size, operand);
+ break;
+ case OP_I1:
+ operand->type = UD_OP_CONST;
+ operand->lval.udword = 1;
+ break;
+ case OP_PR:
+ if (MODRM_MOD(modrm(u)) != 3) {
+ u->error = 1;
+ }
+ decode_modrm_rm(u, operand, T_MMX, size);
+ break;
+ case OP_P:
+ decode_modrm_reg(u, operand, T_MMX, size);
+ break;
+ case OP_VR:
+ if (MODRM_MOD(modrm(u)) != 3) {
+ u->error = 1;
+ }
+ /* intended fall through */
+ case OP_W:
+ decode_modrm_rm(u, operand, T_XMM, size);
+ break;
+ case OP_V:
+ decode_modrm_reg(u, operand, T_XMM, size);
+ break;
+ case OP_S:
+ decode_modrm_reg(u, operand, T_SEG, size);
+ break;
+ case OP_AL:
+ case OP_CL:
+ case OP_DL:
+ case OP_BL:
+ case OP_AH:
+ case OP_CH:
+ case OP_DH:
+ case OP_BH:
+ operand->type = UD_OP_REG;
+ operand->base = UD_R_AL + (type - OP_AL);
+ operand->size = 8;
+ break;
+ case OP_DX:
+ operand->type = UD_OP_REG;
+ operand->base = UD_R_DX;
+ operand->size = 16;
+ break;
+ case OP_O:
+ decode_o(u, size, operand);
+ break;
+ case OP_rAXr8:
+ case OP_rCXr9:
+ case OP_rDXr10:
+ case OP_rBXr11:
+ case OP_rSPr12:
+ case OP_rBPr13:
+ case OP_rSIr14:
+ case OP_rDIr15:
+ case OP_rAX:
+ case OP_rCX:
+ case OP_rDX:
+ case OP_rBX:
+ case OP_rSP:
+ case OP_rBP:
+ case OP_rSI:
+ case OP_rDI:
+ operand->type = UD_OP_REG;
+ operand->base = resolve_gpr64(u, type, &operand->size);
+ break;
+ case OP_ALr8b:
+ case OP_CLr9b:
+ case OP_DLr10b:
+ case OP_BLr11b:
+ case OP_AHr12b:
+ case OP_CHr13b:
+ case OP_DHr14b:
+ case OP_BHr15b: {
+ ud_type_t gpr = (type - OP_ALr8b) + UD_R_AL
+ + (REX_B(u->pfx_rex) << 3);
+ if (UD_R_AH <= gpr && u->pfx_rex) {
+ gpr = gpr + 4;
+ }
+ operand->type = UD_OP_REG;
+ operand->base = gpr;
+ break;
+ }
+ case OP_eAX:
+ case OP_eCX:
+ case OP_eDX:
+ case OP_eBX:
+ case OP_eSP:
+ case OP_eBP:
+ case OP_eSI:
+ case OP_eDI:
+ operand->type = UD_OP_REG;
+ operand->base = resolve_gpr32(u, type);
+ operand->size = u->opr_mode == 16 ? 16 : 32;
+ break;
+ case OP_ES:
+ case OP_CS:
+ case OP_DS:
+ case OP_SS:
+ case OP_FS:
+ case OP_GS:
+ /* in 64bits mode, only fs and gs are allowed */
+ if (u->dis_mode == 64) {
+ if (type != OP_FS && type != OP_GS) {
+ u->error= 1;
+ }
+ }
+ operand->type = UD_OP_REG;
+ operand->base = (type - OP_ES) + UD_R_ES;
+ operand->size = 16;
+ break;
+ case OP_J :
+ decode_imm(u, size, operand);
+ operand->type = UD_OP_JIMM;
+ break ;
+ case OP_Q:
+ decode_modrm_rm(u, operand, T_MMX, size);
+ break;
+ case OP_R :
+ decode_modrm_rm(u, operand, T_GPR, size);
+ break;
+ case OP_C:
+ decode_modrm_reg(u, operand, T_CRG, size);
+ break;
+ case OP_D:
+ decode_modrm_reg(u, operand, T_DBG, size);
+ break;
+ case OP_I3 :
+ operand->type = UD_OP_CONST;
+ operand->lval.sbyte = 3;
+ break;
+ case OP_ST0:
+ case OP_ST1:
+ case OP_ST2:
+ case OP_ST3:
+ case OP_ST4:
+ case OP_ST5:
+ case OP_ST6:
+ case OP_ST7:
+ operand->type = UD_OP_REG;
+ operand->base = (type - OP_ST0) + UD_R_ST0;
+ operand->size = 0;
+ break;
+ case OP_AX:
+ operand->type = UD_OP_REG;
+ operand->base = UD_R_AX;
+ operand->size = 16;
+ break;
+ default :
+ operand->type = UD_NONE;
+ break;
+ }
+ return 0;
+}
+
+
+/*
+ * decode_operands
+ *
+ * Disassemble up to 3 operands of the current instruction being
+ * disassembled. By the end of the function, the operand fields
+ * of the ud structure will have been filled.
+ */
+static int
+decode_operands(struct ud* u)
+{
+ decode_operand(u, &u->operand[0],
+ u->itab_entry->operand1.type,
+ u->itab_entry->operand1.size);
+ decode_operand(u, &u->operand[1],
+ u->itab_entry->operand2.type,
+ u->itab_entry->operand2.size);
+ decode_operand(u, &u->operand[2],
+ u->itab_entry->operand3.type,
+ u->itab_entry->operand3.size);
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * clear_insn() - clear instruction structure
+ * -----------------------------------------------------------------------------
+ */
+static void
+clear_insn(register struct ud* u)
+{
+ u->error = 0;
+ u->pfx_seg = 0;
+ u->pfx_opr = 0;
+ u->pfx_adr = 0;
+ u->pfx_lock = 0;
+ u->pfx_repne = 0;
+ u->pfx_rep = 0;
+ u->pfx_repe = 0;
+ u->pfx_rex = 0;
+ u->pfx_insn = 0;
+ u->mnemonic = UD_Inone;
+ u->itab_entry = NULL;
+ u->have_modrm = 0;
+
+ memset( &u->operand[ 0 ], 0, sizeof( struct ud_operand ) );
+ memset( &u->operand[ 1 ], 0, sizeof( struct ud_operand ) );
+ memset( &u->operand[ 2 ], 0, sizeof( struct ud_operand ) );
+}
+
+static int
+resolve_mode( struct ud* u )
+{
+ /* if in error state, bail out */
+ if ( u->error ) return -1;
+
+ /* propagate prefix effects */
+ if ( u->dis_mode == 64 ) { /* set 64bit-mode flags */
+
+ /* Check validity of instruction m64 */
+ if ( P_INV64( u->itab_entry->prefix ) ) {
+ u->error = 1;
+ return -1;
+ }
+
+ /* effective rex prefix is the effective mask for the
+ * instruction hard-coded in the opcode map.
+ */
+ u->pfx_rex = ( u->pfx_rex & 0x40 ) |
+ ( u->pfx_rex & REX_PFX_MASK( u->itab_entry->prefix ) );
+
+ /* whether this instruction has a default operand size of
+ * 64bit, also hardcoded into the opcode map.
+ */
+ u->default64 = P_DEF64( u->itab_entry->prefix );
+ /* calculate effective operand size */
+ if ( REX_W( u->pfx_rex ) ) {
+ u->opr_mode = 64;
+ } else if ( u->pfx_opr ) {
+ u->opr_mode = 16;
+ } else {
+ /* unless the default opr size of instruction is 64,
+ * the effective operand size in the absence of rex.w
+ * prefix is 32.
+ */
+ u->opr_mode = ( u->default64 ) ? 64 : 32;
+ }
+
+ /* calculate effective address size */
+ u->adr_mode = (u->pfx_adr) ? 32 : 64;
+ } else if ( u->dis_mode == 32 ) { /* set 32bit-mode flags */
+ u->opr_mode = ( u->pfx_opr ) ? 16 : 32;
+ u->adr_mode = ( u->pfx_adr ) ? 16 : 32;
+ } else if ( u->dis_mode == 16 ) { /* set 16bit-mode flags */
+ u->opr_mode = ( u->pfx_opr ) ? 32 : 16;
+ u->adr_mode = ( u->pfx_adr ) ? 32 : 16;
+ }
+
+ /* These flags determine which operand to apply the operand size
+ * cast to.
+ */
+ u->c1 = ( P_C1( u->itab_entry->prefix ) ) ? 1 : 0;
+ u->c2 = ( P_C2( u->itab_entry->prefix ) ) ? 1 : 0;
+ u->c3 = ( P_C3( u->itab_entry->prefix ) ) ? 1 : 0;
+
+ /* set flags for implicit addressing */
+ u->implicit_addr = P_IMPADDR( u->itab_entry->prefix );
+
+ return 0;
+}
+
+static int gen_hex( struct ud *u )
+{
+ unsigned int i;
+ unsigned char *src_ptr = ud_inp_sess( u );
+ char* src_hex;
+
+  /* bail out if in error state. */
+ if ( u->error ) return -1;
+  /* output buffer pointer */
+ src_hex = ( char* ) u->insn_hexcode;
+ /* for each byte used to decode instruction */
+ for ( i = 0; i < u->inp_ctr; ++i, ++src_ptr) {
+ sprintf( src_hex, "%02x", *src_ptr & 0xFF );
+ src_hex += 2;
+ }
+ return 0;
+}
+
+
+static inline int
+decode_insn(struct ud *u, uint16_t ptr)
+{
+ ASSERT((ptr & 0x8000) == 0);
+ u->itab_entry = &ud_itab[ ptr ];
+ u->mnemonic = u->itab_entry->mnemonic;
+ return (resolve_mode(u) == 0 &&
+ decode_operands(u) == 0 &&
+ resolve_mnemonic(u) == 0) ? 0 : -1;
+}
+
+
+/*
+ * decode_3dnow()
+ *
+ * Decoding 3dnow is a little tricky because of its strange opcode
+ * structure. The final opcode disambiguation depends on the last
+ * byte that comes after the operands have been decoded. Fortunately,
+ * all 3dnow instructions have the same set of operand types. So we
+ * go ahead and decode the instruction by picking an arbitrary valid
+ * entry in the table, decoding the operands, and reading the final
+ * byte to resolve the mnemonic.
+ */
+static inline int
+decode_3dnow(struct ud* u)
+{
+ uint16_t ptr;
+ ASSERT(u->le->type == UD_TAB__OPC_3DNOW);
+ ASSERT(u->le->table[0xc] != 0);
+ decode_insn(u, u->le->table[0xc]);
+ ud_inp_next(u);
+ if (u->error) {
+ return -1;
+ }
+ ptr = u->le->table[ud_inp_curr(u)];
+ ASSERT((ptr & 0x8000) == 0);
+ u->mnemonic = ud_itab[ptr].mnemonic;
+ return 0;
+}
+
+
+static int
+decode_ssepfx(struct ud *u)
+{
+ uint8_t idx = ((u->pfx_insn & 0xf) + 1) / 2;
+ if (u->le->table[idx] == 0) {
+ idx = 0;
+ }
+ if (idx && u->le->table[idx] != 0) {
+ /*
+ * "Consume" the prefix as a part of the opcode, so it is no
+ * longer exported as an instruction prefix.
+ */
+ switch (u->pfx_insn) {
+ case 0xf2:
+ u->pfx_repne = 0;
+ break;
+ case 0xf3:
+ u->pfx_rep = 0;
+ u->pfx_repe = 0;
+ break;
+ case 0x66:
+ u->pfx_opr = 0;
+ break;
+ }
+ }
+ return decode_ext(u, u->le->table[idx]);
+}
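+
+/* Example (editor's comment): pfx_insn == 0x66 yields idx == 3, selecting
+ * the sse66 slot of the /sse table (none=0, f2=1, f3=2, 66=3). */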
+
+
+/*
+ * decode_ext()
+ *
+ * Decode opcode extensions (if any)
+ */
+static int
+decode_ext(struct ud *u, uint16_t ptr)
+{
+ uint8_t idx = 0;
+ if ((ptr & 0x8000) == 0) {
+ return decode_insn(u, ptr);
+ }
+ u->le = &ud_lookup_table_list[(~0x8000 & ptr)];
+ if (u->le->type == UD_TAB__OPC_3DNOW) {
+ return decode_3dnow(u);
+ }
+
+ switch (u->le->type) {
+ case UD_TAB__OPC_MOD:
+ /* !11 = 0, 11 = 1 */
+ idx = (MODRM_MOD(modrm(u)) + 1) / 4;
+ break;
+ /* disassembly mode/operand size/address size based tables.
+   * 16 = 0, 32 = 1, 64 = 2
+ */
+ case UD_TAB__OPC_MODE:
+ idx = u->dis_mode / 32;
+ break;
+ case UD_TAB__OPC_OSIZE:
+ idx = eff_opr_mode(u->dis_mode, REX_W(u->pfx_rex), u->pfx_opr) / 32;
+ break;
+ case UD_TAB__OPC_ASIZE:
+ idx = eff_adr_mode(u->dis_mode, u->pfx_adr) / 32;
+ break;
+ case UD_TAB__OPC_X87:
+ idx = modrm(u) - 0xC0;
+ break;
+ case UD_TAB__OPC_VENDOR:
+ if (u->vendor == UD_VENDOR_ANY) {
+ /* choose a valid entry */
+ idx = (u->le->table[idx] != 0) ? 0 : 1;
+ } else if (u->vendor == UD_VENDOR_AMD) {
+ idx = 0;
+ } else {
+ idx = 1;
+ }
+ break;
+ case UD_TAB__OPC_RM:
+ idx = MODRM_RM(modrm(u));
+ break;
+ case UD_TAB__OPC_REG:
+ idx = MODRM_REG(modrm(u));
+ break;
+ case UD_TAB__OPC_SSE:
+ return decode_ssepfx(u);
+ default:
+ ASSERT(!"not reached");
+ break;
+ }
+
+ return decode_ext(u, u->le->table[idx]);
+}
+
+
+static inline int
+decode_opcode(struct ud *u)
+{
+ uint16_t ptr;
+ ASSERT(u->le->type == UD_TAB__OPC_TABLE);
+ ud_inp_next(u);
+ if (u->error) {
+ return -1;
+ }
+ ptr = u->le->table[ud_inp_curr(u)];
+ if (ptr & 0x8000) {
+ u->le = &ud_lookup_table_list[ptr & ~0x8000];
+ if (u->le->type == UD_TAB__OPC_TABLE) {
+ return decode_opcode(u);
+ }
+ }
+ return decode_ext(u, ptr);
+}
+
+
+/* =============================================================================
+ * ud_decode() - Instruction decoder. Returns the number of bytes decoded.
+ * =============================================================================
+ */
+unsigned int
+ud_decode(struct ud *u)
+{
+ ud_inp_start(u);
+ clear_insn(u);
+ u->le = &ud_lookup_table_list[0];
+ u->error = decode_prefixes(u) == -1 ||
+ decode_opcode(u) == -1 ||
+ u->error;
+ /* Handle decode error. */
+ if (u->error) {
+ /* clear out the decode data. */
+ clear_insn(u);
+ /* mark the sequence of bytes as invalid. */
+ u->itab_entry = & s_ie__invalid;
+ u->mnemonic = u->itab_entry->mnemonic;
+ }
+
+ /* maybe this stray segment override byte
+ * should be spewed out?
+ */
+ if ( !P_SEG( u->itab_entry->prefix ) &&
+ u->operand[0].type != UD_OP_MEM &&
+ u->operand[1].type != UD_OP_MEM )
+ u->pfx_seg = 0;
+
+ u->insn_offset = u->pc; /* set offset of instruction */
+ u->insn_fill = 0; /* set translation buffer index to 0 */
+ u->pc += u->inp_ctr; /* move program counter by bytes decoded */
+ gen_hex( u ); /* generate hex code */
+
+ /* return number of bytes disassembled. */
+ return u->inp_ctr;
+}
+
+/*
+vim: set ts=2 sw=2 expandtab
+*/
+
+#endif // USE(UDIS86)
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_decode.h b/src/3rdparty/masm/disassembler/udis86/udis86_decode.h
new file mode 100644
index 0000000000..940ed5ad6f
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_decode.h
@@ -0,0 +1,258 @@
+/* udis86 - libudis86/decode.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_DECODE_H
+#define UD_DECODE_H
+
+#include "udis86_types.h"
+#include "udis86_itab.h"
+
+#define MAX_INSN_LENGTH 15
+
+/* register classes */
+#define T_NONE 0
+#define T_GPR 1
+#define T_MMX 2
+#define T_CRG 3
+#define T_DBG 4
+#define T_SEG 5
+#define T_XMM 6
+
+/* itab prefix bits */
+#define P_none ( 0 )
+#define P_cast ( 1 << 0 )
+#define P_CAST(n) ( ( n >> 0 ) & 1 )
+#define P_c1 ( 1 << 0 )
+#define P_C1(n) ( ( n >> 0 ) & 1 )
+#define P_rexb ( 1 << 1 )
+#define P_REXB(n) ( ( n >> 1 ) & 1 )
+#define P_depM ( 1 << 2 )
+#define P_DEPM(n) ( ( n >> 2 ) & 1 )
+#define P_c3 ( 1 << 3 )
+#define P_C3(n) ( ( n >> 3 ) & 1 )
+#define P_inv64 ( 1 << 4 )
+#define P_INV64(n) ( ( n >> 4 ) & 1 )
+#define P_rexw ( 1 << 5 )
+#define P_REXW(n) ( ( n >> 5 ) & 1 )
+#define P_c2 ( 1 << 6 )
+#define P_C2(n) ( ( n >> 6 ) & 1 )
+#define P_def64 ( 1 << 7 )
+#define P_DEF64(n) ( ( n >> 7 ) & 1 )
+#define P_rexr ( 1 << 8 )
+#define P_REXR(n) ( ( n >> 8 ) & 1 )
+#define P_oso ( 1 << 9 )
+#define P_OSO(n) ( ( n >> 9 ) & 1 )
+#define P_aso ( 1 << 10 )
+#define P_ASO(n) ( ( n >> 10 ) & 1 )
+#define P_rexx ( 1 << 11 )
+#define P_REXX(n) ( ( n >> 11 ) & 1 )
+#define P_ImpAddr ( 1 << 12 )
+#define P_IMPADDR(n) ( ( n >> 12 ) & 1 )
+#define P_seg ( 1 << 13 )
+#define P_SEG(n) ( ( n >> 13 ) & 1 )
+#define P_sext ( 1 << 14 )
+#define P_SEXT(n) ( ( n >> 14 ) & 1 )
+
+/* rex prefix bits */
+#define REX_W(r) ( ( 0xF & ( r ) ) >> 3 )
+#define REX_R(r) ( ( 0x7 & ( r ) ) >> 2 )
+#define REX_X(r) ( ( 0x3 & ( r ) ) >> 1 )
+#define REX_B(r) ( ( 0x1 & ( r ) ) >> 0 )
+#define REX_PFX_MASK(n) ( ( P_REXW(n) << 3 ) | \
+ ( P_REXR(n) << 2 ) | \
+ ( P_REXX(n) << 1 ) | \
+ ( P_REXB(n) << 0 ) )
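+
+/* Example (editor's comment): the prefix byte 0x49 (REX.WB) gives
+ * REX_W == 1, REX_R == 0, REX_X == 0 and REX_B == 1. */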
+
+/* scale-index-base bits */
+#define SIB_S(b) ( ( b ) >> 6 )
+#define SIB_I(b) ( ( ( b ) >> 3 ) & 7 )
+#define SIB_B(b) ( ( b ) & 7 )
+
+/* modrm bits */
+#define MODRM_REG(b) ( ( ( b ) >> 3 ) & 7 )
+#define MODRM_NNN(b) ( ( ( b ) >> 3 ) & 7 )
+#define MODRM_MOD(b) ( ( ( b ) >> 6 ) & 3 )
+#define MODRM_RM(b) ( ( b ) & 7 )
+
+/* operand type constants -- order is important! */
+
+enum ud_operand_code {
+ OP_NONE,
+
+ OP_A, OP_E, OP_M, OP_G,
+ OP_I,
+
+ OP_AL, OP_CL, OP_DL, OP_BL,
+ OP_AH, OP_CH, OP_DH, OP_BH,
+
+ OP_ALr8b, OP_CLr9b, OP_DLr10b, OP_BLr11b,
+ OP_AHr12b, OP_CHr13b, OP_DHr14b, OP_BHr15b,
+
+ OP_AX, OP_CX, OP_DX, OP_BX,
+ OP_SI, OP_DI, OP_SP, OP_BP,
+
+ OP_rAX, OP_rCX, OP_rDX, OP_rBX,
+ OP_rSP, OP_rBP, OP_rSI, OP_rDI,
+
+ OP_rAXr8, OP_rCXr9, OP_rDXr10, OP_rBXr11,
+ OP_rSPr12, OP_rBPr13, OP_rSIr14, OP_rDIr15,
+
+ OP_eAX, OP_eCX, OP_eDX, OP_eBX,
+ OP_eSP, OP_eBP, OP_eSI, OP_eDI,
+
+ OP_ES, OP_CS, OP_SS, OP_DS,
+ OP_FS, OP_GS,
+
+ OP_ST0, OP_ST1, OP_ST2, OP_ST3,
+ OP_ST4, OP_ST5, OP_ST6, OP_ST7,
+
+ OP_J, OP_S, OP_O,
+ OP_I1, OP_I3,
+
+ OP_V, OP_W, OP_Q, OP_P,
+
+ OP_R, OP_C, OP_D, OP_VR, OP_PR,
+
+ OP_MR
+} UD_ATTR_PACKED;
+
+
+/* operand size constants */
+
+enum ud_operand_size {
+ SZ_NA = 0,
+ SZ_Z = 1,
+ SZ_V = 2,
+ SZ_P = 3,
+ SZ_WP = 4,
+ SZ_DP = 5,
+ SZ_MDQ = 6,
+ SZ_RDQ = 7,
+
+ /* the following values are used as is,
+ * and thus hard-coded. changing them
+ * will break internals
+ */
+ SZ_B = 8,
+ SZ_W = 16,
+ SZ_D = 32,
+ SZ_Q = 64,
+ SZ_T = 80,
+ SZ_O = 128,
+
+ SZ_WV = 17,
+ SZ_BV = 18,
+ SZ_DY = 19
+
+} UD_ATTR_PACKED;
+
+
+/* A single operand of an entry in the instruction table.
+ * (internal use only)
+ */
+struct ud_itab_entry_operand
+{
+ enum ud_operand_code type;
+ enum ud_operand_size size;
+};
+
+
+/* A single entry in an instruction table.
+ *(internal use only)
+ */
+struct ud_itab_entry
+{
+ enum ud_mnemonic_code mnemonic;
+ struct ud_itab_entry_operand operand1;
+ struct ud_itab_entry_operand operand2;
+ struct ud_itab_entry_operand operand3;
+ uint32_t prefix;
+};
+
+struct ud_lookup_table_list_entry {
+ const uint16_t *table;
+ enum ud_table_type type;
+ const char *meta;
+};
+
+
+static inline unsigned int sse_pfx_idx( const unsigned int pfx )
+{
+ /* 00 = 0
+ * f2 = 1
+ * f3 = 2
+ * 66 = 3
+ */
+ return ( ( pfx & 0xf ) + 1 ) / 2;
+}
+
+static inline unsigned int mode_idx( const unsigned int mode )
+{
+ /* 16 = 0
+ * 32 = 1
+ * 64 = 2
+ */
+ return ( mode / 32 );
+}
+
+static inline unsigned int modrm_mod_idx( const unsigned int mod )
+{
+ /* !11 = 0
+ * 11 = 1
+ */
+ return ( mod + 1 ) / 4;
+}
+
+static inline unsigned int vendor_idx( const unsigned int vendor )
+{
+ switch ( vendor ) {
+ case UD_VENDOR_AMD: return 0;
+ case UD_VENDOR_INTEL: return 1;
+ case UD_VENDOR_ANY: return 2;
+ default: return 2;
+ }
+}
+
+static inline unsigned int is_group_ptr( uint16_t ptr )
+{
+ return ( 0x8000 & ptr );
+}
+
+static inline unsigned int group_idx( uint16_t ptr )
+{
+ return ( ~0x8000 & ptr );
+}
+
+
+extern struct ud_itab_entry ud_itab[];
+extern struct ud_lookup_table_list_entry ud_lookup_table_list[];
+
+#endif /* UD_DECODE_H */
+
+/* vim:cindent
+ * vim:expandtab
+ * vim:ts=4
+ * vim:sw=4
+ */
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_extern.h b/src/3rdparty/masm/disassembler/udis86/udis86_extern.h
new file mode 100644
index 0000000000..8e87721e8c
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_extern.h
@@ -0,0 +1,88 @@
+/* udis86 - libudis86/extern.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_EXTERN_H
+#define UD_EXTERN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "udis86_types.h"
+
+/* ============================= PUBLIC API ================================= */
+
+extern void ud_init(struct ud*);
+
+extern void ud_set_mode(struct ud*, uint8_t);
+
+extern void ud_set_pc(struct ud*, uint64_t);
+
+extern void ud_set_input_hook(struct ud*, int (*)(struct ud*));
+
+extern void ud_set_input_buffer(struct ud*, uint8_t*, size_t);
+
+#ifndef __UD_STANDALONE__
+extern void ud_set_input_file(struct ud*, FILE*);
+#endif /* __UD_STANDALONE__ */
+
+extern void ud_set_vendor(struct ud*, unsigned);
+
+extern void ud_set_syntax(struct ud*, void (*)(struct ud*));
+
+extern void ud_input_skip(struct ud*, size_t);
+
+extern int ud_input_end(struct ud*);
+
+extern unsigned int ud_decode(struct ud*);
+
+extern unsigned int ud_disassemble(struct ud*);
+
+extern void ud_translate_intel(struct ud*);
+
+extern void ud_translate_att(struct ud*);
+
+extern char* ud_insn_asm(struct ud* u);
+
+extern uint8_t* ud_insn_ptr(struct ud* u);
+
+extern uint64_t ud_insn_off(struct ud*);
+
+extern char* ud_insn_hex(struct ud*);
+
+extern unsigned int ud_insn_len(struct ud* u);
+
+extern const char* ud_lookup_mnemonic(enum ud_mnemonic_code c);
+
+extern void ud_set_user_opaque_data(struct ud*, void*);
+
+extern void *ud_get_user_opaque_data(struct ud*);
+
+/* ========================================================================== */
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_input.c b/src/3rdparty/masm/disassembler/udis86/udis86_input.c
new file mode 100644
index 0000000000..4dbe328766
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_input.c
@@ -0,0 +1,262 @@
+/* udis86 - libudis86/input.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_extern.h"
+#include "udis86_types.h"
+#include "udis86_input.h"
+
+/* -----------------------------------------------------------------------------
+ * inp_buff_hook() - Hook for buffered inputs.
+ * -----------------------------------------------------------------------------
+ */
+static int
+inp_buff_hook(struct ud* u)
+{
+ if (u->inp_buff < u->inp_buff_end)
+ return *u->inp_buff++;
+ else return -1;
+}
+
+#ifndef __UD_STANDALONE__
+/* -----------------------------------------------------------------------------
+ * inp_file_hook() - Hook for FILE inputs.
+ * -----------------------------------------------------------------------------
+ */
+static int
+inp_file_hook(struct ud* u)
+{
+ return fgetc(u->inp_file);
+}
+#endif /* __UD_STANDALONE__*/
+
+/* =============================================================================
+ * ud_set_input_hook() - Sets the input hook.
+ * =============================================================================
+ */
+extern void
+ud_set_input_hook(register struct ud* u, int (*hook)(struct ud*))
+{
+ u->inp_hook = hook;
+ ud_inp_init(u);
+}
+
+extern void
+ud_set_user_opaque_data( struct ud * u, void * opaque )
+{
+ u->user_opaque_data = opaque;
+}
+
+extern void *
+ud_get_user_opaque_data( struct ud * u )
+{
+ return u->user_opaque_data;
+}
+
+/* =============================================================================
+ * ud_set_input_buffer() - Sets a buffer as input.
+ * =============================================================================
+ */
+extern void
+ud_set_input_buffer(register struct ud* u, uint8_t* buf, size_t len)
+{
+ u->inp_hook = inp_buff_hook;
+ u->inp_buff = buf;
+ u->inp_buff_end = buf + len;
+ ud_inp_init(u);
+}
+
+#ifndef __UD_STANDALONE__
+/* =============================================================================
+ * ud_set_input_file() - Sets a FILE stream as input.
+ * =============================================================================
+ */
+extern void
+ud_set_input_file(register struct ud* u, FILE* f)
+{
+ u->inp_hook = inp_file_hook;
+ u->inp_file = f;
+ ud_inp_init(u);
+}
+#endif /* __UD_STANDALONE__ */
+
+/* =============================================================================
+ * ud_input_skip() - Skip n input bytes.
+ * =============================================================================
+ */
+extern void
+ud_input_skip(struct ud* u, size_t n)
+{
+ while (n--) {
+ u->inp_hook(u);
+ }
+}
+
+/* =============================================================================
+ * ud_input_end() - Test for end of input.
+ * =============================================================================
+ */
+extern int
+ud_input_end(struct ud* u)
+{
+ return (u->inp_curr == u->inp_fill) && u->inp_end;
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_next() - Loads and returns the next byte from input.
+ *
+ * inp_curr and inp_fill are indices into the cache. Both are 8 bits wide, so
+ * they wrap around naturally, turning the 256-byte cache into a circular
+ * buffer.
+ *
+ * A buffer inp_sess stores the bytes disassembled for a single session.
+ * -----------------------------------------------------------------------------
+ */
+extern uint8_t ud_inp_next(struct ud* u)
+{
+ int c = -1;
+  /* if the current pointer is not up to the fill point in the
+ * input cache.
+ */
+ if ( u->inp_curr != u->inp_fill ) {
+ c = u->inp_cache[ ++u->inp_curr ];
+ /* if !end-of-input, call the input hook and get a byte */
+ } else if ( u->inp_end || ( c = u->inp_hook( u ) ) == -1 ) {
+    /* end-of-input: mark it as an error, since the decoder
+     * expected one more byte.
+     */
+ u->error = 1;
+ /* flag end of input */
+ u->inp_end = 1;
+ return 0;
+ } else {
+ /* increment pointers, we have a new byte. */
+ u->inp_curr = ++u->inp_fill;
+ /* add the byte to the cache */
+ u->inp_cache[ u->inp_fill ] = c;
+ }
+ /* record bytes input per decode-session. */
+ u->inp_sess[ u->inp_ctr++ ] = c;
+ /* return byte */
+ return ( uint8_t ) c;
+}
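+
+/* Illustrative trace (not part of the upstream sources): starting from an
+ * empty cache (inp_curr == inp_fill == 0), ud_inp_next() calls inp_hook and
+ * stores the byte at inp_cache[1]; after ud_inp_back(), the next call finds
+ * inp_curr behind inp_fill and replays inp_cache[1] instead of re-invoking
+ * the hook.
+ */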
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_back() - Move back a single byte in the stream.
+ * -----------------------------------------------------------------------------
+ */
+extern void
+ud_inp_back(struct ud* u)
+{
+ if ( u->inp_ctr > 0 ) {
+ --u->inp_curr;
+ --u->inp_ctr;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_peek() - Peek into the next byte in source.
+ * -----------------------------------------------------------------------------
+ */
+extern uint8_t
+ud_inp_peek(struct ud* u)
+{
+ uint8_t r = ud_inp_next(u);
+  if ( !u->error ) ud_inp_back(u); /* Don't back up if there was an error */
+ return r;
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_move() - Move ahead n input bytes.
+ * -----------------------------------------------------------------------------
+ */
+extern void
+ud_inp_move(struct ud* u, size_t n)
+{
+ while (n--)
+ ud_inp_next(u);
+}
+
+/*------------------------------------------------------------------------------
+ * ud_inp_uintN() - Reads a little-endian uintN from the source.
+ *------------------------------------------------------------------------------
+ */
+extern uint8_t
+ud_inp_uint8(struct ud* u)
+{
+ return ud_inp_next(u);
+}
+
+extern uint16_t
+ud_inp_uint16(struct ud* u)
+{
+ uint16_t r, ret;
+
+ ret = ud_inp_next(u);
+ r = ud_inp_next(u);
+ return ret | (r << 8);
+}
+
+extern uint32_t
+ud_inp_uint32(struct ud* u)
+{
+ uint32_t r, ret;
+
+ ret = ud_inp_next(u);
+ r = ud_inp_next(u);
+ ret = ret | (r << 8);
+ r = ud_inp_next(u);
+ ret = ret | (r << 16);
+ r = ud_inp_next(u);
+ return ret | (r << 24);
+}
+
+extern uint64_t
+ud_inp_uint64(struct ud* u)
+{
+ uint64_t r, ret;
+
+ ret = ud_inp_next(u);
+ r = ud_inp_next(u);
+ ret = ret | (r << 8);
+ r = ud_inp_next(u);
+ ret = ret | (r << 16);
+ r = ud_inp_next(u);
+ ret = ret | (r << 24);
+ r = ud_inp_next(u);
+ ret = ret | (r << 32);
+ r = ud_inp_next(u);
+ ret = ret | (r << 40);
+ r = ud_inp_next(u);
+ ret = ret | (r << 48);
+ r = ud_inp_next(u);
+ return ret | (r << 56);
+}
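+
+/* Note (illustrative, not part of the upstream sources): the readers above
+ * assemble values in little-endian order, e.g. the byte sequence
+ * 0x78 0x56 0x34 0x12 yields 0x12345678 from ud_inp_uint32().
+ */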
+
+#endif // USE(UDIS86)
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_input.h b/src/3rdparty/masm/disassembler/udis86/udis86_input.h
new file mode 100644
index 0000000000..96865a88b5
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_input.h
@@ -0,0 +1,67 @@
+/* udis86 - libudis86/input.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_INPUT_H
+#define UD_INPUT_H
+
+#include "udis86_types.h"
+
+uint8_t ud_inp_next(struct ud*);
+uint8_t ud_inp_peek(struct ud*);
+uint8_t ud_inp_uint8(struct ud*);
+uint16_t ud_inp_uint16(struct ud*);
+uint32_t ud_inp_uint32(struct ud*);
+uint64_t ud_inp_uint64(struct ud*);
+void ud_inp_move(struct ud*, size_t);
+void ud_inp_back(struct ud*);
+
+/* ud_inp_init() - Initializes the input system. */
+#define ud_inp_init(u) \
+do { \
+ u->inp_curr = 0; \
+ u->inp_fill = 0; \
+ u->inp_ctr = 0; \
+ u->inp_end = 0; \
+} while (0)
+
+/* ud_inp_start() - Should be called before each decode operation. */
+#define ud_inp_start(u) u->inp_ctr = 0
+
+/* ud_inp_reset() - Resets the current pointer to its position before the
+ * current instruction disassembly was started.
+ */
+#define ud_inp_reset(u) \
+do { \
+ u->inp_curr -= u->inp_ctr; \
+ u->inp_ctr = 0; \
+} while (0)
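+
+/* Example (illustrative, not part of the upstream sources): if three bytes
+ * of a candidate instruction have been consumed (inp_ctr == 3) and decoding
+ * must restart, ud_inp_reset(u) rewinds inp_curr by three so the same bytes
+ * are replayed from the cache.
+ */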
+
+/* ud_inp_sess() - Returns a pointer to the current session buffer. */
+#define ud_inp_sess(u) (u->inp_sess)
+
+/* ud_inp_curr() - Returns the current input byte. */
+#define ud_inp_curr(u) ((u)->inp_cache[(u)->inp_curr])
+
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c b/src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c
new file mode 100644
index 0000000000..80dda3a199
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_itab.c"
+
+#endif
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c b/src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c
new file mode 100644
index 0000000000..0d1c57d482
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c
@@ -0,0 +1,252 @@
+/* udis86 - libudis86/syn-att.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_types.h"
+#include "udis86_extern.h"
+#include "udis86_decode.h"
+#include "udis86_itab.h"
+#include "udis86_syn.h"
+
+/* -----------------------------------------------------------------------------
+ * opr_cast() - Prints an operand cast.
+ * -----------------------------------------------------------------------------
+ */
+static void
+opr_cast(struct ud* u, struct ud_operand* op)
+{
+ switch(op->size) {
+ case 16 : case 32 :
+ mkasm(u, "*"); break;
+ default: break;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * gen_operand() - Generates assembly output for each operand.
+ * -----------------------------------------------------------------------------
+ */
+static void
+gen_operand(struct ud* u, struct ud_operand* op)
+{
+ switch(op->type) {
+ case UD_OP_REG:
+ mkasm(u, "%%%s", ud_reg_tab[op->base - UD_R_AL]);
+ break;
+
+ case UD_OP_MEM:
+ if (u->br_far) opr_cast(u, op);
+ if (u->pfx_seg)
+ mkasm(u, "%%%s:", ud_reg_tab[u->pfx_seg - UD_R_AL]);
+ if (op->offset == 8) {
+ if (op->lval.sbyte < 0)
+ mkasm(u, "-0x%x", (-op->lval.sbyte) & 0xff);
+ else mkasm(u, "0x%x", op->lval.sbyte);
+ }
+ else if (op->offset == 16)
+ mkasm(u, "0x%x", op->lval.uword);
+ else if (op->offset == 32)
+ mkasm(u, "0x%lx", (unsigned long)op->lval.udword);
+ else if (op->offset == 64)
+ mkasm(u, "0x" FMT64 "x", op->lval.uqword);
+
+ if (op->base)
+ mkasm(u, "(%%%s", ud_reg_tab[op->base - UD_R_AL]);
+ if (op->index) {
+ if (op->base)
+ mkasm(u, ",");
+ else mkasm(u, "(");
+ mkasm(u, "%%%s", ud_reg_tab[op->index - UD_R_AL]);
+ }
+ if (op->scale)
+ mkasm(u, ",%d", op->scale);
+ if (op->base || op->index)
+ mkasm(u, ")");
+ break;
+
+ case UD_OP_IMM: {
+ int64_t imm = 0;
+ uint64_t sext_mask = 0xffffffffffffffffull;
+ unsigned sext_size = op->size;
+
+ switch (op->size) {
+ case 8: imm = op->lval.sbyte; break;
+ case 16: imm = op->lval.sword; break;
+ case 32: imm = op->lval.sdword; break;
+ case 64: imm = op->lval.sqword; break;
+ }
+ if ( P_SEXT( u->itab_entry->prefix ) ) {
+ sext_size = u->operand[ 0 ].size;
+ if ( u->mnemonic == UD_Ipush )
+ /* push sign-extends to operand size */
+ sext_size = u->opr_mode;
+ }
+ if ( sext_size < 64 )
+ sext_mask = ( 1ull << sext_size ) - 1;
+ mkasm( u, "$0x" FMT64 "x", imm & sext_mask );
+
+ break;
+ }
+
+ case UD_OP_JIMM:
+ switch (op->size) {
+ case 8:
+ mkasm(u, "0x" FMT64 "x", u->pc + op->lval.sbyte);
+ break;
+ case 16:
+ mkasm(u, "0x" FMT64 "x", (u->pc + op->lval.sword) & 0xffff );
+ break;
+ case 32:
+ if (u->dis_mode == 32)
+ mkasm(u, "0x" FMT64 "x", (u->pc + op->lval.sdword) & 0xffffffff);
+ else
+ mkasm(u, "0x" FMT64 "x", u->pc + op->lval.sdword);
+ break;
+ default:break;
+ }
+ break;
+
+ case UD_OP_PTR:
+ switch (op->size) {
+ case 32:
+ mkasm(u, "$0x%x, $0x%x", op->lval.ptr.seg,
+ op->lval.ptr.off & 0xFFFF);
+ break;
+ case 48:
+ mkasm(u, "$0x%x, $0x%lx", op->lval.ptr.seg,
+ (unsigned long)op->lval.ptr.off);
+ break;
+ }
+ break;
+
+ default: return;
+ }
+}
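+
+/* Worked example (illustrative, not part of the upstream sources): for a
+ * "push" with a sign-extended byte immediate (opcode 6a ff), imm == -1 and
+ * sext_size is widened to opr_mode; with opr_mode == 32 the mask becomes
+ * 0xffffffff, so the operand is printed as $0xffffffff.
+ */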
+
+/* =============================================================================
+ * Translates the decoded instruction to AT&T syntax.
+ * =============================================================================
+ */
+extern void
+ud_translate_att(struct ud *u)
+{
+ int size = 0;
+
+ /* check if P_OSO prefix is used */
+ if (! P_OSO(u->itab_entry->prefix) && u->pfx_opr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "o32 ");
+ break;
+ case 32:
+ case 64:
+ mkasm(u, "o16 ");
+ break;
+ }
+ }
+
+ /* check if P_ASO prefix was used */
+ if (! P_ASO(u->itab_entry->prefix) && u->pfx_adr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "a32 ");
+ break;
+ case 32:
+ mkasm(u, "a16 ");
+ break;
+ case 64:
+ mkasm(u, "a32 ");
+ break;
+ }
+ }
+
+ if (u->pfx_lock)
+ mkasm(u, "lock ");
+ if (u->pfx_rep)
+ mkasm(u, "rep ");
+ if (u->pfx_repne)
+ mkasm(u, "repne ");
+
+ /* special instructions */
+ switch (u->mnemonic) {
+ case UD_Iretf:
+ mkasm(u, "lret ");
+ break;
+ case UD_Idb:
+ mkasm(u, ".byte 0x%x", u->operand[0].lval.ubyte);
+ return;
+ case UD_Ijmp:
+ case UD_Icall:
+ if (u->br_far) mkasm(u, "l");
+ mkasm(u, "%s", ud_lookup_mnemonic(u->mnemonic));
+ break;
+ case UD_Ibound:
+ case UD_Ienter:
+ if (u->operand[0].type != UD_NONE)
+ gen_operand(u, &u->operand[0]);
+ if (u->operand[1].type != UD_NONE) {
+ mkasm(u, ",");
+ gen_operand(u, &u->operand[1]);
+ }
+ return;
+ default:
+ mkasm(u, "%s", ud_lookup_mnemonic(u->mnemonic));
+ }
+
+ if (u->c1)
+ size = u->operand[0].size;
+ else if (u->c2)
+ size = u->operand[1].size;
+ else if (u->c3)
+ size = u->operand[2].size;
+
+ if (size == 8)
+ mkasm(u, "b");
+ else if (size == 16)
+ mkasm(u, "w");
+ else if (size == 64)
+ mkasm(u, "q");
+
+ mkasm(u, " ");
+
+ if (u->operand[2].type != UD_NONE) {
+ gen_operand(u, &u->operand[2]);
+ mkasm(u, ", ");
+ }
+
+ if (u->operand[1].type != UD_NONE) {
+ gen_operand(u, &u->operand[1]);
+ mkasm(u, ", ");
+ }
+
+ if (u->operand[0].type != UD_NONE)
+ gen_operand(u, &u->operand[0]);
+}
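+
+/* Example (illustrative, not part of the upstream sources): a "mov" whose
+ * first operand is 8 bits wide sets size == 8 above and is emitted as
+ * "movb"; 32-bit operands get no suffix in this implementation.
+ */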
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c b/src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c
new file mode 100644
index 0000000000..38251db889
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c
@@ -0,0 +1,278 @@
+/* udis86 - libudis86/syn-intel.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_types.h"
+#include "udis86_extern.h"
+#include "udis86_decode.h"
+#include "udis86_itab.h"
+#include "udis86_syn.h"
+
+/* -----------------------------------------------------------------------------
+ * opr_cast() - Prints an operand cast.
+ * -----------------------------------------------------------------------------
+ */
+static void
+opr_cast(struct ud* u, struct ud_operand* op)
+{
+ switch(op->size) {
+ case 8: mkasm(u, "byte " ); break;
+ case 16: mkasm(u, "word " ); break;
+ case 32: mkasm(u, "dword "); break;
+ case 64: mkasm(u, "qword "); break;
+ case 80: mkasm(u, "tword "); break;
+ default: break;
+ }
+ if (u->br_far)
+ mkasm(u, "far ");
+}
+
+/* -----------------------------------------------------------------------------
+ * gen_operand() - Generates assembly output for each operand.
+ * -----------------------------------------------------------------------------
+ */
+static void gen_operand(struct ud* u, struct ud_operand* op, int syn_cast)
+{
+ switch(op->type) {
+ case UD_OP_REG:
+ mkasm(u, "%s", ud_reg_tab[op->base - UD_R_AL]);
+ break;
+
+ case UD_OP_MEM: {
+
+ int op_f = 0;
+
+ if (syn_cast)
+ opr_cast(u, op);
+
+ mkasm(u, "[");
+
+ if (u->pfx_seg)
+ mkasm(u, "%s:", ud_reg_tab[u->pfx_seg - UD_R_AL]);
+
+ if (op->base) {
+ mkasm(u, "%s", ud_reg_tab[op->base - UD_R_AL]);
+ op_f = 1;
+ }
+
+ if (op->index) {
+ if (op_f)
+ mkasm(u, "+");
+ mkasm(u, "%s", ud_reg_tab[op->index - UD_R_AL]);
+ op_f = 1;
+ }
+
+ if (op->scale)
+ mkasm(u, "*%d", op->scale);
+
+ if (op->offset == 8) {
+ if (op->lval.sbyte < 0)
+ mkasm(u, "-0x%x", -op->lval.sbyte);
+ else mkasm(u, "%s0x%x", (op_f) ? "+" : "", op->lval.sbyte);
+ }
+ else if (op->offset == 16)
+ mkasm(u, "%s0x%x", (op_f) ? "+" : "", op->lval.uword);
+ else if (op->offset == 32) {
+ if (u->adr_mode == 64) {
+ if (op->lval.sdword < 0)
+ mkasm(u, "-0x%x", -op->lval.sdword);
+ else mkasm(u, "%s0x%x", (op_f) ? "+" : "", op->lval.sdword);
+ }
+ else mkasm(u, "%s0x%lx", (op_f) ? "+" : "", (unsigned long)op->lval.udword);
+ }
+ else if (op->offset == 64)
+ mkasm(u, "%s0x" FMT64 "x", (op_f) ? "+" : "", op->lval.uqword);
+
+ mkasm(u, "]");
+ break;
+ }
+
+ case UD_OP_IMM: {
+ int64_t imm = 0;
+ uint64_t sext_mask = 0xffffffffffffffffull;
+ unsigned sext_size = op->size;
+
+ if (syn_cast)
+ opr_cast(u, op);
+ switch (op->size) {
+ case 8: imm = op->lval.sbyte; break;
+ case 16: imm = op->lval.sword; break;
+ case 32: imm = op->lval.sdword; break;
+ case 64: imm = op->lval.sqword; break;
+ }
+ if ( P_SEXT( u->itab_entry->prefix ) ) {
+ sext_size = u->operand[ 0 ].size;
+ if ( u->mnemonic == UD_Ipush )
+ /* push sign-extends to operand size */
+ sext_size = u->opr_mode;
+ }
+ if ( sext_size < 64 )
+ sext_mask = ( 1ull << sext_size ) - 1;
+ mkasm( u, "0x" FMT64 "x", imm & sext_mask );
+
+ break;
+ }
+
+
+ case UD_OP_JIMM:
+ if (syn_cast) opr_cast(u, op);
+ switch (op->size) {
+ case 8:
+ mkasm(u, "0x" FMT64 "x", u->pc + op->lval.sbyte);
+ break;
+ case 16:
+ mkasm(u, "0x" FMT64 "x", ( u->pc + op->lval.sword ) & 0xffff );
+ break;
+ case 32:
+ mkasm(u, "0x" FMT64 "x", ( u->pc + op->lval.sdword ) & 0xfffffffful );
+ break;
+ default:break;
+ }
+ break;
+
+ case UD_OP_PTR:
+ switch (op->size) {
+ case 32:
+ mkasm(u, "word 0x%x:0x%x", op->lval.ptr.seg,
+ op->lval.ptr.off & 0xFFFF);
+ break;
+ case 48:
+ mkasm(u, "dword 0x%x:0x%lx", op->lval.ptr.seg,
+ (unsigned long)op->lval.ptr.off);
+ break;
+ }
+ break;
+
+ case UD_OP_CONST:
+ if (syn_cast) opr_cast(u, op);
+ mkasm(u, "%d", op->lval.udword);
+ break;
+
+ default: return;
+ }
+}
+
+/* =============================================================================
+ * Translates the decoded instruction to Intel syntax.
+ * =============================================================================
+ */
+extern void ud_translate_intel(struct ud* u)
+{
+ /* -- prefixes -- */
+
+ /* check if P_OSO prefix is used */
+ if (! P_OSO(u->itab_entry->prefix) && u->pfx_opr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "o32 ");
+ break;
+ case 32:
+ case 64:
+ mkasm(u, "o16 ");
+ break;
+ }
+ }
+
+ /* check if P_ASO prefix was used */
+ if (! P_ASO(u->itab_entry->prefix) && u->pfx_adr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "a32 ");
+ break;
+ case 32:
+ mkasm(u, "a16 ");
+ break;
+ case 64:
+ mkasm(u, "a32 ");
+ break;
+ }
+ }
+
+ if ( u->pfx_seg &&
+ u->operand[0].type != UD_OP_MEM &&
+ u->operand[1].type != UD_OP_MEM ) {
+ mkasm(u, "%s ", ud_reg_tab[u->pfx_seg - UD_R_AL]);
+ }
+ if (u->pfx_lock)
+ mkasm(u, "lock ");
+ if (u->pfx_rep)
+ mkasm(u, "rep ");
+ if (u->pfx_repne)
+ mkasm(u, "repne ");
+
+ /* print the instruction mnemonic */
+ mkasm(u, "%s ", ud_lookup_mnemonic(u->mnemonic));
+
+ /* operand 1 */
+ if (u->operand[0].type != UD_NONE) {
+ int cast = 0;
+ if ( u->operand[0].type == UD_OP_IMM &&
+ u->operand[1].type == UD_NONE )
+ cast = u->c1;
+ if ( u->operand[0].type == UD_OP_MEM ) {
+ cast = u->c1;
+ if ( u->operand[1].type == UD_OP_IMM ||
+ u->operand[1].type == UD_OP_CONST )
+ cast = 1;
+ if ( u->operand[1].type == UD_NONE )
+ cast = 1;
+ if ( ( u->operand[0].size != u->operand[1].size ) && u->operand[1].size )
+ cast = 1;
+ } else if ( u->operand[ 0 ].type == UD_OP_JIMM ) {
+ if ( u->operand[ 0 ].size > 8 ) cast = 1;
+ }
+ gen_operand(u, &u->operand[0], cast);
+ }
+ /* operand 2 */
+ if (u->operand[1].type != UD_NONE) {
+ int cast = 0;
+ mkasm(u, ", ");
+ if ( u->operand[1].type == UD_OP_MEM ) {
+ cast = u->c1;
+
+ if ( u->operand[0].type != UD_OP_REG )
+ cast = 1;
+ if ( u->operand[0].size != u->operand[1].size && u->operand[1].size )
+ cast = 1;
+ if ( u->operand[0].type == UD_OP_REG &&
+ u->operand[0].base >= UD_R_ES &&
+ u->operand[0].base <= UD_R_GS )
+ cast = 0;
+ }
+ gen_operand(u, &u->operand[1], cast );
+ }
+
+ /* operand 3 */
+ if (u->operand[2].type != UD_NONE) {
+ mkasm(u, ", ");
+ gen_operand(u, &u->operand[2], u->c3);
+ }
+}
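+
+/* Example (illustrative, not part of the upstream sources): for
+ * "mov [eax], 0x1", operand 1 is UD_OP_MEM and operand 2 is UD_OP_IMM, so
+ * cast is forced to 1 above and opr_cast() emits the size keyword, giving
+ * "mov dword [eax], 0x1".
+ */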
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn.c b/src/3rdparty/masm/disassembler/udis86/udis86_syn.c
new file mode 100644
index 0000000000..31a45ea5c5
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn.c
@@ -0,0 +1,86 @@
+/* udis86 - libudis86/syn.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+/* -----------------------------------------------------------------------------
+ * Intel Register Table - Order Matters (types.h)!
+ * -----------------------------------------------------------------------------
+ */
+const char* ud_reg_tab[] =
+{
+ "al", "cl", "dl", "bl",
+ "ah", "ch", "dh", "bh",
+ "spl", "bpl", "sil", "dil",
+ "r8b", "r9b", "r10b", "r11b",
+ "r12b", "r13b", "r14b", "r15b",
+
+ "ax", "cx", "dx", "bx",
+ "sp", "bp", "si", "di",
+ "r8w", "r9w", "r10w", "r11w",
+ "r12w", "r13W" , "r14w", "r15w",
+
+ "eax", "ecx", "edx", "ebx",
+ "esp", "ebp", "esi", "edi",
+ "r8d", "r9d", "r10d", "r11d",
+ "r12d", "r13d", "r14d", "r15d",
+
+ "rax", "rcx", "rdx", "rbx",
+ "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+
+ "es", "cs", "ss", "ds",
+ "fs", "gs",
+
+ "cr0", "cr1", "cr2", "cr3",
+ "cr4", "cr5", "cr6", "cr7",
+ "cr8", "cr9", "cr10", "cr11",
+ "cr12", "cr13", "cr14", "cr15",
+
+ "dr0", "dr1", "dr2", "dr3",
+ "dr4", "dr5", "dr6", "dr7",
+ "dr8", "dr9", "dr10", "dr11",
+ "dr12", "dr13", "dr14", "dr15",
+
+ "mm0", "mm1", "mm2", "mm3",
+ "mm4", "mm5", "mm6", "mm7",
+
+ "st0", "st1", "st2", "st3",
+ "st4", "st5", "st6", "st7",
+
+ "xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11",
+ "xmm12", "xmm13", "xmm14", "xmm15",
+
+ "rip"
+};
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn.h b/src/3rdparty/masm/disassembler/udis86/udis86_syn.h
new file mode 100644
index 0000000000..e8636163ef
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn.h
@@ -0,0 +1,47 @@
+/* udis86 - libudis86/syn.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_SYN_H
+#define UD_SYN_H
+
+#include "udis86_types.h"
+#include <wtf/Assertions.h>
+
+#ifndef __UD_STANDALONE__
+# include <stdarg.h>
+#endif /* __UD_STANDALONE__ */
+
+extern const char* ud_reg_tab[];
+
+static void mkasm(struct ud* u, const char* fmt, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+static void mkasm(struct ud* u, const char* fmt, ...)
+{
+    int ret;
+    va_list ap;
+    va_start(ap, fmt);
+    ret = vsnprintf((char*) u->insn_buffer + u->insn_fill, UD_STRING_BUFFER_SIZE - u->insn_fill, fmt, ap);
+    va_end(ap);
+    /* vsnprintf() returns the untruncated length; clamp so insn_fill never
+     * passes the end of insn_buffer and the size argument cannot wrap. */
+    if (ret < 0 || (unsigned int) ret >= UD_STRING_BUFFER_SIZE - u->insn_fill)
+        u->insn_fill = UD_STRING_BUFFER_SIZE - 1;
+    else
+        u->insn_fill += ret;
+}
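+
+/* Example (illustrative, not part of the upstream sources): successive calls
+ * append into insn_buffer, e.g. mkasm(u, "%s ", "mov"); mkasm(u, "%%eax");
+ * leaves "mov %eax" in the buffer with insn_fill == 8.
+ */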
+
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_types.h b/src/3rdparty/masm/disassembler/udis86/udis86_types.h
new file mode 100644
index 0000000000..320d1ca491
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_types.h
@@ -0,0 +1,238 @@
+/* udis86 - libudis86/types.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_TYPES_H
+#define UD_TYPES_H
+
+#ifndef __UD_STANDALONE__
+# include <stdio.h>
+#endif /* __UD_STANDALONE__ */
+
+/* gcc specific extensions */
+#ifdef __GNUC__
+# define UD_ATTR_PACKED __attribute__((packed))
+#else
+# define UD_ATTR_PACKED
+#endif /* __GNUC__ */
+
+#ifdef _MSC_VER
+# define FMT64 "%I64"
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+ typedef unsigned __int64 uint64_t;
+ typedef __int8 int8_t;
+ typedef __int16 int16_t;
+ typedef __int32 int32_t;
+ typedef __int64 int64_t;
+#else
+# define FMT64 "%ll"
+# ifndef __UD_STANDALONE__
+# include <inttypes.h>
+# endif /* __UD_STANDALONE__ */
+#endif
+
+/* -----------------------------------------------------------------------------
+ * All possible "types" of objects in udis86. Order is Important!
+ * -----------------------------------------------------------------------------
+ */
+enum ud_type
+{
+ UD_NONE,
+
+ /* 8 bit GPRs */
+ UD_R_AL, UD_R_CL, UD_R_DL, UD_R_BL,
+ UD_R_AH, UD_R_CH, UD_R_DH, UD_R_BH,
+ UD_R_SPL, UD_R_BPL, UD_R_SIL, UD_R_DIL,
+ UD_R_R8B, UD_R_R9B, UD_R_R10B, UD_R_R11B,
+ UD_R_R12B, UD_R_R13B, UD_R_R14B, UD_R_R15B,
+
+ /* 16 bit GPRs */
+ UD_R_AX, UD_R_CX, UD_R_DX, UD_R_BX,
+ UD_R_SP, UD_R_BP, UD_R_SI, UD_R_DI,
+ UD_R_R8W, UD_R_R9W, UD_R_R10W, UD_R_R11W,
+ UD_R_R12W, UD_R_R13W, UD_R_R14W, UD_R_R15W,
+
+ /* 32 bit GPRs */
+ UD_R_EAX, UD_R_ECX, UD_R_EDX, UD_R_EBX,
+ UD_R_ESP, UD_R_EBP, UD_R_ESI, UD_R_EDI,
+ UD_R_R8D, UD_R_R9D, UD_R_R10D, UD_R_R11D,
+ UD_R_R12D, UD_R_R13D, UD_R_R14D, UD_R_R15D,
+
+ /* 64 bit GPRs */
+ UD_R_RAX, UD_R_RCX, UD_R_RDX, UD_R_RBX,
+ UD_R_RSP, UD_R_RBP, UD_R_RSI, UD_R_RDI,
+ UD_R_R8, UD_R_R9, UD_R_R10, UD_R_R11,
+ UD_R_R12, UD_R_R13, UD_R_R14, UD_R_R15,
+
+ /* segment registers */
+ UD_R_ES, UD_R_CS, UD_R_SS, UD_R_DS,
+ UD_R_FS, UD_R_GS,
+
+ /* control registers*/
+ UD_R_CR0, UD_R_CR1, UD_R_CR2, UD_R_CR3,
+ UD_R_CR4, UD_R_CR5, UD_R_CR6, UD_R_CR7,
+ UD_R_CR8, UD_R_CR9, UD_R_CR10, UD_R_CR11,
+ UD_R_CR12, UD_R_CR13, UD_R_CR14, UD_R_CR15,
+
+ /* debug registers */
+ UD_R_DR0, UD_R_DR1, UD_R_DR2, UD_R_DR3,
+ UD_R_DR4, UD_R_DR5, UD_R_DR6, UD_R_DR7,
+ UD_R_DR8, UD_R_DR9, UD_R_DR10, UD_R_DR11,
+ UD_R_DR12, UD_R_DR13, UD_R_DR14, UD_R_DR15,
+
+ /* mmx registers */
+ UD_R_MM0, UD_R_MM1, UD_R_MM2, UD_R_MM3,
+ UD_R_MM4, UD_R_MM5, UD_R_MM6, UD_R_MM7,
+
+ /* x87 registers */
+ UD_R_ST0, UD_R_ST1, UD_R_ST2, UD_R_ST3,
+ UD_R_ST4, UD_R_ST5, UD_R_ST6, UD_R_ST7,
+
+ /* extended multimedia registers */
+ UD_R_XMM0, UD_R_XMM1, UD_R_XMM2, UD_R_XMM3,
+ UD_R_XMM4, UD_R_XMM5, UD_R_XMM6, UD_R_XMM7,
+ UD_R_XMM8, UD_R_XMM9, UD_R_XMM10, UD_R_XMM11,
+ UD_R_XMM12, UD_R_XMM13, UD_R_XMM14, UD_R_XMM15,
+
+ UD_R_RIP,
+
+ /* Operand Types */
+ UD_OP_REG, UD_OP_MEM, UD_OP_PTR, UD_OP_IMM,
+ UD_OP_JIMM, UD_OP_CONST
+};
+
+#include "udis86_itab.h"
+
+/* -----------------------------------------------------------------------------
+ * struct ud_operand - Disassembled instruction Operand.
+ * -----------------------------------------------------------------------------
+ */
+struct ud_operand
+{
+ enum ud_type type;
+ uint8_t size;
+ union {
+ int8_t sbyte;
+ uint8_t ubyte;
+ int16_t sword;
+ uint16_t uword;
+ int32_t sdword;
+ uint32_t udword;
+ int64_t sqword;
+ uint64_t uqword;
+
+ struct {
+ uint16_t seg;
+ uint32_t off;
+ } ptr;
+ } lval;
+
+ enum ud_type base;
+ enum ud_type index;
+ uint8_t offset;
+ uint8_t scale;
+};
+
+#define UD_STRING_BUFFER_SIZE 64
+
+/* -----------------------------------------------------------------------------
+ * struct ud - The udis86 object.
+ * -----------------------------------------------------------------------------
+ */
+struct ud
+{
+ int (*inp_hook) (struct ud*);
+ uint8_t inp_curr;
+ uint8_t inp_fill;
+#ifndef __UD_STANDALONE__
+ FILE* inp_file;
+#endif
+ uint8_t inp_ctr;
+ uint8_t* inp_buff;
+ uint8_t* inp_buff_end;
+ uint8_t inp_end;
+ void (*translator)(struct ud*);
+ uint64_t insn_offset;
+ char insn_hexcode[32];
+ char insn_buffer[UD_STRING_BUFFER_SIZE];
+ unsigned int insn_fill;
+ uint8_t dis_mode;
+ uint64_t pc;
+ uint8_t vendor;
+ struct map_entry* mapen;
+ enum ud_mnemonic_code mnemonic;
+ struct ud_operand operand[3];
+ uint8_t error;
+ uint8_t pfx_rex;
+ uint8_t pfx_seg;
+ uint8_t pfx_opr;
+ uint8_t pfx_adr;
+ uint8_t pfx_lock;
+ uint8_t pfx_rep;
+ uint8_t pfx_repe;
+ uint8_t pfx_repne;
+ uint8_t pfx_insn;
+ uint8_t default64;
+ uint8_t opr_mode;
+ uint8_t adr_mode;
+ uint8_t br_far;
+ uint8_t br_near;
+ uint8_t implicit_addr;
+ uint8_t c1;
+ uint8_t c2;
+ uint8_t c3;
+ uint8_t inp_cache[256];
+ uint8_t inp_sess[64];
+ uint8_t have_modrm;
+ uint8_t modrm;
+ void * user_opaque_data;
+ struct ud_itab_entry * itab_entry;
+ struct ud_lookup_table_list_entry *le;
+};
+
+/* -----------------------------------------------------------------------------
+ * Type-definitions
+ * -----------------------------------------------------------------------------
+ */
+typedef enum ud_type ud_type_t;
+typedef enum ud_mnemonic_code ud_mnemonic_code_t;
+
+typedef struct ud ud_t;
+typedef struct ud_operand ud_operand_t;
+
+#define UD_SYN_INTEL ud_translate_intel
+#define UD_SYN_ATT ud_translate_att
+#define UD_EOI -1
+#define UD_INP_CACHE_SZ 32
+#define UD_VENDOR_AMD 0
+#define UD_VENDOR_INTEL 1
+#define UD_VENDOR_ANY 2
+
+#define bail_out(ud,error_code) longjmp( (ud)->bailout, error_code )
+#define try_decode(ud) if ( setjmp( (ud)->bailout ) == 0 )
+#define catch_error() else
+
+#endif
diff --git a/src/3rdparty/masm/jit/JITCompilationEffort.h b/src/3rdparty/masm/jit/JITCompilationEffort.h
new file mode 100644
index 0000000000..5eb6801789
--- /dev/null
+++ b/src/3rdparty/masm/jit/JITCompilationEffort.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCompilationEffort_h
+#define JITCompilationEffort_h
+
+namespace JSC {
+
+enum JITCompilationEffort {
+ JITCompilationCanFail,
+ JITCompilationMustSucceed
+};
+
+} // namespace JSC
+
+#endif // JITCompilationEffort_h
+
diff --git a/src/3rdparty/masm/masm-defs.pri b/src/3rdparty/masm/masm-defs.pri
new file mode 100644
index 0000000000..f8055d0ff4
--- /dev/null
+++ b/src/3rdparty/masm/masm-defs.pri
@@ -0,0 +1,28 @@
+
+DEFINES += WTF_EXPORT_PRIVATE="" JS_EXPORT_PRIVATE=""
+
+win*: DEFINES += NOMINMAX
+
+DEFINES += ENABLE_LLINT=0
+DEFINES += ENABLE_DFG_JIT=0
+DEFINES += ENABLE_JIT=1
+DEFINES += ENABLE_JIT_CONSTANT_BLINDING=0
+DEFINES += ENABLE_ASSEMBLER=1
+DEFINES += ENABLE_YARR_JIT=0
+DEFINES += BUILDING_QT__
+
+INCLUDEPATH += $$PWD/jit
+INCLUDEPATH += $$PWD/assembler
+INCLUDEPATH += $$PWD/runtime
+INCLUDEPATH += $$PWD/wtf
+INCLUDEPATH += $$PWD/stubs
+INCLUDEPATH += $$PWD/stubs/wtf
+INCLUDEPATH += $$PWD
+
+if(isEqual(QT_ARCH, "i386")|isEqual(QT_ARCH, "x86_64")):!win*: DEFINES += WTF_USE_UDIS86=1
+else: DEFINES += WTF_USE_UDIS86=0
+
+INCLUDEPATH += $$PWD/disassembler
+INCLUDEPATH += $$PWD/disassembler/udis86
+INCLUDEPATH += $$_OUT_PWD
+
diff --git a/src/3rdparty/masm/masm.pri b/src/3rdparty/masm/masm.pri
new file mode 100644
index 0000000000..683a5b19d3
--- /dev/null
+++ b/src/3rdparty/masm/masm.pri
@@ -0,0 +1,86 @@
+HEADERS += $$PWD/assembler/*.h
+SOURCES += $$PWD/assembler/ARMAssembler.cpp
+SOURCES += $$PWD/assembler/ARMv7Assembler.cpp
+SOURCES += $$PWD/assembler/MacroAssemblerARM.cpp
+SOURCES += $$PWD/assembler/MacroAssemblerSH4.cpp
+SOURCES += $$PWD/assembler/LinkBuffer.cpp
+
+HEADERS += $$PWD/wtf/*.h
+SOURCES += $$PWD/wtf/PrintStream.cpp
+HEADERS += $$PWD/wtf/PrintStream.h
+
+SOURCES += $$PWD/wtf/FilePrintStream.cpp
+HEADERS += $$PWD/wtf/FilePrintStream.h
+
+HEADERS += $$PWD/wtf/RawPointer.h
+
+win32: SOURCES += $$PWD/wtf/OSAllocatorWin.cpp
+else: SOURCES += $$PWD/wtf/OSAllocatorPosix.cpp
+HEADERS += $$PWD/wtf/OSAllocator.h
+
+SOURCES += $$PWD/wtf/PageAllocationAligned.cpp
+HEADERS += $$PWD/wtf/PageAllocationAligned.h
+HEADERS += $$PWD/wtf/PageAllocation.h
+
+SOURCES += $$PWD/wtf/PageBlock.cpp
+HEADERS += $$PWD/wtf/PageBlock.h
+
+HEADERS += $$PWD/wtf/PageReservation.h
+
+SOURCES += $$PWD/stubs/WTFStubs.cpp
+HEADERS += $$PWD/stubs/WTFStubs.h
+
+SOURCES += $$PWD/disassembler/Disassembler.cpp
+SOURCES += $$PWD/disassembler/UDis86Disassembler.cpp
+contains(DEFINES, WTF_USE_UDIS86=1) {
+ SOURCES += $$PWD/disassembler/udis86/udis86.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_decode.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_input.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_itab_holder.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_syn-att.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_syn.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_syn-intel.c
+
+ ITAB = $$PWD/disassembler/udis86/optable.xml
+ udis86.output = udis86_itab.h
+ udis86.input = ITAB
+ udis86.CONFIG += no_link
+ udis86.commands = python $$PWD/disassembler/udis86/itab.py ${QMAKE_FILE_IN}
+ QMAKE_EXTRA_COMPILERS += udis86
+
+ udis86_tab_cfile.target = $$OUT_PWD/udis86_itab.c
+ udis86_tab_cfile.depends = udis86_itab.h
+ QMAKE_EXTRA_TARGETS += udis86_tab_cfile
+}
+
+SOURCES += \
+ $$PWD/yarr/YarrCanonicalizeUCS2.cpp \
+ $$PWD/yarr/YarrInterpreter.cpp \
+ $$PWD/yarr/YarrPattern.cpp \
+ $$PWD/yarr/YarrSyntaxChecker.cpp
+
+HEADERS += $$PWD/yarr/*.h
+
+retgen.output = RegExpJitTables.h
+retgen.script = $$PWD/create_regex_tables
+retgen.input = retgen.script
+retgen.CONFIG += no_link
+retgen.commands = python $$retgen.script > ${QMAKE_FILE_OUT}
+QMAKE_EXTRA_COMPILERS += retgen
+
+# Taken from WebKit/Tools/qmake/mkspecs/features/unix/default_post.prf
+linux-g++* {
+ greaterThan(QT_GCC_MAJOR_VERSION, 3):greaterThan(QT_GCC_MINOR_VERSION, 5) {
+ !contains(QMAKE_CXXFLAGS, -std=(c|gnu)\\+\\+(0x|11)) {
+            # We need to deactivate those warnings because some names conflict with upcoming c++0x types (e.g. nullptr).
+ QMAKE_CXXFLAGS_WARN_ON += -Wno-c++0x-compat
+ QMAKE_CXXFLAGS += -Wno-c++0x-compat
+ }
+ }
+}
+
+# Don't warn about OVERRIDE and FINAL, since they are feature-checked anyway
+*clang:!contains(QMAKE_CXXFLAGS, -std=c++11) {
+ QMAKE_CXXFLAGS += -Wno-c++11-extensions
+ QMAKE_OBJECTIVE_CFLAGS += -Wno-c++11-extensions
+}
diff --git a/src/3rdparty/masm/runtime/MatchResult.h b/src/3rdparty/masm/runtime/MatchResult.h
new file mode 100644
index 0000000000..d87c8516b0
--- /dev/null
+++ b/src/3rdparty/masm/runtime/MatchResult.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MatchResult_h
+#define MatchResult_h
+
+typedef uint64_t EncodedMatchResult;
+
+struct MatchResult {
+ ALWAYS_INLINE MatchResult(size_t start, size_t end)
+ : start(start)
+ , end(end)
+ {
+ }
+
+ explicit ALWAYS_INLINE MatchResult(EncodedMatchResult encoded)
+ {
+ union u {
+ uint64_t encoded;
+ struct s {
+ size_t start;
+ size_t end;
+ } split;
+ } value;
+ value.encoded = encoded;
+ start = value.split.start;
+ end = value.split.end;
+ }
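+
+    // Illustrative note (not part of the upstream sources): on a 32-bit
+    // target, where sizeof(size_t) == 4, a little-endian encoded value of
+    // 0x0000000500000002 decodes to start == 2 and end == 5.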
+
+ ALWAYS_INLINE static MatchResult failed()
+ {
+ return MatchResult(WTF::notFound, 0);
+ }
+
+ ALWAYS_INLINE operator bool()
+ {
+ return start != WTF::notFound;
+ }
+
+ ALWAYS_INLINE bool empty()
+ {
+ return start == end;
+ }
+
+ size_t start;
+ size_t end;
+};
+
+#endif
diff --git a/src/3rdparty/masm/stubs/ExecutableAllocator.h b/src/3rdparty/masm/stubs/ExecutableAllocator.h
new file mode 100644
index 0000000000..f4292c791d
--- /dev/null
+++ b/src/3rdparty/masm/stubs/ExecutableAllocator.h
@@ -0,0 +1,120 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef MASM_EXECUTABLEALLOCATOR_H
+#define MASM_EXECUTABLEALLOCATOR_H
+
+#include <RefPtr.h>
+#include <RefCounted.h>
+#include <wtf/PageBlock.h>
+
+#include <qv4executableallocator.h>
+
+#if OS(WINDOWS)
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+namespace JSC {
+
+class JSGlobalData;
+
+struct ExecutableMemoryHandle : public RefCounted<ExecutableMemoryHandle> {
+ ExecutableMemoryHandle(QQmlJS::VM::ExecutableAllocator *allocator, int size)
+ : m_allocator(allocator)
+ , m_size(size)
+ {
+ m_allocation = allocator->allocate(size);
+ }
+ ~ExecutableMemoryHandle()
+ {
+ m_allocator->free(m_allocation);
+ }
+
+ inline void shrink(size_t) {
+ // ### TODO.
+ }
+
+ inline bool isManaged() const { return true; }
+
+ void* start() { return m_allocation->start(); }
+ int sizeInBytes() { return m_size; }
+
+ QQmlJS::VM::ExecutableAllocator *m_allocator;
+ QQmlJS::VM::ExecutableAllocator::Allocation *m_allocation;
+ int m_size;
+};
+
+struct ExecutableAllocator {
+ ExecutableAllocator(QQmlJS::VM::ExecutableAllocator *alloc)
+ : realAllocator(alloc)
+ {}
+
+ PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, int size, void*, int)
+ {
+ return adoptRef(new ExecutableMemoryHandle(realAllocator, size));
+ }
+
+ static void makeWritable(void*, int)
+ {
+ }
+
+ static void makeExecutable(void* addr, int size)
+ {
+ size_t pageSize = WTF::pageSize();
+ size_t iaddr = reinterpret_cast<size_t>(addr);
+ size_t roundAddr = iaddr & ~(pageSize - static_cast<size_t>(1));
+#if OS(WINDOWS)
+ DWORD oldProtect;
+ VirtualProtect(reinterpret_cast<void*>(roundAddr), size + (iaddr - roundAddr), PAGE_EXECUTE_READWRITE, &oldProtect);
+#else
+ int mode = PROT_READ | PROT_WRITE | PROT_EXEC;
+ mprotect(reinterpret_cast<void*>(roundAddr), size + (iaddr - roundAddr), mode);
+#endif
+ }
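+
+    // Example (illustrative, not part of the original change): with
+    // pageSize == 0x1000, addr == 0x40231234 and size == 0x20, roundAddr is
+    // 0x40231000 and the protected range spans size + (iaddr - roundAddr)
+    // == 0x254 bytes, covering every touched page.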
+
+ QQmlJS::VM::ExecutableAllocator *realAllocator;
+};
+
+}
+
+#endif // MASM_EXECUTABLEALLOCATOR_H
diff --git a/src/3rdparty/masm/stubs/JSGlobalData.h b/src/3rdparty/masm/stubs/JSGlobalData.h
new file mode 100644
index 0000000000..281a64de83
--- /dev/null
+++ b/src/3rdparty/masm/stubs/JSGlobalData.h
@@ -0,0 +1,65 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef MASM_JSGLOBALDATA_H
+#define MASM_JSGLOBALDATA_H
+
+#include "ExecutableAllocator.h"
+#include "WeakRandom.h"
+
+namespace QQmlJS {
+namespace VM {
+class ExecutableAllocator;
+}
+}
+
+namespace JSC {
+
+class JSGlobalData {
+public:
+ JSGlobalData(QQmlJS::VM::ExecutableAllocator *realAllocator)
+ : executableAllocator(realAllocator)
+ {}
+ ExecutableAllocator executableAllocator;
+};
+
+}
+
+#endif // MASM_JSGLOBALDATA_H
diff --git a/src/3rdparty/masm/stubs/LLIntData.h b/src/3rdparty/masm/stubs/LLIntData.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/3rdparty/masm/stubs/LLIntData.h
diff --git a/src/3rdparty/masm/stubs/Options.h b/src/3rdparty/masm/stubs/Options.h
new file mode 100644
index 0000000000..b95e4354e2
--- /dev/null
+++ b/src/3rdparty/masm/stubs/Options.h
@@ -0,0 +1,53 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef OPTIONS_H
+#define OPTIONS_H
+
+namespace JSC {
+
+struct Options {
+ static bool showDisassembly() { return true; }
+ static bool showDFGDisassembly() { return true; }
+};
+
+}
+
+#endif // OPTIONS_H
diff --git a/src/3rdparty/masm/stubs/WTFStubs.cpp b/src/3rdparty/masm/stubs/WTFStubs.cpp
new file mode 100644
index 0000000000..530804fe3e
--- /dev/null
+++ b/src/3rdparty/masm/stubs/WTFStubs.cpp
@@ -0,0 +1,131 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <config.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <qdebug.h>
+#include <FilePrintStream.h>
+
+namespace WTF {
+
+void* fastMalloc(size_t size)
+{
+ return malloc(size);
+}
+
+void* fastRealloc(void* ptr, size_t size)
+{
+ return realloc(ptr, size);
+}
+
+void fastFree(void* ptr)
+{
+ free(ptr);
+}
+
+uint32_t cryptographicallyRandomNumber()
+{
+ return 0;
+}
+
+static FilePrintStream* s_dataFile;
+
+void setDataFile(FILE* f)
+{
+ delete s_dataFile;
+ s_dataFile = new FilePrintStream(f, FilePrintStream::Borrow);
+}
+
+FilePrintStream& dataFile()
+{
+ if (!s_dataFile)
+ s_dataFile = new FilePrintStream(stderr, FilePrintStream::Borrow);
+ return *s_dataFile;
+}
+
+void dataLogFV(const char* format, va_list args)
+{
+ char buffer[1024];
+ vsnprintf(buffer, sizeof(buffer), format, args);
+ qDebug("%s", buffer);
+}
+
+void dataLogF(const char* format, ...)
+{
+ char buffer[1024];
+ va_list args;
+ va_start(args, format);
+ vsnprintf(buffer, sizeof(buffer), format, args);
+ va_end(args);
+ qDebug("%s", buffer);
+}
+
+void dataLogFString(const char* str)
+{
+ qDebug("%s", str);
+}
+
+}
+
+extern "C" {
+
+void WTFReportAssertionFailure(const char* /*file*/, int /*line*/, const char* /*function*/, const char* /*assertion*/)
+{
+}
+
+void WTFReportBacktrace()
+{
+}
+
+void WTFInvokeCrashHook()
+{
+}
+
+}
+
+
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include <MacroAssemblerX86Common.h>
+
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = JSC::MacroAssemblerX86Common::NotCheckedSSE2;
+#endif
+
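
A quick sketch of what the logging stubs above give callers. The helper below is invented for illustration; the forward declaration mirrors the definition in WTFStubs.cpp (upstream WebKit declares it in wtf/DataLog.h, which this patch does not touch):

    // Mirrors the definition in WTFStubs.cpp.
    namespace WTF { void dataLogF(const char* format, ...); }

    void dumpStats(int blocks, double ms)
    {
        // dataLogF formats into a fixed 1024-byte buffer and hands the
        // result to qDebug(), so longer messages are truncated.
        WTF::dataLogF("compiled %d blocks in %.2f ms\n", blocks, ms);
    }
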
diff --git a/src/3rdparty/masm/stubs/WTFStubs.h b/src/3rdparty/masm/stubs/WTFStubs.h
new file mode 100644
index 0000000000..ec77d25da7
--- /dev/null
+++ b/src/3rdparty/masm/stubs/WTFStubs.h
@@ -0,0 +1,50 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef WTFSTUBS_H
+#define WTFSTUBS_H
+
+namespace WTF {
+
+void setDataFile(FILE* f);
+
+}
+
+#endif // WTFSTUBS_H
diff --git a/src/3rdparty/masm/stubs/wtf/FastAllocBase.h b/src/3rdparty/masm/stubs/wtf/FastAllocBase.h
new file mode 100644
index 0000000000..a062a885af
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/FastAllocBase.h
@@ -0,0 +1,48 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef FASTALLOCBASE_H
+#define FASTALLOCBASE_H
+
+/* Dummy empty header file, only needed for #include source compatibility */
+
+#define WTF_MAKE_FAST_ALLOCATED
+
+#endif // FASTALLOCBASE_H
diff --git a/src/3rdparty/masm/stubs/wtf/FastMalloc.h b/src/3rdparty/masm/stubs/wtf/FastMalloc.h
new file mode 100644
index 0000000000..1248c79dec
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/FastMalloc.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef FASTMALLOC_H
+#define FASTMALLOC_H
+
+/* Dummy empty header file, only needed for #include source compatibility */
+
+#endif // FASTMALLOC_H
diff --git a/src/3rdparty/masm/stubs/wtf/Noncopyable.h b/src/3rdparty/masm/stubs/wtf/Noncopyable.h
new file mode 100644
index 0000000000..d3d1eed6d1
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/Noncopyable.h
@@ -0,0 +1,48 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef NONCOPYABLE_H
+#define NONCOPYABLE_H
+
+#include <qglobal.h>
+
+#define WTF_MAKE_NONCOPYABLE(x) Q_DISABLE_COPY(x)
+
+#endif // NONCOPYABLE_H
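
Since WTF_MAKE_NONCOPYABLE simply expands to Q_DISABLE_COPY, a class using it loses its copy constructor and copy-assignment operator. A minimal sketch (ScratchBuffer is an invented name; the include path assumes the stubs directory is on the include path):

    #include <wtf/Noncopyable.h>

    class ScratchBuffer {
        WTF_MAKE_NONCOPYABLE(ScratchBuffer); // class scope defaults to private
    public:
        ScratchBuffer() {}
    };

    // ScratchBuffer a;
    // ScratchBuffer b(a); // does not compile: copying is disabled
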
diff --git a/src/3rdparty/masm/stubs/wtf/OwnPtr.h b/src/3rdparty/masm/stubs/wtf/OwnPtr.h
new file mode 100644
index 0000000000..31d2f1efa3
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/OwnPtr.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef OWNPTR_H
+#define OWNPTR_H
+
+#include "PassOwnPtr.h"
+
+#endif // OWNPTR_H
diff --git a/src/3rdparty/masm/stubs/wtf/PassOwnPtr.h b/src/3rdparty/masm/stubs/wtf/PassOwnPtr.h
new file mode 100644
index 0000000000..601d278c16
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/PassOwnPtr.h
@@ -0,0 +1,120 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef PASSOWNPTR_H
+#define PASSOWNPTR_H
+
+#include <qscopedpointer.h>
+
+template <typename T> class PassOwnPtr;
+template <typename PtrType> PassOwnPtr<PtrType> adoptPtr(PtrType*);
+
+template <typename T>
+struct OwnPtr : public QScopedPointer<T>
+{
+ OwnPtr() {}
+ OwnPtr(const PassOwnPtr<T> &ptr)
+ : QScopedPointer<T>(ptr.leakRef())
+ {}
+
+ OwnPtr(const OwnPtr<T>& other)
+ : QScopedPointer<T>(const_cast<OwnPtr<T> &>(other).take())
+ {}
+
+ OwnPtr& operator=(const OwnPtr<T>& other)
+ {
+ this->reset(const_cast<OwnPtr<T> &>(other).take());
+ return *this;
+ }
+
+ T* get() const { return this->data(); }
+
+ PassOwnPtr<T> release()
+ {
+ return adoptPtr(this->take());
+ }
+};
+
+template <typename T>
+class PassOwnPtr {
+public:
+ PassOwnPtr() {}
+
+ PassOwnPtr(T* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ PassOwnPtr(const PassOwnPtr<T>& other)
+ : m_ptr(other.leakRef())
+ {
+ }
+
+ PassOwnPtr(const OwnPtr<T>& other)
+ : m_ptr(other.take())
+ {
+ }
+
+ ~PassOwnPtr()
+ {
+ }
+
+ T* operator->() const { return m_ptr.data(); }
+
+ T* leakRef() const { return m_ptr.take(); }
+
+private:
+ template <typename PtrType> friend PassOwnPtr<PtrType> adoptPtr(PtrType*);
+
+ // Intentionally private and left unimplemented: PassOwnPtr is not assignable.
+ PassOwnPtr<T>& operator=(const PassOwnPtr<T>&);
+ mutable QScopedPointer<T> m_ptr;
+};
+
+template <typename T>
+PassOwnPtr<T> adoptPtr(T* ptr)
+{
+ PassOwnPtr<T> result;
+ result.m_ptr.reset(ptr);
+ return result;
+}
+
+
+#endif // PASSOWNPTR_H
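
To illustrate the transfer semantics above: adoptPtr takes ownership of a raw pointer, PassOwnPtr hands it off, and the receiving OwnPtr (whose QScopedPointer base does the eventual delete) becomes the single owner. The Buffer type and factory are invented for the sketch:

    #include <wtf/OwnPtr.h> // pulls in PassOwnPtr.h, as defined above

    struct Buffer { int size; };

    static PassOwnPtr<Buffer> createBuffer()
    {
        return adoptPtr(new Buffer); // take ownership of the raw pointer
    }

    void example()
    {
        OwnPtr<Buffer> buffer = createBuffer(); // ownership moves into the OwnPtr
        buffer->size = 64;
    } // the QScopedPointer base deletes the Buffer here
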
diff --git a/src/3rdparty/masm/stubs/wtf/PassRefPtr.h b/src/3rdparty/masm/stubs/wtf/PassRefPtr.h
new file mode 100644
index 0000000000..d97be1c330
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/PassRefPtr.h
@@ -0,0 +1,101 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef PASSREFPTR_H
+#define PASSREFPTR_H
+
+template <typename T> class RefPtr;
+
+template <typename T>
+class PassRefPtr {
+public:
+ PassRefPtr() : m_ptr(0) {}
+
+ PassRefPtr(T* ptr)
+ : m_ptr(ptr)
+ {
+ if (m_ptr)
+ m_ptr->ref();
+ }
+
+ PassRefPtr(const PassRefPtr<T>& other)
+ : m_ptr(other.leakRef())
+ {
+ }
+
+ PassRefPtr(const RefPtr<T>& other)
+ : m_ptr(other.get())
+ {
+ if (m_ptr)
+ m_ptr->ref();
+ }
+
+ ~PassRefPtr()
+ {
+ if (m_ptr)
+ m_ptr->deref();
+ }
+
+ T* operator->() const { return m_ptr; }
+
+ T* leakRef() const
+ {
+ T* result = m_ptr;
+ m_ptr = 0;
+ return result;
+ }
+
+private:
+ // Intentionally private and left unimplemented: PassRefPtr is not assignable.
+ PassRefPtr<T>& operator=(const PassRefPtr<T>&);
+
+ template <typename PtrType> friend PassRefPtr<PtrType> adoptRef(PtrType*);
+ mutable T* m_ptr;
+};
+
+template <typename T>
+PassRefPtr<T> adoptRef(T* ptr)
+{
+ PassRefPtr<T> result;
+ result.m_ptr = ptr;
+ return result;
+}
+
+#endif // PASSREFPTR_H
diff --git a/src/3rdparty/masm/stubs/wtf/RefCounted.h b/src/3rdparty/masm/stubs/wtf/RefCounted.h
new file mode 100644
index 0000000000..4fc9ad9074
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/RefCounted.h
@@ -0,0 +1,70 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef REFCOUNTED_H
+#define REFCOUNTED_H
+
+#include "PassRefPtr.h"
+
+template <typename Base>
+class RefCounted {
+public:
+ RefCounted() : m_refCount(1) {}
+ // Destruction happens when deref() drops the count to zero and deletes
+ // the Base subobject; the destructor itself must not call deref() again.
+ ~RefCounted() {}
+
+ void ref()
+ {
+ ++m_refCount;
+ }
+
+ void deref()
+ {
+ if (!--m_refCount)
+ delete static_cast<Base*>(this);
+ }
+
+protected:
+ int m_refCount;
+};
+
+#endif // REFCOUNTED_H
diff --git a/src/3rdparty/masm/stubs/wtf/RefPtr.h b/src/3rdparty/masm/stubs/wtf/RefPtr.h
new file mode 100644
index 0000000000..929b493b4b
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/RefPtr.h
@@ -0,0 +1,93 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef REFPTR_H
+#define REFPTR_H
+
+#include "PassRefPtr.h"
+
+template <typename T>
+class RefPtr {
+public:
+ RefPtr() : m_ptr(0) {}
+ RefPtr(const RefPtr<T> &other)
+ : m_ptr(other.m_ptr)
+ {
+ if (m_ptr)
+ m_ptr->ref();
+ }
+
+ RefPtr<T>& operator=(const RefPtr<T>& other)
+ {
+ if (other.m_ptr)
+ other.m_ptr->ref();
+ if (m_ptr)
+ m_ptr->deref();
+ m_ptr = other.m_ptr;
+ return *this;
+ }
+
+ RefPtr(const PassRefPtr<T>& other)
+ : m_ptr(other.leakRef())
+ {
+ }
+
+ ~RefPtr()
+ {
+ if (m_ptr)
+ m_ptr->deref();
+ }
+
+ T* operator->() const { return m_ptr; }
+ T* get() const { return m_ptr; }
+ bool operator!() const { return !m_ptr; }
+
+ PassRefPtr<T> release()
+ {
+ T* ptr = m_ptr;
+ m_ptr = 0;
+ return adoptRef(ptr);
+ }
+
+private:
+ T* m_ptr;
+};
+
+#endif // REFPTR_H
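
A sketch tying the three ref-counting stubs together (the Node type is invented): a RefCounted subclass starts life with a reference count of 1, adoptRef takes over that initial reference without bumping the count, and RefPtr copies ref and deref as expected:

    #include <wtf/RefCounted.h>
    #include <wtf/RefPtr.h>

    class Node : public RefCounted<Node> {
    public:
        static PassRefPtr<Node> create() { return adoptRef(new Node); }
    private:
        Node() {}
    };

    void example()
    {
        RefPtr<Node> a = Node::create(); // count stays 1: adoptRef skips the ref
        RefPtr<Node> b = a;              // copy refs: count 2
        b.release();                     // the returned temporary PassRefPtr
                                         // derefs at end of statement: count 1
    } // a's destructor drops the last reference and deletes the Node
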
diff --git a/src/3rdparty/masm/stubs/wtf/TypeTraits.h b/src/3rdparty/masm/stubs/wtf/TypeTraits.h
new file mode 100644
index 0000000000..9b626a7a53
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/TypeTraits.h
@@ -0,0 +1,58 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef TYPETRAITS_H
+#define TYPETRAITS_H
+
+namespace WTF {
+
+template <typename A, typename B>
+struct IsSameType {
+ static const bool value = false;
+};
+
+template <typename A>
+struct IsSameType<A, A> {
+ static const bool value = true;
+};
+
+}
+
+#endif // TYPETRAITS_H
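
IsSameType is the usual pre-C++11 identity trait, evaluated entirely at compile time; a minimal sketch (the typedef is invented):

    #include <wtf/TypeTraits.h>

    typedef unsigned short UCharLike;
    static const bool isUShort = WTF::IsSameType<UCharLike, unsigned short>::value; // true
    static const bool isInt = WTF::IsSameType<UCharLike, int>::value;               // false
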
diff --git a/src/3rdparty/masm/stubs/wtf/UnusedParam.h b/src/3rdparty/masm/stubs/wtf/UnusedParam.h
new file mode 100644
index 0000000000..a676bdf303
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/UnusedParam.h
@@ -0,0 +1,48 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef UNUSEDPARAM_H
+#define UNUSEDPARAM_H
+
+#include <qglobal.h>
+
+#define UNUSED_PARAM(x) Q_UNUSED(x)
+
+#endif // UNUSEDPARAM_H
diff --git a/src/3rdparty/masm/stubs/wtf/Vector.h b/src/3rdparty/masm/stubs/wtf/Vector.h
new file mode 100644
index 0000000000..39742d8ab0
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/Vector.h
@@ -0,0 +1,104 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef VECTOR_H
+#define VECTOR_H
+
+#include <vector>
+#include <wtf/Assertions.h>
+#include <wtf/NotFound.h>
+#include <qalgorithms.h>
+
+enum WTF_UnusedOverflowMode {
+ UnsafeVectorOverflow
+};
+
+namespace WTF {
+
+template <typename T, int capacity = 1, int overflowMode = UnsafeVectorOverflow>
+class Vector : public std::vector<T> {
+public:
+ Vector() {}
+ Vector(int initialSize) : std::vector<T>(initialSize) {}
+
+ inline void append(const T& value)
+ { this->push_back(value); }
+
+ inline void append(const Vector<T>& vector)
+ {
+ this->insert(this->end(), vector.begin(), vector.end());
+ }
+
+ using std::vector<T>::insert;
+
+ inline void reserveInitialCapacity(size_t size) { this->reserve(size); }
+
+ inline void insert(size_t position, T value)
+ { this->insert(this->begin() + position, value); }
+
+ inline void grow(size_t size)
+ { this->resize(size); }
+
+ inline void shrink(size_t size)
+ { this->erase(this->begin() + size, this->end()); }
+
+ inline void shrinkToFit()
+ { this->shrink_to_fit(); }
+
+ inline void remove(size_t position)
+ { this->erase(this->begin() + position); }
+
+ inline bool isEmpty() const { return this->empty(); }
+
+ inline T& last() { return this->back(); }
+};
+
+template <typename T, int capacity>
+void deleteAllValues(const Vector<T, capacity> &vector)
+{
+ qDeleteAll(vector);
+}
+
+}
+
+using WTF::Vector;
+using WTF::deleteAllValues;
+
+#endif // VECTOR_H
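
The stub keeps WTF's Vector interface while delegating storage to std::vector; the inline capacity template argument is accepted for source compatibility but ignored. A short sketch with arbitrary values:

    #include <wtf/Vector.h>

    void example()
    {
        Vector<int, 16> values;           // "16" has no effect in the stub
        values.reserveInitialCapacity(4);
        values.append(1);
        values.append(2);
        values.insert(0, 3);              // WTF-style insert by index: {3, 1, 2}
        int tail = values.last();         // 2
        values.remove(0);                 // back to {1, 2}
        (void)tail;
    }
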
diff --git a/src/3rdparty/masm/stubs/wtf/text/CString.h b/src/3rdparty/masm/stubs/wtf/text/CString.h
new file mode 100644
index 0000000000..c9a65e5c0b
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/text/CString.h
@@ -0,0 +1,44 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef CSTRING_H
+#define CSTRING_H
+
+#endif // CSTRING_H
diff --git a/src/3rdparty/masm/stubs/wtf/text/WTFString.h b/src/3rdparty/masm/stubs/wtf/text/WTFString.h
new file mode 100644
index 0000000000..d157dc7adc
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/text/WTFString.h
@@ -0,0 +1,75 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef WTFSTRING_H
+#define WTFSTRING_H
+
+#include <QString>
+#include <wtf/ASCIICType.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace WTF {
+
+class String : public QString
+{
+public:
+ String(const QString& s) : QString(s) {}
+ bool is8Bit() const { return false; }
+ const unsigned char *characters8() const { return 0; }
+ const UChar *characters16() const { return reinterpret_cast<const UChar*>(constData()); }
+
+ template <typename T>
+ const T* getCharacters() const;
+
+};
+
+template <>
+inline const unsigned char* String::getCharacters<unsigned char>() const { return characters8(); }
+template <>
+inline const UChar* String::getCharacters<UChar>() const { return characters16(); }
+
+}
+
+// Don't import WTF::String into the global namespace to avoid conflicts with QQmlJS::VM::String
+namespace JSC {
+ using WTF::String;
+}
+
+#endif // WTFSTRING_H
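
The getCharacters template lets generic code select a character width at compile time; with this stub only the 16-bit path carries real data. A sketch (string contents are arbitrary):

    #include <wtf/text/WTFString.h>

    void example()
    {
        WTF::String s = QString::fromLatin1("masm");
        const UChar* chars = s.getCharacters<UChar>(); // UTF-16 view of QString data
        (void)chars;
    }
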
diff --git a/src/3rdparty/masm/stubs/wtf/unicode/Unicode.h b/src/3rdparty/masm/stubs/wtf/unicode/Unicode.h
new file mode 100644
index 0000000000..9e7427e8ac
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/unicode/Unicode.h
@@ -0,0 +1,59 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef UNICODE_H
+#define UNICODE_H
+
+#include <QChar>
+
+typedef unsigned char LChar;
+typedef unsigned short UChar;
+
+namespace Unicode {
+ inline UChar toLower(UChar ch) {
+ return QChar::toLower(ch);
+ }
+
+ inline UChar toUpper(UChar ch) {
+ return QChar::toUpper(ch);
+ }
+}
+
+#endif // UNICODE_H
diff --git a/src/3rdparty/masm/wtf/ASCIICType.h b/src/3rdparty/masm/wtf/ASCIICType.h
new file mode 100644
index 0000000000..18e108e1bf
--- /dev/null
+++ b/src/3rdparty/masm/wtf/ASCIICType.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_ASCIICType_h
+#define WTF_ASCIICType_h
+
+#include <wtf/Assertions.h>
+
+// The behavior of many of the functions in the <ctype.h> header is dependent
+// on the current locale. But in the WebKit project, all uses of those functions
+// are in code processing something that's not locale-specific. These equivalents
+// for some of the <ctype.h> functions are named more explicitly, not dependent
+// on the C library locale, and we should also optimize them as needed.
+
+// All functions return false or leave the character unchanged if passed a character
+// that is outside the range 0-7F. So they can be used on Unicode strings or
+// characters if the intent is to do processing only if the character is ASCII.
+
+namespace WTF {
+
+template<typename CharType> inline bool isASCII(CharType c)
+{
+ return !(c & ~0x7F);
+}
+
+template<typename CharType> inline bool isASCIIAlpha(CharType c)
+{
+ return (c | 0x20) >= 'a' && (c | 0x20) <= 'z';
+}
+
+template<typename CharType> inline bool isASCIIDigit(CharType c)
+{
+ return c >= '0' && c <= '9';
+}
+
+template<typename CharType> inline bool isASCIIAlphanumeric(CharType c)
+{
+ return isASCIIDigit(c) || isASCIIAlpha(c);
+}
+
+template<typename CharType> inline bool isASCIIHexDigit(CharType c)
+{
+ return isASCIIDigit(c) || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f');
+}
+
+template<typename CharType> inline bool isASCIILower(CharType c)
+{
+ return c >= 'a' && c <= 'z';
+}
+
+template<typename CharType> inline bool isASCIIOctalDigit(CharType c)
+{
+ return (c >= '0') & (c <= '7');
+}
+
+template<typename CharType> inline bool isASCIIPrintable(CharType c)
+{
+ return c >= ' ' && c <= '~';
+}
+
+/*
+ Statistics from a run of Apple's page load test for callers of isASCIISpace:
+
+ character count
+ --------- -----
+ non-spaces 689383
+ 20 space 294720
+ 0A \n 89059
+ 09 \t 28320
+ 0D \r 0
+ 0C \f 0
+ 0B \v 0
+ */
+template<typename CharType> inline bool isASCIISpace(CharType c)
+{
+ return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
+}
+
+template<typename CharType> inline bool isASCIIUpper(CharType c)
+{
+ return c >= 'A' && c <= 'Z';
+}
+
+template<typename CharType> inline CharType toASCIILower(CharType c)
+{
+ return c | ((c >= 'A' && c <= 'Z') << 5);
+}
+
+template<typename CharType> inline CharType toASCIILowerUnchecked(CharType character)
+{
+ // This function can be used for comparing any input character
+ // to a lowercase English character. The isASCIIAlphaCaselessEqual
+ // below should be used for regular comparison of ASCII alpha
+ // characters, but switch statements in the CSS tokenizer require
+ // direct use of this function.
+ return character | 0x20;
+}
+
+template<typename CharType> inline CharType toASCIIUpper(CharType c)
+{
+ return c & ~((c >= 'a' && c <= 'z') << 5);
+}
+
+template<typename CharType> inline int toASCIIHexValue(CharType c)
+{
+ ASSERT(isASCIIHexDigit(c));
+ return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF;
+}
+
+template<typename CharType> inline int toASCIIHexValue(CharType upperValue, CharType lowerValue)
+{
+ ASSERT(isASCIIHexDigit(upperValue) && isASCIIHexDigit(lowerValue));
+ return ((toASCIIHexValue(upperValue) << 4) & 0xF0) | toASCIIHexValue(lowerValue);
+}
+
+inline char lowerNibbleToASCIIHexDigit(char c)
+{
+ char nibble = c & 0xF;
+ return nibble < 10 ? '0' + nibble : 'A' + nibble - 10;
+}
+
+inline char upperNibbleToASCIIHexDigit(char c)
+{
+ char nibble = (c >> 4) & 0xF;
+ return nibble < 10 ? '0' + nibble : 'A' + nibble - 10;
+}
+
+template<typename CharType> inline bool isASCIIAlphaCaselessEqual(CharType cssCharacter, char character)
+{
+ // This function compares a (preferably) constant ASCII
+ // lowercase letter to any input character.
+ ASSERT(character >= 'a' && character <= 'z');
+ return LIKELY(toASCIILowerUnchecked(cssCharacter) == character);
+}
+
+}
+
+using WTF::isASCII;
+using WTF::isASCIIAlpha;
+using WTF::isASCIIAlphanumeric;
+using WTF::isASCIIDigit;
+using WTF::isASCIIHexDigit;
+using WTF::isASCIILower;
+using WTF::isASCIIOctalDigit;
+using WTF::isASCIIPrintable;
+using WTF::isASCIISpace;
+using WTF::isASCIIUpper;
+using WTF::toASCIIHexValue;
+using WTF::toASCIILower;
+using WTF::toASCIILowerUnchecked;
+using WTF::toASCIIUpper;
+using WTF::lowerNibbleToASCIIHexDigit;
+using WTF::upperNibbleToASCIIHexDigit;
+using WTF::isASCIIAlphaCaselessEqual;
+
+#endif
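
As the header comment stresses, these helpers are locale-independent replacements for <ctype.h>. A sketch of the intended use, parsing one hex byte (the helper is invented for illustration):

    #include <wtf/ASCIICType.h>

    // Parses two hex digits into a byte value, e.g. "7f" -> 127.
    static bool parseHexByte(const char* s, int& out)
    {
        if (!isASCIIHexDigit(s[0]) || !isASCIIHexDigit(s[1]))
            return false;
        out = toASCIIHexValue(s[0], s[1]); // inputs checked above, so the ASSERT holds
        return true;
    }
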
diff --git a/src/3rdparty/masm/wtf/Assertions.h b/src/3rdparty/masm/wtf/Assertions.h
new file mode 100644
index 0000000000..6263e50ed9
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Assertions.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Assertions_h
+#define WTF_Assertions_h
+
+/*
+ no namespaces because this file has to be includable from C and Objective-C
+
+ Note, this file uses many GCC extensions, but it should be compatible with
+ C, Objective C, C++, and Objective C++.
+
+ For non-debug builds, everything is disabled by default.
+ Defining any of the symbols explicitly prevents this from having any effect.
+
+ MSVC7 note: variadic macro support was added in MSVC8, so for now we disable
+ those macros in MSVC7. For more info, see the MSDN document on variadic
+ macros here:
+
+ http://msdn2.microsoft.com/en-us/library/ms177415(VS.80).aspx
+*/
+
+#include <wtf/Platform.h>
+
+#include <stddef.h>
+
+#if !COMPILER(MSVC)
+#include <inttypes.h>
+#endif
+
+#ifdef NDEBUG
+/* Disable ASSERT* macros in release mode. */
+#define ASSERTIONS_DISABLED_DEFAULT 1
+#else
+#define ASSERTIONS_DISABLED_DEFAULT 0
+#endif
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define HAVE_VARIADIC_MACRO 0
+#else
+#define HAVE_VARIADIC_MACRO 1
+#endif
+
+#ifndef BACKTRACE_DISABLED
+#define BACKTRACE_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef ASSERT_DISABLED
+#define ASSERT_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef ASSERT_MSG_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define ASSERT_MSG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define ASSERT_MSG_DISABLED 1
+#endif
+#endif
+
+#ifndef ASSERT_ARG_DISABLED
+#define ASSERT_ARG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef FATAL_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define FATAL_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define FATAL_DISABLED 1
+#endif
+#endif
+
+#ifndef ERROR_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define ERROR_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define ERROR_DISABLED 1
+#endif
+#endif
+
+#ifndef LOG_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define LOG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define LOG_DISABLED 1
+#endif
+#endif
+
+#if COMPILER(GCC)
+#define WTF_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define WTF_PRETTY_FUNCTION __FUNCTION__
+#endif
+
+/* WTF logging functions can process %@ in the format string to log a NSObject* but the printf format attribute
+ emits a warning when %@ is used in the format string. Until <rdar://problem/5195437> is resolved we can't include
+ the attribute when being used from Objective-C code in case it decides to use %@. */
+#if COMPILER(GCC) && !defined(__OBJC__)
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))
+#else
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments)
+#endif
+
+/* These helper functions are always declared, but not necessarily always defined if the corresponding function is disabled. */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum { WTFLogChannelOff, WTFLogChannelOn } WTFLogChannelState;
+
+typedef struct {
+ unsigned mask;
+ const char *defaultName;
+ WTFLogChannelState state;
+} WTFLogChannel;
+
+WTF_EXPORT_PRIVATE void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion);
+WTF_EXPORT_PRIVATE void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
+WTF_EXPORT_PRIVATE void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion);
+WTF_EXPORT_PRIVATE void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5);
+WTF_EXPORT_PRIVATE void WTFReportError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5);
+WTF_EXPORT_PRIVATE void WTFLog(WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+WTF_EXPORT_PRIVATE void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
+WTF_EXPORT_PRIVATE void WTFLogAlways(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
+
+WTF_EXPORT_PRIVATE void WTFGetBacktrace(void** stack, int* size);
+WTF_EXPORT_PRIVATE void WTFReportBacktrace();
+WTF_EXPORT_PRIVATE void WTFPrintBacktrace(void** stack, int size);
+
+typedef void (*WTFCrashHookFunction)();
+WTF_EXPORT_PRIVATE void WTFSetCrashHook(WTFCrashHookFunction);
+WTF_EXPORT_PRIVATE void WTFInvokeCrashHook();
+WTF_EXPORT_PRIVATE void WTFInstallReportBacktraceOnCrashHook();
+
+#ifdef __cplusplus
+}
+#endif
+
+/* CRASH() - Raises a fatal error resulting in program termination and triggering either the debugger or the crash reporter.
+
+ Use CRASH() in response to known, unrecoverable errors like out-of-memory.
+ Macro is enabled in both debug and release mode.
+ To test for unknown errors and verify assumptions, use ASSERT instead, to avoid impacting performance in release builds.
+
+ Signals are ignored by the crash reporter on OS X so we must do better.
+*/
+#ifndef CRASH
+#if COMPILER(CLANG)
+#define CRASH() \
+ (WTFReportBacktrace(), \
+ WTFInvokeCrashHook(), \
+ (*(int *)(uintptr_t)0xbbadbeef = 0), \
+ __builtin_trap())
+#else
+#define CRASH() \
+ (WTFReportBacktrace(), \
+ WTFInvokeCrashHook(), \
+ (*(int *)(uintptr_t)0xbbadbeef = 0), \
+ ((void(*)())0)() /* More reliable, but doesn't say BBADBEEF */ \
+ )
+#endif
+#endif
+
+#if COMPILER(CLANG)
+#define NO_RETURN_DUE_TO_CRASH NO_RETURN
+#else
+#define NO_RETURN_DUE_TO_CRASH
+#endif
+
+
+/* BACKTRACE
+
+ Print a backtrace to the same location as ASSERT messages.
+*/
+
+#if BACKTRACE_DISABLED
+
+#define BACKTRACE() ((void)0)
+
+#else
+
+#define BACKTRACE() do { \
+ WTFReportBacktrace(); \
+} while(false)
+
+#endif
+
+/* ASSERT, ASSERT_NOT_REACHED, ASSERT_UNUSED
+
+ These macros are compiled out of release builds.
+ Expressions inside them are evaluated in debug builds only.
+*/
+
+#if OS(WINCE)
+/* FIXME: We include this here only to avoid a conflict with the ASSERT macro. */
+#include <windows.h>
+#undef min
+#undef max
+#undef ERROR
+#endif
+
+#if OS(WINDOWS)
+/* FIXME: Change to use something other than ASSERT to avoid this conflict with the underlying platform */
+#undef ASSERT
+#endif
+
+#if ASSERT_DISABLED
+
+#define ASSERT(assertion) ((void)0)
+#define ASSERT_AT(assertion, file, line, function) ((void)0)
+#define ASSERT_NOT_REACHED() ((void)0)
+#define NO_RETURN_DUE_TO_ASSERT
+
+#if COMPILER(INTEL) && !OS(WINDOWS) || COMPILER(RVCT)
+template<typename T>
+inline void assertUnused(T& x) { (void)x; }
+#define ASSERT_UNUSED(variable, assertion) (assertUnused(variable))
+#else
+#define ASSERT_UNUSED(variable, assertion) ((void)variable)
+#endif
+
+#else
+
+#define ASSERT(assertion) \
+ (!(assertion) ? \
+ (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \
+ CRASH()) : \
+ (void)0)
+
+#define ASSERT_AT(assertion, file, line, function) \
+ (!(assertion) ? \
+ (WTFReportAssertionFailure(file, line, function, #assertion), \
+ CRASH()) : \
+ (void)0)
+
+#define ASSERT_NOT_REACHED() do { \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 0); \
+ CRASH(); \
+} while (0)
+
+#define ASSERT_UNUSED(variable, assertion) ASSERT(assertion)
+
+#define NO_RETURN_DUE_TO_ASSERT NO_RETURN_DUE_TO_CRASH
+
+#endif
+
+/* ASSERT_WITH_SECURITY_IMPLICATION
+
+ Failure of this assertion indicates a possible security vulnerability.
+ Classes of vulnerability it tests for include bad casts, out-of-bounds
+ accesses, use-after-frees, etc. Please file a bug using the security
+ template - https://bugs.webkit.org/enter_bug.cgi?product=Security.
+
+*/
+#ifdef ADDRESS_SANITIZER
+
+#define ASSERT_WITH_SECURITY_IMPLICATION(assertion) \
+ (!(assertion) ? \
+ (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \
+ CRASH()) : \
+ (void)0)
+
+#else
+
+#define ASSERT_WITH_SECURITY_IMPLICATION(assertion) ASSERT(assertion)
+
+#endif
+
+/* ASSERT_WITH_MESSAGE */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define ASSERT_WITH_MESSAGE(assertion) ((void)0)
+#elif ASSERT_MSG_DISABLED
+#define ASSERT_WITH_MESSAGE(assertion, ...) ((void)0)
+#else
+#define ASSERT_WITH_MESSAGE(assertion, ...) do \
+ if (!(assertion)) { \
+ WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
+ CRASH(); \
+ } \
+while (0)
+#endif
+
+/* ASSERT_WITH_MESSAGE_UNUSED */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion) ((void)0)
+#elif ASSERT_MSG_DISABLED
+#if COMPILER(INTEL) && !OS(WINDOWS) || COMPILER(RVCT)
+template<typename T>
+inline void assertWithMessageUnused(T& x) { (void)x; }
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) (assertWithMessageUnused(variable))
+#else
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) ((void)variable)
+#endif
+#else
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) do \
+ if (!(assertion)) { \
+ WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
+ CRASH(); \
+ } \
+while (0)
+#endif
+
+
+/* ASSERT_ARG */
+
+#if ASSERT_ARG_DISABLED
+
+#define ASSERT_ARG(argName, assertion) ((void)0)
+
+#else
+
+#define ASSERT_ARG(argName, assertion) do \
+ if (!(assertion)) { \
+ WTFReportArgumentAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #argName, #assertion); \
+ CRASH(); \
+ } \
+while (0)
+
+#endif
+
+/* COMPILE_ASSERT */
+#ifndef COMPILE_ASSERT
+#if COMPILER_SUPPORTS(C_STATIC_ASSERT)
+/* Unlike static_assert below, this also works in plain C code. */
+#define COMPILE_ASSERT(exp, name) _Static_assert((exp), #name)
+#elif COMPILER_SUPPORTS(CXX_STATIC_ASSERT)
+#define COMPILE_ASSERT(exp, name) static_assert((exp), #name)
+#else
+#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1]
+#endif
+#endif
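+
+/* Usage sketch: COMPILE_ASSERT rejects the build when a constant expression
+   is false, e.g. to pin down a layout assumption:
+
+       COMPILE_ASSERT(sizeof(int) >= 4, int_is_at_least_32_bits);
+*/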
+
+/* FATAL */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define FATAL() ((void)0)
+#elif FATAL_DISABLED
+#define FATAL(...) ((void)0)
+#else
+#define FATAL(...) do { \
+ WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__); \
+ CRASH(); \
+} while (0)
+#endif
+
+/* LOG_ERROR */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define LOG_ERROR() ((void)0)
+#elif ERROR_DISABLED
+#define LOG_ERROR(...) ((void)0)
+#else
+#define LOG_ERROR(...) WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__)
+#endif
+
+/* LOG */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define LOG() ((void)0)
+#elif LOG_DISABLED
+#define LOG(channel, ...) ((void)0)
+#else
+#define LOG(channel, ...) WTFLog(&JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel
+#endif
+
+/* LOG_VERBOSE */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define LOG_VERBOSE(channel) ((void)0)
+#elif LOG_DISABLED
+#define LOG_VERBOSE(channel, ...) ((void)0)
+#else
+#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#endif
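+
+/* Usage sketch: LOG names its channel by pasting LOG_CHANNEL_PREFIX onto the
+   channel argument. Assuming the embedder defines LOG_CHANNEL_PREFIX as Log
+   and declares a WTFLogChannel named LogFoo:
+
+       LOG(Foo, "loaded %d items", count);         // expands to WTFLog(&LogFoo, ...)
+       LOG_VERBOSE(Foo, "state: %s", description); // also records file/line/function
+*/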
+
+/* UNREACHABLE_FOR_PLATFORM */
+
+#if COMPILER(CLANG)
+// This would be a macro except that its use of #pragma works best around
+// a function. Hence it uses the macro naming convention.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-noreturn"
+static inline void UNREACHABLE_FOR_PLATFORM()
+{
+ ASSERT_NOT_REACHED();
+}
+#pragma clang diagnostic pop
+#else
+#define UNREACHABLE_FOR_PLATFORM() ASSERT_NOT_REACHED()
+#endif
+
+#if ASSERT_DISABLED
+#define RELEASE_ASSERT(assertion) (UNLIKELY(!(assertion)) ? (CRASH()) : (void)0)
+#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) RELEASE_ASSERT(assertion)
+#define RELEASE_ASSERT_NOT_REACHED() CRASH()
+#else
+#define RELEASE_ASSERT(assertion) ASSERT(assertion)
+#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) ASSERT_WITH_MESSAGE(assertion, __VA_ARGS__)
+#define RELEASE_ASSERT_NOT_REACHED() ASSERT_NOT_REACHED()
+#endif
+
+#endif /* WTF_Assertions_h */
diff --git a/src/3rdparty/masm/wtf/Atomics.h b/src/3rdparty/masm/wtf/Atomics.h
new file mode 100644
index 0000000000..df5abec81d
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Atomics.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
+ * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
+ * is virtually identical to the Apple license above but is included here for completeness.
+ *
+ * Boost Software License - Version 1.0 - August 17th, 2003
+ *
+ * Permission is hereby granted, free of charge, to any person or organization
+ * obtaining a copy of the software and accompanying documentation covered by
+ * this license (the "Software") to use, reproduce, display, distribute,
+ * execute, and transmit the Software, and to prepare derivative works of the
+ * Software, and to permit third-parties to whom the Software is furnished to
+ * do so, all subject to the following:
+ *
+ * The copyright notices in the Software and this entire statement, including
+ * the above license grant, this restriction and the following disclaimer,
+ * must be included in all copies of the Software, in whole or in part, and
+ * all derivative works of the Software, unless such copies or derivative
+ * works are solely in the form of machine-executable object code generated by
+ * a source language processor.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef Atomics_h
+#define Atomics_h
+
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/UnusedParam.h>
+
+#if OS(WINDOWS)
+#include <windows.h>
+#elif OS(QNX)
+#include <atomic.h>
+#endif
+
+namespace WTF {
+
+#if OS(WINDOWS)
+#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1
+
+#if OS(WINCE)
+inline int atomicIncrement(int* addend) { return InterlockedIncrement(reinterpret_cast<long*>(addend)); }
+inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }
+#elif COMPILER(MINGW) || COMPILER(MSVC7_OR_LOWER)
+inline int atomicIncrement(int* addend) { return InterlockedIncrement(reinterpret_cast<long*>(addend)); }
+inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }
+
+inline int64_t atomicIncrement(int64_t* addend) { return InterlockedIncrement64(reinterpret_cast<long long*>(addend)); }
+inline int64_t atomicDecrement(int64_t* addend) { return InterlockedDecrement64(reinterpret_cast<long long*>(addend)); }
+#else
+inline int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
+inline int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }
+
+inline int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); }
+inline int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); }
+#endif
+
+#elif OS(QNX)
+#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1
+
+// Note: atomic_{add, sub}_value() return the previous value of the addend's content.
+inline int atomicIncrement(int volatile* addend) { return static_cast<int>(atomic_add_value(reinterpret_cast<unsigned volatile*>(addend), 1)) + 1; }
+inline int atomicDecrement(int volatile* addend) { return static_cast<int>(atomic_sub_value(reinterpret_cast<unsigned volatile*>(addend), 1)) - 1; }
+
+#elif COMPILER(GCC) && !CPU(SPARC64) // sizeof(_Atomic_word) != sizeof(int) on sparc64 gcc
+#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1
+
+inline int atomicIncrement(int volatile* addend) { return __sync_add_and_fetch(addend, 1); }
+inline int atomicDecrement(int volatile* addend) { return __sync_sub_and_fetch(addend, 1); }
+
+inline int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); }
+inline int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); }
+
+#endif
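+
+// Usage sketch: atomicIncrement/atomicDecrement return the new value, which
+// makes a simple thread-safe reference count straightforward (destroy() is a
+// hypothetical cleanup):
+//
+//     int volatile refCount = 1;
+//     atomicIncrement(&refCount);      // a second owner appears: count is 2
+//     atomicDecrement(&refCount);      // one owner releases: count is 1
+//     if (!atomicDecrement(&refCount)) // last owner releases: count hits 0
+//         destroy();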
+
+#if OS(WINDOWS)
+inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
+{
+#if OS(WINCE)
+ return InterlockedCompareExchange(reinterpret_cast<LONG*>(const_cast<unsigned*>(location)), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
+#else
+ return InterlockedCompareExchange(reinterpret_cast<LONG volatile*>(location), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
+#endif
+}
+
+inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
+{
+ return InterlockedCompareExchangePointer(location, newValue, expected) == expected;
+}
+#else // OS(WINDOWS) --> not windows
+#if COMPILER(GCC) && !COMPILER(CLANG) // Work around a gcc bug
+inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
+#else
+inline bool weakCompareAndSwap(unsigned* location, unsigned expected, unsigned newValue)
+#endif
+{
+#if ENABLE(COMPARE_AND_SWAP)
+#if CPU(X86) || CPU(X86_64)
+ unsigned char result;
+ asm volatile(
+ "lock; cmpxchgl %3, %2\n\t"
+ "sete %1"
+ : "+a"(expected), "=q"(result), "+m"(*location)
+ : "r"(newValue)
+ : "memory"
+ );
+#elif CPU(ARM_THUMB2)
+ unsigned tmp;
+ unsigned result;
+ asm volatile(
+ "movw %1, #1\n\t"
+ "ldrex %2, %0\n\t"
+ "cmp %3, %2\n\t"
+ "bne.n 0f\n\t"
+ "strex %1, %4, %0\n\t"
+ "0:"
+ : "+Q"(*location), "=&r"(result), "=&r"(tmp)
+ : "r"(expected), "r"(newValue)
+ : "memory");
+ result = !result;
+#else
+#error "Bad architecture for compare and swap."
+#endif
+ return result;
+#else
+ UNUSED_PARAM(location);
+ UNUSED_PARAM(expected);
+ UNUSED_PARAM(newValue);
+ CRASH();
+ return false;
+#endif
+}
+
+inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
+{
+#if ENABLE(COMPARE_AND_SWAP)
+#if CPU(X86_64)
+ bool result;
+ asm volatile(
+ "lock; cmpxchgq %3, %2\n\t"
+ "sete %1"
+ : "+a"(expected), "=q"(result), "+m"(*location)
+ : "r"(newValue)
+ : "memory"
+ );
+ return result;
+#else
+ return weakCompareAndSwap(bitwise_cast<unsigned*>(location), bitwise_cast<unsigned>(expected), bitwise_cast<unsigned>(newValue));
+#endif
+#else // ENABLE(COMPARE_AND_SWAP)
+ UNUSED_PARAM(location);
+ UNUSED_PARAM(expected);
+ UNUSED_PARAM(newValue);
+ CRASH();
+    return false;
+#endif // ENABLE(COMPARE_AND_SWAP)
+}
+#endif // OS(WINDOWS) (end of the not-windows case)
+
+inline bool weakCompareAndSwapUIntPtr(volatile uintptr_t* location, uintptr_t expected, uintptr_t newValue)
+{
+ return weakCompareAndSwap(reinterpret_cast<void*volatile*>(location), reinterpret_cast<void*>(expected), reinterpret_cast<void*>(newValue));
+}
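+
+// Usage sketch: the "weak" in weakCompareAndSwap means the swap may fail even
+// when *location equals the expected value on some architectures, so callers
+// retry in a loop; a lock-free increment looks like (counter is a
+// hypothetical unsigned*):
+//
+//     unsigned old;
+//     do {
+//         old = *counter;
+//     } while (!weakCompareAndSwap(counter, old, old + 1));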
+
+#if CPU(ARM_THUMB2)
+
+inline void memoryBarrierAfterLock()
+{
+ asm volatile("dmb" ::: "memory");
+}
+
+inline void memoryBarrierBeforeUnlock()
+{
+ asm volatile("dmb" ::: "memory");
+}
+
+#else
+
+inline void memoryBarrierAfterLock() { }
+inline void memoryBarrierBeforeUnlock() { }
+
+#endif
+
+} // namespace WTF
+
+#if USE(LOCKFREE_THREADSAFEREFCOUNTED)
+using WTF::atomicDecrement;
+using WTF::atomicIncrement;
+#endif
+
+#endif // Atomics_h
diff --git a/src/3rdparty/masm/wtf/BumpPointerAllocator.h b/src/3rdparty/masm/wtf/BumpPointerAllocator.h
new file mode 100644
index 0000000000..3b2cfd974a
--- /dev/null
+++ b/src/3rdparty/masm/wtf/BumpPointerAllocator.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BumpPointerAllocator_h
+#define BumpPointerAllocator_h
+
+#include <algorithm>
+#include <wtf/PageAllocation.h>
+#include <wtf/PageBlock.h>
+
+namespace WTF {
+
+#define MINIMUM_BUMP_POOL_SIZE 0x1000
+
+class BumpPointerPool {
+public:
+    // ensureCapacity will check whether the current pool has capacity to
+    // allocate 'size' bytes of memory. If it does not, it will attempt to
+ // allocate a new pool (which will be added to this one in a chain).
+ //
+ // If allocation fails (out of memory) this method will return null.
+ // If the return value is non-null, then callers should update any
+ // references they have to this current (possibly full) BumpPointerPool
+ // to instead point to the newly returned BumpPointerPool.
+ BumpPointerPool* ensureCapacity(size_t size)
+ {
+ void* allocationEnd = static_cast<char*>(m_current) + size;
+ ASSERT(allocationEnd > m_current); // check for overflow
+ if (allocationEnd <= static_cast<void*>(this))
+ return this;
+ return ensureCapacityCrossPool(this, size);
+ }
+
+ // alloc should only be called after calling ensureCapacity; as such
+ // alloc will never fail.
+ void* alloc(size_t size)
+ {
+ void* current = m_current;
+ void* allocationEnd = static_cast<char*>(current) + size;
+ ASSERT(allocationEnd > current); // check for overflow
+ ASSERT(allocationEnd <= static_cast<void*>(this));
+ m_current = allocationEnd;
+ return current;
+ }
+
+ // The dealloc method releases memory allocated using alloc. Memory
+ // must be released in a LIFO fashion, e.g. if the client calls alloc
+ // four times, returning pointer A, B, C, D, then the only valid order
+    // in which these may be deallocated is D, C, B, A.
+ //
+ // The client may optionally skip some deallocations. In the example
+ // above, it would be valid to only explicitly dealloc C, A (D being
+ // dealloced along with C, B along with A).
+ //
+ // If pointer was not allocated from this pool (or pools) then dealloc
+ // will CRASH(). Callers should update any references they have to
+ // this current BumpPointerPool to instead point to the returned
+ // BumpPointerPool.
+ BumpPointerPool* dealloc(void* position)
+ {
+ if ((position >= m_start) && (position <= static_cast<void*>(this))) {
+ ASSERT(position <= m_current);
+ m_current = position;
+ return this;
+ }
+ return deallocCrossPool(this, position);
+ }
+
+private:
+    // Placement operator new; returns the last 'size' bytes of the allocation for use as 'this'.
+ void* operator new(size_t size, const PageAllocation& allocation)
+ {
+ ASSERT(size < allocation.size());
+ return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
+ }
+
+ BumpPointerPool(const PageAllocation& allocation)
+ : m_current(allocation.base())
+ , m_start(allocation.base())
+ , m_next(0)
+ , m_previous(0)
+ , m_allocation(allocation)
+ {
+ }
+
+ static BumpPointerPool* create(size_t minimumCapacity = 0)
+ {
+ // Add size of BumpPointerPool object, check for overflow.
+ minimumCapacity += sizeof(BumpPointerPool);
+ if (minimumCapacity < sizeof(BumpPointerPool))
+ return 0;
+
+ size_t poolSize = std::max(static_cast<size_t>(MINIMUM_BUMP_POOL_SIZE), WTF::pageSize());
+ while (poolSize < minimumCapacity) {
+ poolSize <<= 1;
+ // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
+ ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
+ if (!poolSize)
+ return 0;
+ }
+
+ PageAllocation allocation = PageAllocation::allocate(poolSize);
+ if (!!allocation)
+ return new (allocation) BumpPointerPool(allocation);
+ return 0;
+ }
+
+ void shrink()
+ {
+ ASSERT(!m_previous);
+ m_current = m_start;
+ while (m_next) {
+ BumpPointerPool* nextNext = m_next->m_next;
+ m_next->destroy();
+ m_next = nextNext;
+ }
+ }
+
+ void destroy()
+ {
+ m_allocation.deallocate();
+ }
+
+ static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
+ {
+ // The pool passed should not have capacity, so we'll start with the next one.
+ ASSERT(previousPool);
+ ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
+ ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
+ BumpPointerPool* pool = previousPool->m_next;
+
+ while (true) {
+ if (!pool) {
+ // We've run to the end; allocate a new pool.
+ pool = BumpPointerPool::create(size);
+ previousPool->m_next = pool;
+ pool->m_previous = previousPool;
+ return pool;
+ }
+
+ void* current = pool->m_current;
+ void* allocationEnd = static_cast<char*>(current) + size;
+ ASSERT(allocationEnd > current); // check for overflow
+ if (allocationEnd <= static_cast<void*>(pool))
+ return pool;
+ }
+ }
+
+ static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
+ {
+ // Should only be called if position is not in the current pool.
+ ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));
+
+ while (true) {
+ // Unwind the current pool to the start, move back in the chain to the previous pool.
+ pool->m_current = pool->m_start;
+ pool = pool->m_previous;
+
+ // position was nowhere in the chain!
+ if (!pool)
+ CRASH();
+
+ if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
+ ASSERT(position <= pool->m_current);
+ pool->m_current = position;
+ return pool;
+ }
+ }
+ }
+
+ void* m_current;
+ void* m_start;
+ BumpPointerPool* m_next;
+ BumpPointerPool* m_previous;
+ PageAllocation m_allocation;
+
+ friend class BumpPointerAllocator;
+};
+
+// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
+// can be used for LIFO (stack like) allocation.
+//
+// To begin allocating using this class call startAllocator(). The result
+// of this method will be null if the initial pool allocation fails, or a
+// pointer to a BumpPointerPool object that can be used to perform
+// allocations. Whilst running no memory will be released until
+// stopAllocator() is called. At this point all allocations made through
+// this allocator will be reaped, and underlying memory may be freed.
+//
+// (In practice we will still hold on to the initial pool to allow allocation
+// to be quickly restarted, but additional pools will be freed.)
+//
+// This allocator is non-reentrant; it is incumbent on clients to ensure that
+// startAllocator() is not called again until stopAllocator() has been called.
+class BumpPointerAllocator {
+public:
+ BumpPointerAllocator()
+ : m_head(0)
+ {
+ }
+
+ ~BumpPointerAllocator()
+ {
+ if (m_head)
+ m_head->destroy();
+ }
+
+ BumpPointerPool* startAllocator()
+ {
+ if (!m_head)
+ m_head = BumpPointerPool::create();
+ return m_head;
+ }
+
+ void stopAllocator()
+ {
+ if (m_head)
+ m_head->shrink();
+ }
+
+private:
+ BumpPointerPool* m_head;
+};
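+
+// Usage sketch: a client brackets a burst of LIFO allocations with
+// startAllocator()/stopAllocator(), re-pointing its pool reference whenever a
+// call hands back a (possibly different) pool (bytes is a hypothetical size):
+//
+//     BumpPointerAllocator allocator;
+//     BumpPointerPool* pool = allocator.startAllocator();
+//     if (pool)
+//         pool = pool->ensureCapacity(bytes);
+//     void* p = pool ? pool->alloc(bytes) : 0;
+//     // ... use p ...
+//     allocator.stopAllocator(); // reaps everything allocated above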
+
+}
+
+using WTF::BumpPointerAllocator;
+
+#endif // BumpPointerAllocator_h
diff --git a/src/3rdparty/masm/wtf/CheckedArithmetic.h b/src/3rdparty/masm/wtf/CheckedArithmetic.h
new file mode 100644
index 0000000000..dd4acbb9b5
--- /dev/null
+++ b/src/3rdparty/masm/wtf/CheckedArithmetic.h
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CheckedArithmetic_h
+#define CheckedArithmetic_h
+
+#include <wtf/Assertions.h>
+#include <wtf/EnumClass.h>
+#include <wtf/TypeTraits.h>
+
+#include <limits>
+#include <stdint.h>
+
+/* Checked<T>
+ *
+ * This class provides a mechanism to perform overflow-safe integer arithmetic
+ * without having to manually ensure that you have all the required bounds checks
+ * directly in your code.
+ *
+ * There are two modes of operation:
+ *  - The default is Checked<T, CrashOnOverflow>, which crashes at the point
+ *    an overflow has occurred.
+ *  - The alternative is Checked<T, RecordOverflow>, which uses an additional
+ *    byte of storage to track whether an overflow has occurred; subsequent
+ *    unchecked operations will crash if an overflow has occurred.
+ *
+ * It is possible to provide a custom overflow handler, in which case you need
+ * to support these functions:
+ * - void overflowed();
+ * This function is called when an operation has produced an overflow.
+ * - bool hasOverflowed();
+ * This function must return true if overflowed() has been called on an
+ * instance and false if it has not.
+ * - void clearOverflow();
+ * Used to reset overflow tracking when a value is being overwritten with
+ * a new value.
+ *
+ * Checked<T> works for all integer types, with the following caveats:
+ *  - Mixing the signedness of operands is only supported for types narrower
+ *    than 64 bits.
+ * - It does have a performance impact, so tight loops may want to be careful
+ * when using it.
+ *
+ */
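+
+/* Usage sketch: with RecordOverflow the failure is latched rather than fatal,
+ * and unsafeGet() crashes only if an overflow actually happened (width,
+ * height and headerBytes are hypothetical inputs):
+ *
+ *     Checked<uint32_t, RecordOverflow> total = width;
+ *     total *= height;
+ *     total += headerBytes;
+ *     if (total.hasOverflowed())
+ *         return false; // reject the oversized request
+ *     uint32_t bytes = total.unsafeGet();
+ */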
+
+namespace WTF {
+
+ENUM_CLASS(CheckedState)
+{
+ DidOverflow,
+ DidNotOverflow
+} ENUM_CLASS_END(CheckedState);
+
+class CrashOnOverflow {
+public:
+ static NO_RETURN_DUE_TO_CRASH void overflowed()
+ {
+ CRASH();
+ }
+
+ void clearOverflow() { }
+
+public:
+ bool hasOverflowed() const { return false; }
+};
+
+class RecordOverflow {
+protected:
+ RecordOverflow()
+ : m_overflowed(false)
+ {
+ }
+
+ void overflowed()
+ {
+ m_overflowed = true;
+ }
+
+ void clearOverflow()
+ {
+ m_overflowed = false;
+ }
+
+public:
+ bool hasOverflowed() const { return m_overflowed; }
+
+private:
+ unsigned char m_overflowed;
+};
+
+template <typename T, class OverflowHandler = CrashOnOverflow> class Checked;
+template <typename T> struct RemoveChecked;
+template <typename T> struct RemoveChecked<Checked<T> >;
+
+template <typename Target, typename Source, bool targetSigned = std::numeric_limits<Target>::is_signed, bool sourceSigned = std::numeric_limits<Source>::is_signed> struct BoundsChecker;
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, false> {
+ static bool inBounds(Source value)
+ {
+        // Same signedness, so implicit type conversion will always increase
+        // precision to the widest type.
+ return value <= std::numeric_limits<Target>::max();
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, true> {
+ static bool inBounds(Source value)
+ {
+        // Same signedness, so implicit type conversion will always increase
+        // precision to the widest type.
+ return std::numeric_limits<Target>::min() <= value && value <= std::numeric_limits<Target>::max();
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, true> {
+ static bool inBounds(Source value)
+ {
+ // Target is unsigned so any value less than zero is clearly unsafe
+ if (value < 0)
+ return false;
+ // If our (unsigned) Target is the same or greater width we can
+ // convert value to type Target without losing precision
+ if (sizeof(Target) >= sizeof(Source))
+ return static_cast<Target>(value) <= std::numeric_limits<Target>::max();
+ // The signed Source type has greater precision than the target so
+ // max(Target) -> Source will widen.
+ return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, false> {
+ static bool inBounds(Source value)
+ {
+ // Signed target with an unsigned source
+ if (sizeof(Target) <= sizeof(Source))
+ return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+        // Target is wider than Source, so we're guaranteed to fit any value in
+ // unsigned Source
+ return true;
+ }
+};
+
+template <typename Target, typename Source, bool CanElide = IsSameType<Target, Source>::value || (sizeof(Target) > sizeof(Source)) > struct BoundsCheckElider;
+template <typename Target, typename Source> struct BoundsCheckElider<Target, Source, true> {
+ static bool inBounds(Source) { return true; }
+};
+template <typename Target, typename Source> struct BoundsCheckElider<Target, Source, false> : public BoundsChecker<Target, Source> {
+};
+
+template <typename Target, typename Source> static inline bool isInBounds(Source value)
+{
+ return BoundsCheckElider<Target, Source>::inBounds(value);
+}
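+
+// Usage sketch: isInBounds<Target>(value) reports whether a value of the
+// source type fits in Target, dispatching on the four signedness pairings:
+//
+//     isInBounds<uint8_t>(300); // false: exceeds 255
+//     isInBounds<uint8_t>(-1);  // false: negative value, unsigned target
+//     isInBounds<int64_t>(42u); // true: a wider signed target always fits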
+
+template <typename T> struct RemoveChecked {
+ typedef T CleanType;
+ static const CleanType DefaultValue = 0;
+};
+
+template <typename T> struct RemoveChecked<Checked<T, CrashOnOverflow> > {
+ typedef typename RemoveChecked<T>::CleanType CleanType;
+ static const CleanType DefaultValue = 0;
+};
+
+template <typename T> struct RemoveChecked<Checked<T, RecordOverflow> > {
+ typedef typename RemoveChecked<T>::CleanType CleanType;
+ static const CleanType DefaultValue = 0;
+};
+
+// ResultBase and SignednessSelector are used to work around typeof not being
+// available in MSVC.
+template <typename U, typename V, bool uIsBigger = (sizeof(U) > sizeof(V)), bool sameSize = (sizeof(U) == sizeof(V))> struct ResultBase;
+template <typename U, typename V> struct ResultBase<U, V, true, false> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct ResultBase<U, V, false, false> {
+ typedef V ResultType;
+};
+
+template <typename U> struct ResultBase<U, U, false, true> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V, bool uIsSigned = std::numeric_limits<U>::is_signed, bool vIsSigned = std::numeric_limits<V>::is_signed> struct SignednessSelector;
+template <typename U, typename V> struct SignednessSelector<U, V, true, true> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct SignednessSelector<U, V, false, false> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct SignednessSelector<U, V, true, false> {
+ typedef V ResultType;
+};
+
+template <typename U, typename V> struct SignednessSelector<U, V, false, true> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct ResultBase<U, V, false, true> {
+ typedef typename SignednessSelector<U, V>::ResultType ResultType;
+};
+
+template <typename U, typename V> struct Result : ResultBase<typename RemoveChecked<U>::CleanType, typename RemoveChecked<V>::CleanType> {
+};
+
+template <typename LHS, typename RHS, typename ResultType = typename Result<LHS, RHS>::ResultType,
+ bool lhsSigned = std::numeric_limits<LHS>::is_signed, bool rhsSigned = std::numeric_limits<RHS>::is_signed> struct ArithmeticOperations;
+
+template <typename LHS, typename RHS, typename ResultType> struct ArithmeticOperations<LHS, RHS, ResultType, true, true> {
+ // LHS and RHS are signed types
+
+ // Helper function
+ static inline bool signsMatch(LHS lhs, RHS rhs)
+ {
+ return (lhs ^ rhs) >= 0;
+ }
+
+ static inline bool add(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (signsMatch(lhs, rhs)) {
+ if (lhs >= 0) {
+ if ((std::numeric_limits<ResultType>::max() - rhs) < lhs)
+ return false;
+ } else {
+ ResultType temp = lhs - std::numeric_limits<ResultType>::min();
+ if (rhs < -temp)
+ return false;
+ }
+ } // if the signs do not match this operation can't overflow
+ result = lhs + rhs;
+ return true;
+ }
+
+ static inline bool sub(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (!signsMatch(lhs, rhs)) {
+ if (lhs >= 0) {
+ if (lhs > std::numeric_limits<ResultType>::max() + rhs)
+ return false;
+ } else {
+ if (rhs > std::numeric_limits<ResultType>::max() + lhs)
+ return false;
+ }
+ } // if the signs match this operation can't overflow
+ result = lhs - rhs;
+ return true;
+ }
+
+ static inline bool multiply(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (signsMatch(lhs, rhs)) {
+ if (lhs >= 0) {
+ if (lhs && (std::numeric_limits<ResultType>::max() / lhs) < rhs)
+ return false;
+ } else {
+ if (static_cast<ResultType>(lhs) == std::numeric_limits<ResultType>::min() || static_cast<ResultType>(rhs) == std::numeric_limits<ResultType>::min())
+ return false;
+ if ((std::numeric_limits<ResultType>::max() / -lhs) < -rhs)
+ return false;
+ }
+ } else {
+ if (lhs < 0) {
+ if (rhs && lhs < (std::numeric_limits<ResultType>::min() / rhs))
+ return false;
+ } else {
+ if (lhs && rhs < (std::numeric_limits<ResultType>::min() / lhs))
+ return false;
+ }
+ }
+ result = lhs * rhs;
+ return true;
+ }
+
+ static inline bool equals(LHS lhs, RHS rhs) { return lhs == rhs; }
+
+};
+
+template <typename LHS, typename RHS, typename ResultType> struct ArithmeticOperations<LHS, RHS, ResultType, false, false> {
+ // LHS and RHS are unsigned types so bounds checks are nice and easy
+ static inline bool add(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ ResultType temp = lhs + rhs;
+ if (temp < lhs)
+ return false;
+ result = temp;
+ return true;
+ }
+
+ static inline bool sub(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ ResultType temp = lhs - rhs;
+ if (temp > lhs)
+ return false;
+ result = temp;
+ return true;
+ }
+
+ static inline bool multiply(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (!lhs || !rhs) {
+ result = 0;
+ return true;
+ }
+ if (std::numeric_limits<ResultType>::max() / lhs < rhs)
+ return false;
+ result = lhs * rhs;
+ return true;
+ }
+
+ static inline bool equals(LHS lhs, RHS rhs) { return lhs == rhs; }
+
+};
+
+template <typename ResultType> struct ArithmeticOperations<int, unsigned, ResultType, true, false> {
+ static inline bool add(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ int64_t temp = lhs + rhs;
+ if (temp < std::numeric_limits<ResultType>::min())
+ return false;
+ if (temp > std::numeric_limits<ResultType>::max())
+ return false;
+ result = static_cast<ResultType>(temp);
+ return true;
+ }
+
+ static inline bool sub(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ int64_t temp = lhs - rhs;
+ if (temp < std::numeric_limits<ResultType>::min())
+ return false;
+ if (temp > std::numeric_limits<ResultType>::max())
+ return false;
+ result = static_cast<ResultType>(temp);
+ return true;
+ }
+
+ static inline bool multiply(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ int64_t temp = lhs * rhs;
+ if (temp < std::numeric_limits<ResultType>::min())
+ return false;
+ if (temp > std::numeric_limits<ResultType>::max())
+ return false;
+ result = static_cast<ResultType>(temp);
+ return true;
+ }
+
+ static inline bool equals(int lhs, unsigned rhs)
+ {
+ return static_cast<int64_t>(lhs) == static_cast<int64_t>(rhs);
+ }
+};
+
+template <typename ResultType> struct ArithmeticOperations<unsigned, int, ResultType, false, true> {
+ static inline bool add(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::add(rhs, lhs, result);
+ }
+
+ static inline bool sub(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::sub(lhs, rhs, result);
+ }
+
+ static inline bool multiply(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::multiply(rhs, lhs, result);
+ }
+
+ static inline bool equals(unsigned lhs, int rhs)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::equals(rhs, lhs);
+ }
+};
+
+template <typename U, typename V, typename R> static inline bool safeAdd(U lhs, V rhs, R& result)
+{
+ return ArithmeticOperations<U, V, R>::add(lhs, rhs, result);
+}
+
+template <typename U, typename V, typename R> static inline bool safeSub(U lhs, V rhs, R& result)
+{
+ return ArithmeticOperations<U, V, R>::sub(lhs, rhs, result);
+}
+
+template <typename U, typename V, typename R> static inline bool safeMultiply(U lhs, V rhs, R& result)
+{
+ return ArithmeticOperations<U, V, R>::multiply(lhs, rhs, result);
+}
+
+template <typename U, typename V> static inline bool safeEquals(U lhs, V rhs)
+{
+ return ArithmeticOperations<U, V>::equals(lhs, rhs);
+}
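+
+// Usage sketch: the safe* helpers are also usable directly when a full
+// Checked<> wrapper is not needed; on overflow they return false and leave
+// the result unwritten (a and b are hypothetical unsigned inputs):
+//
+//     unsigned sum;
+//     if (!safeAdd(a, b, sum))
+//         return false; // a + b would wrap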
+
+enum ResultOverflowedTag { ResultOverflowed };
+
+// FIXME: Needed to work around http://llvm.org/bugs/show_bug.cgi?id=10801
+static inline bool workAroundClangBug() { return true; }
+
+template <typename T, class OverflowHandler> class Checked : public OverflowHandler {
+public:
+ template <typename _T, class _OverflowHandler> friend class Checked;
+ Checked()
+ : m_value(0)
+ {
+ }
+
+ Checked(ResultOverflowedTag)
+ : m_value(0)
+ {
+ // FIXME: Remove this when clang fixes http://llvm.org/bugs/show_bug.cgi?id=10801
+ if (workAroundClangBug())
+ this->overflowed();
+ }
+
+ template <typename U> Checked(U value)
+ {
+ if (!isInBounds<T>(value))
+ this->overflowed();
+ m_value = static_cast<T>(value);
+ }
+
+ template <typename V> Checked(const Checked<T, V>& rhs)
+ : m_value(rhs.m_value)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ }
+
+ template <typename U> Checked(const Checked<U, OverflowHandler>& rhs)
+ : OverflowHandler(rhs)
+ {
+ if (!isInBounds<T>(rhs.m_value))
+ this->overflowed();
+ m_value = static_cast<T>(rhs.m_value);
+ }
+
+ template <typename U, typename V> Checked(const Checked<U, V>& rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ if (!isInBounds<T>(rhs.m_value))
+ this->overflowed();
+ m_value = static_cast<T>(rhs.m_value);
+ }
+
+ const Checked& operator=(Checked rhs)
+ {
+ this->clearOverflow();
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ m_value = static_cast<T>(rhs.m_value);
+ return *this;
+ }
+
+ template <typename U> const Checked& operator=(U value)
+ {
+ return *this = Checked(value);
+ }
+
+ template <typename U, typename V> const Checked& operator=(const Checked<U, V>& rhs)
+ {
+ return *this = Checked(rhs);
+ }
+
+    // prefix operators
+ const Checked& operator++()
+ {
+ if (m_value == std::numeric_limits<T>::max())
+ this->overflowed();
+ m_value++;
+ return *this;
+ }
+
+ const Checked& operator--()
+ {
+ if (m_value == std::numeric_limits<T>::min())
+ this->overflowed();
+ m_value--;
+ return *this;
+ }
+
+ // postfix operators
+ const Checked operator++(int)
+ {
+ if (m_value == std::numeric_limits<T>::max())
+ this->overflowed();
+ return Checked(m_value++);
+ }
+
+ const Checked operator--(int)
+ {
+ if (m_value == std::numeric_limits<T>::min())
+ this->overflowed();
+ return Checked(m_value--);
+ }
+
+ // Boolean operators
+ bool operator!() const
+ {
+ if (this->hasOverflowed())
+ CRASH();
+ return !m_value;
+ }
+
+ typedef void* (Checked::*UnspecifiedBoolType);
+ operator UnspecifiedBoolType*() const
+ {
+ if (this->hasOverflowed())
+ CRASH();
+ return (m_value) ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0;
+ }
+
+ // Value accessors. unsafeGet() will crash if there's been an overflow.
+ T unsafeGet() const
+ {
+ if (this->hasOverflowed())
+ CRASH();
+ return m_value;
+ }
+
+ inline CheckedState safeGet(T& value) const WARN_UNUSED_RETURN
+ {
+ value = m_value;
+ if (this->hasOverflowed())
+ return CheckedState::DidOverflow;
+ return CheckedState::DidNotOverflow;
+ }
+
+ // Mutating assignment
+ template <typename U> const Checked operator+=(U rhs)
+ {
+ if (!safeAdd(m_value, rhs, m_value))
+ this->overflowed();
+ return *this;
+ }
+
+ template <typename U> const Checked operator-=(U rhs)
+ {
+ if (!safeSub(m_value, rhs, m_value))
+ this->overflowed();
+ return *this;
+ }
+
+ template <typename U> const Checked operator*=(U rhs)
+ {
+ if (!safeMultiply(m_value, rhs, m_value))
+ this->overflowed();
+ return *this;
+ }
+
+ const Checked operator*=(double rhs)
+ {
+ double result = rhs * m_value;
+ // Handle +/- infinity and NaN
+ if (!(std::numeric_limits<T>::min() <= result && std::numeric_limits<T>::max() >= result))
+ this->overflowed();
+ m_value = (T)result;
+ return *this;
+ }
+
+ const Checked operator*=(float rhs)
+ {
+ return *this *= (double)rhs;
+ }
+
+ template <typename U, typename V> const Checked operator+=(Checked<U, V> rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ return *this += rhs.m_value;
+ }
+
+ template <typename U, typename V> const Checked operator-=(Checked<U, V> rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ return *this -= rhs.m_value;
+ }
+
+ template <typename U, typename V> const Checked operator*=(Checked<U, V> rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ return *this *= rhs.m_value;
+ }
+
+ // Equality comparisons
+ template <typename V> bool operator==(Checked<T, V> rhs)
+ {
+ return unsafeGet() == rhs.unsafeGet();
+ }
+
+ template <typename U> bool operator==(U rhs)
+ {
+ if (this->hasOverflowed())
+ this->overflowed();
+ return safeEquals(m_value, rhs);
+ }
+
+ template <typename U, typename V> const Checked operator==(Checked<U, V> rhs)
+ {
+ return unsafeGet() == Checked(rhs.unsafeGet());
+ }
+
+ template <typename U> bool operator!=(U rhs)
+ {
+ return !(*this == rhs);
+ }
+
+private:
+ // Disallow implicit conversion of floating point to integer types
+ Checked(float);
+ Checked(double);
+ void operator=(float);
+ void operator=(double);
+ void operator+=(float);
+ void operator+=(double);
+ void operator-=(float);
+ void operator-=(double);
+ T m_value;
+};
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator+(Checked<U, OverflowHandler> lhs, Checked<V, OverflowHandler> rhs)
+{
+ U x = 0;
+ V y = 0;
+ bool overflowed = lhs.safeGet(x) == CheckedState::DidOverflow || rhs.safeGet(y) == CheckedState::DidOverflow;
+ typename Result<U, V>::ResultType result = 0;
+ overflowed |= !safeAdd(x, y, result);
+ if (overflowed)
+ return ResultOverflowed;
+ return result;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator-(Checked<U, OverflowHandler> lhs, Checked<V, OverflowHandler> rhs)
+{
+ U x = 0;
+ V y = 0;
+ bool overflowed = lhs.safeGet(x) == CheckedState::DidOverflow || rhs.safeGet(y) == CheckedState::DidOverflow;
+ typename Result<U, V>::ResultType result = 0;
+ overflowed |= !safeSub(x, y, result);
+ if (overflowed)
+ return ResultOverflowed;
+ return result;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator*(Checked<U, OverflowHandler> lhs, Checked<V, OverflowHandler> rhs)
+{
+ U x = 0;
+ V y = 0;
+ bool overflowed = lhs.safeGet(x) == CheckedState::DidOverflow || rhs.safeGet(y) == CheckedState::DidOverflow;
+ typename Result<U, V>::ResultType result = 0;
+ overflowed |= !safeMultiply(x, y, result);
+ if (overflowed)
+ return ResultOverflowed;
+ return result;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator+(Checked<U, OverflowHandler> lhs, V rhs)
+{
+ return lhs + Checked<V, OverflowHandler>(rhs);
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator-(Checked<U, OverflowHandler> lhs, V rhs)
+{
+ return lhs - Checked<V, OverflowHandler>(rhs);
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator*(Checked<U, OverflowHandler> lhs, V rhs)
+{
+ return lhs * Checked<V, OverflowHandler>(rhs);
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator+(U lhs, Checked<V, OverflowHandler> rhs)
+{
+ return Checked<U, OverflowHandler>(lhs) + rhs;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator-(U lhs, Checked<V, OverflowHandler> rhs)
+{
+ return Checked<U, OverflowHandler>(lhs) - rhs;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator*(U lhs, Checked<V, OverflowHandler> rhs)
+{
+ return Checked<U, OverflowHandler>(lhs) * rhs;
+}
+
+}
+
+using WTF::Checked;
+using WTF::CheckedState;
+using WTF::RecordOverflow;
+
+#endif
diff --git a/src/3rdparty/masm/wtf/Compiler.h b/src/3rdparty/masm/wtf/Compiler.h
new file mode 100644
index 0000000000..b886f37151
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Compiler.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Compiler_h
+#define WTF_Compiler_h
+
+/* COMPILER() - the compiler being used to build the project */
+#define COMPILER(WTF_FEATURE) (defined WTF_COMPILER_##WTF_FEATURE && WTF_COMPILER_##WTF_FEATURE)
+
+/* COMPILER_SUPPORTS() - whether the compiler being used to build the project supports the given feature. */
+#define COMPILER_SUPPORTS(WTF_COMPILER_FEATURE) (defined WTF_COMPILER_SUPPORTS_##WTF_COMPILER_FEATURE && WTF_COMPILER_SUPPORTS_##WTF_COMPILER_FEATURE)
+
+/* COMPILER_QUIRK() - whether the compiler being used to build the project requires a given quirk. */
+#define COMPILER_QUIRK(WTF_COMPILER_QUIRK) (defined WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK && WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK)
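+
+/* Usage sketch: the test macros are safe to use in #if even when the
+   corresponding WTF_COMPILER_* macro is never defined:
+
+       #if COMPILER(GCC) && COMPILER_SUPPORTS(CXX_NULLPTR)
+       // gcc building C++11: nullptr is available
+       #endif
+*/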
+
+/* ==== COMPILER() - the compiler being used to build the project ==== */
+
+/* COMPILER(CLANG) - Clang */
+#if defined(__clang__)
+#define WTF_COMPILER_CLANG 1
+
+#ifndef __has_extension
+#define __has_extension __has_feature /* Compatibility with older versions of clang */
+#endif
+
+#define CLANG_PRAGMA(PRAGMA) _Pragma(PRAGMA)
+
+/* Specific compiler features */
+#define WTF_COMPILER_SUPPORTS_CXX_VARIADIC_TEMPLATES __has_extension(cxx_variadic_templates)
+
+/* There is a bug in clang that comes with Xcode 4.2 where AtomicStrings can't be implicitly converted to Strings
+ in the presence of move constructors and/or move assignment operators. This bug has been fixed in Xcode 4.3 clang, so we
+ check for both cxx_rvalue_references as well as the unrelated cxx_nonstatic_member_init feature which we know was added in 4.3 */
+#define WTF_COMPILER_SUPPORTS_CXX_RVALUE_REFERENCES __has_extension(cxx_rvalue_references) && __has_extension(cxx_nonstatic_member_init)
+
+#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS __has_extension(cxx_deleted_functions)
+#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR __has_feature(cxx_nullptr)
+#define WTF_COMPILER_SUPPORTS_CXX_EXPLICIT_CONVERSIONS __has_feature(cxx_explicit_conversions)
+#define WTF_COMPILER_SUPPORTS_BLOCKS __has_feature(blocks)
+#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT __has_extension(c_static_assert)
+#define WTF_COMPILER_SUPPORTS_CXX_STATIC_ASSERT __has_extension(cxx_static_assert)
+#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL __has_extension(cxx_override_control)
+#define WTF_COMPILER_SUPPORTS_HAS_TRIVIAL_DESTRUCTOR __has_extension(has_trivial_destructor)
+#define WTF_COMPILER_SUPPORTS_CXX_STRONG_ENUMS __has_extension(cxx_strong_enums)
+
+#endif
+
+#ifndef CLANG_PRAGMA
+#define CLANG_PRAGMA(PRAGMA)
+#endif
+
+/* COMPILER(MSVC) - Microsoft Visual C++ */
+/* COMPILER(MSVC7_OR_LOWER) - Microsoft Visual C++ 2003 or lower */
+/* COMPILER(MSVC9_OR_LOWER) - Microsoft Visual C++ 2008 or lower */
+#if defined(_MSC_VER)
+#define WTF_COMPILER_MSVC 1
+#if _MSC_VER < 1400
+#define WTF_COMPILER_MSVC7_OR_LOWER 1
+#elif _MSC_VER < 1600
+#define WTF_COMPILER_MSVC9_OR_LOWER 1
+#endif
+
+/* Specific compiler features */
+#if !COMPILER(CLANG) && _MSC_VER >= 1600
+#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR 1
+#endif
+
+#if !COMPILER(CLANG)
+#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL 1
+#define WTF_COMPILER_QUIRK_FINAL_IS_CALLED_SEALED 1
+#endif
+
+#endif
+
+/* COMPILER(RVCT) - ARM RealView Compilation Tools */
+#if defined(__CC_ARM) || defined(__ARMCC__)
+#define WTF_COMPILER_RVCT 1
+#define RVCT_VERSION_AT_LEAST(major, minor, patch, build) (__ARMCC_VERSION >= (major * 100000 + minor * 10000 + patch * 1000 + build))
+#else
+/* Define this for !RVCT compilers, just so we can write things like RVCT_VERSION_AT_LEAST(3, 0, 0, 0). */
+#define RVCT_VERSION_AT_LEAST(major, minor, patch, build) 0
+#endif
+
+/* COMPILER(GCCE) - GNU Compiler Collection for Embedded */
+#if defined(__GCCE__)
+#define WTF_COMPILER_GCCE 1
+#define GCCE_VERSION (__GCCE__ * 10000 + __GCCE_MINOR__ * 100 + __GCCE_PATCHLEVEL__)
+#define GCCE_VERSION_AT_LEAST(major, minor, patch) (GCCE_VERSION >= (major * 10000 + minor * 100 + patch))
+#endif
+
+/* COMPILER(GCC) - GNU Compiler Collection */
+/* The --gnu option of the RVCT compiler also defines __GNUC__ */
+#if defined(__GNUC__) && !COMPILER(RVCT)
+#define WTF_COMPILER_GCC 1
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION_AT_LEAST(major, minor, patch) (GCC_VERSION >= (major * 10000 + minor * 100 + patch))
+#else
+/* Define this for !GCC compilers, just so we can write things like GCC_VERSION_AT_LEAST(4, 1, 0). */
+#define GCC_VERSION_AT_LEAST(major, minor, patch) 0
+#endif
+
+/* Specific compiler features */
+#if COMPILER(GCC) && !COMPILER(CLANG)
+#if GCC_VERSION_AT_LEAST(4, 8, 0)
+#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
+#endif
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+/* C11 support */
+#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT 1
+#endif
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && __cplusplus >= 201103L)
+/* C++11 support */
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_RVALUE_REFERENCES 1
+#define WTF_COMPILER_SUPPORTS_CXX_STATIC_ASSERT 1
+#define WTF_COMPILER_SUPPORTS_CXX_VARIADIC_TEMPLATES 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 4, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 5, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_EXPLICIT_CONVERSIONS 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 6, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR 1
+/* Strong enums should work from gcc 4.4, but that version doesn't seem to support some operators */
+#define WTF_COMPILER_SUPPORTS_CXX_STRONG_ENUMS 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 7, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL 1
+#endif
+#endif /* defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && __cplusplus >= 201103L) */
+#endif /* COMPILER(GCC) */
+
+/* COMPILER(MINGW) - MinGW GCC */
+/* COMPILER(MINGW64) - mingw-w64 GCC - only used as additional check to exclude mingw.org specific functions */
+#if defined(__MINGW32__)
+#define WTF_COMPILER_MINGW 1
+#include <_mingw.h> /* private MinGW header */
+ #if defined(__MINGW64_VERSION_MAJOR) /* best way to check for mingw-w64 vs mingw.org */
+ #define WTF_COMPILER_MINGW64 1
+ #endif /* __MINGW64_VERSION_MAJOR */
+#endif /* __MINGW32__ */
+
+/* COMPILER(INTEL) - Intel C++ Compiler */
+#if defined(__INTEL_COMPILER)
+#define WTF_COMPILER_INTEL 1
+#endif
+
+/* COMPILER(SUNCC) */
+#if defined(__SUNPRO_CC) || defined(__SUNPRO_C)
+#define WTF_COMPILER_SUNCC 1
+#endif
+
+/* ==== Compiler features ==== */
+
+
+/* ALWAYS_INLINE */
+
+#ifndef ALWAYS_INLINE
+#if COMPILER(GCC) && defined(NDEBUG) && !COMPILER(MINGW)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif (COMPILER(MSVC) || COMPILER(RVCT)) && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+#endif
+
+
+/* NEVER_INLINE */
+
+#ifndef NEVER_INLINE
+#if COMPILER(GCC)
+#define NEVER_INLINE __attribute__((__noinline__))
+#elif COMPILER(RVCT)
+#define NEVER_INLINE __declspec(noinline)
+#else
+#define NEVER_INLINE
+#endif
+#endif
+
+
+/* UNLIKELY */
+
+#ifndef UNLIKELY
+#if COMPILER(GCC) || (COMPILER(RVCT) && defined(__GNUC__))
+#define UNLIKELY(x) __builtin_expect((x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif
+#endif
+
+
+/* LIKELY */
+
+#ifndef LIKELY
+#if COMPILER(GCC) || (COMPILER(RVCT) && defined(__GNUC__))
+#define LIKELY(x) __builtin_expect((x), 1)
+#else
+#define LIKELY(x) (x)
+#endif
+#endif
+
+
+/* NO_RETURN */
+
+
+#ifndef NO_RETURN
+#if COMPILER(GCC)
+#define NO_RETURN __attribute((__noreturn__))
+#elif COMPILER(MSVC) || COMPILER(RVCT)
+#define NO_RETURN __declspec(noreturn)
+#else
+#define NO_RETURN
+#endif
+#endif
+
+
+/* NO_RETURN_WITH_VALUE */
+
+#ifndef NO_RETURN_WITH_VALUE
+#if !COMPILER(MSVC)
+#define NO_RETURN_WITH_VALUE NO_RETURN
+#else
+#define NO_RETURN_WITH_VALUE
+#endif
+#endif
+
+
+/* WARN_UNUSED_RETURN */
+
+#if COMPILER(GCC)
+#define WARN_UNUSED_RETURN __attribute__ ((warn_unused_result))
+#else
+#define WARN_UNUSED_RETURN
+#endif
+
+/* OVERRIDE and FINAL */
+
+#if COMPILER_SUPPORTS(CXX_OVERRIDE_CONTROL)
+#define OVERRIDE override
+
+#if COMPILER_QUIRK(FINAL_IS_CALLED_SEALED)
+#define FINAL sealed
+#else
+#define FINAL final
+#endif
+
+#else
+#define OVERRIDE
+#define FINAL
+#endif
+
+/* REFERENCED_FROM_ASM */
+
+#ifndef REFERENCED_FROM_ASM
+#if COMPILER(GCC)
+#define REFERENCED_FROM_ASM __attribute__((used))
+#else
+#define REFERENCED_FROM_ASM
+#endif
+#endif
+
+/* OBJC_CLASS */
+
+#ifndef OBJC_CLASS
+#ifdef __OBJC__
+#define OBJC_CLASS @class
+#else
+#define OBJC_CLASS class
+#endif
+#endif
+
+/* ABI */
+#if defined(__ARM_EABI__) || defined(__EABI__)
+#define WTF_COMPILER_SUPPORTS_EABI 1
+#endif
+
+#endif /* WTF_Compiler_h */
diff --git a/src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h b/src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h
new file mode 100644
index 0000000000..2262b6c3b3
--- /dev/null
+++ b/src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_CryptographicallyRandomNumber_h
+#define WTF_CryptographicallyRandomNumber_h
+
+#include <stdint.h>
+
+namespace WTF {
+
+#if USE(OS_RANDOMNESS)
+WTF_EXPORT_PRIVATE uint32_t cryptographicallyRandomNumber();
+WTF_EXPORT_PRIVATE void cryptographicallyRandomValues(void* buffer, size_t length);
+#endif
+
+}
+
+#if USE(OS_RANDOMNESS)
+using WTF::cryptographicallyRandomNumber;
+using WTF::cryptographicallyRandomValues;
+#endif
+
+#endif
diff --git a/src/3rdparty/masm/wtf/DataLog.h b/src/3rdparty/masm/wtf/DataLog.h
new file mode 100644
index 0000000000..0bd8efe727
--- /dev/null
+++ b/src/3rdparty/masm/wtf/DataLog.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DataLog_h
+#define DataLog_h
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <wtf/FilePrintStream.h>
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE FilePrintStream& dataFile();
+
+WTF_EXPORT_PRIVATE void dataLogFV(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(1, 0);
+WTF_EXPORT_PRIVATE void dataLogF(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
+WTF_EXPORT_PRIVATE void dataLogFString(const char*);
+
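+// Usage sketch (blockCount and blockIndex are invented for illustration):
+//   dataLogF("compiled %u blocks\n", blockCount);   // printf-style
+//   dataLog("block ", blockIndex, ": done\n");      // print()-style, see below
+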
+template<typename T>
+void dataLog(const T& value)
+{
+ dataFile().print(value);
+}
+
+template<typename T1, typename T2>
+void dataLog(const T1& value1, const T2& value2)
+{
+ dataFile().print(value1, value2);
+}
+
+template<typename T1, typename T2, typename T3>
+void dataLog(const T1& value1, const T2& value2, const T3& value3)
+{
+ dataFile().print(value1, value2, value3);
+}
+
+template<typename T1, typename T2, typename T3, typename T4>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
+{
+ dataFile().print(value1, value2, value3, value4);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
+{
+ dataFile().print(value1, value2, value3, value4, value5);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12, value13);
+}
+
+} // namespace WTF
+
+using WTF::dataLog;
+using WTF::dataLogF;
+using WTF::dataLogFString;
+
+#endif // DataLog_h
+
diff --git a/src/3rdparty/masm/wtf/DynamicAnnotations.h b/src/3rdparty/masm/wtf/DynamicAnnotations.h
new file mode 100644
index 0000000000..38acce35e6
--- /dev/null
+++ b/src/3rdparty/masm/wtf/DynamicAnnotations.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_DynamicAnnotations_h
+#define WTF_DynamicAnnotations_h
+
+/* This file defines dynamic annotations for use with dynamic analysis
+ * tools such as ThreadSanitizer, Valgrind, etc.
+ *
+ * Dynamic annotation is a source code annotation that affects
+ * the generated code (that is, the annotation is not a comment).
+ * Each such annotation is attached to a particular
+ * instruction and/or to a particular object (address) in the program.
+ *
+ * By using dynamic annotations a developer can give the dynamic analysis
+ * tool more detail and so improve its precision.
+ *
+ * In a C/C++ program the annotations are represented as C macros.
+ * With the default build flags these macros are empty, and hence don't
+ * affect the performance of a compiled binary.
+ * If dynamic annotations are enabled, they just call no-op functions.
+ * The dynamic analysis tools can intercept these functions and replace them
+ * with their own implementations.
+ *
+ * See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations for more information.
+ */
+
+#if USE(DYNAMIC_ANNOTATIONS)
+/* Tell data race detector that we're not interested in reports on the given address range. */
+#define WTF_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) WTFAnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
+#define WTF_ANNOTATE_BENIGN_RACE(pointer, description) WTFAnnotateBenignRaceSized(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+/* Annotations for user-defined synchronization mechanisms.
+ * These annotations define happens-before arcs: the race detector will infer
+ * an arc from each HAPPENS_BEFORE annotation to every later HAPPENS_AFTER
+ * annotation that is passed the same argument pointer.
+ *
+ * The most common case requiring annotations is atomic reference counting:
+ * bool deref() {
+ *     WTF_ANNOTATE_HAPPENS_BEFORE(&m_refCount);
+ * if (!atomicDecrement(&m_refCount)) {
+ * // m_refCount is now 0
+ *         WTF_ANNOTATE_HAPPENS_AFTER(&m_refCount);
+ * // "return true; happens-after each atomicDecrement of m_refCount"
+ * return true;
+ * }
+ * return false;
+ * }
+ */
+#define WTF_ANNOTATE_HAPPENS_BEFORE(address) WTFAnnotateHappensBefore(__FILE__, __LINE__, address)
+#define WTF_ANNOTATE_HAPPENS_AFTER(address) WTFAnnotateHappensAfter(__FILE__, __LINE__, address)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Don't use these directly, use the above macros instead. */
+void WTFAnnotateBenignRaceSized(const char* file, int line, const volatile void* memory, long size, const char* description);
+void WTFAnnotateHappensBefore(const char* file, int line, const volatile void* address);
+void WTFAnnotateHappensAfter(const char* file, int line, const volatile void* address);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#else // USE(DYNAMIC_ANNOTATIONS)
+/* These macros are empty when dynamic annotations are not enabled so you can
+ * use them without affecting the performance of release binaries. */
+#define WTF_ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
+#define WTF_ANNOTATE_BENIGN_RACE(pointer, description)
+#define WTF_ANNOTATE_HAPPENS_BEFORE(address)
+#define WTF_ANNOTATE_HAPPENS_AFTER(address)
+#endif // USE(DYNAMIC_ANNOTATIONS)
+
+#endif // WTF_DynamicAnnotations_h
diff --git a/src/3rdparty/masm/wtf/EnumClass.h b/src/3rdparty/masm/wtf/EnumClass.h
new file mode 100644
index 0000000000..a5729b3b97
--- /dev/null
+++ b/src/3rdparty/masm/wtf/EnumClass.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_EnumClass_h
+#define WTF_EnumClass_h
+
+#include <wtf/Compiler.h>
+
+namespace WTF {
+
+// How to define a type-safe enum list using the ENUM_CLASS macros
+// ================================================================
+// To get an enum list like this:
+//
+// enum class MyEnums {
+// Value1,
+// Value2,
+// ...
+// ValueN
+// };
+//
+// ... write this:
+//
+// ENUM_CLASS(MyEnums) {
+// Value1,
+// Value2,
+// ...
+// ValueN
+// } ENUM_CLASS_END(MyEnums);
+//
+// The ENUM_CLASS macros will use C++11's enum class if the compiler supports it.
+// Otherwise, it will use the EnumClass template below.
+
+#if COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+
+#define ENUM_CLASS(__enumName) \
+ enum class __enumName
+
+#define ENUM_CLASS_END(__enumName)
+
+#else // !COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+
+// How to define a type-safe enum list using the EnumClass template
+// =================================================================
+// The definition should be a struct that encapsulates an enum list,
+// and the enum list should be named Enums.
+//
+// Here's an example of how to define a type safe enum named MyEnum using
+// the EnumClass template:
+//
+// struct MyEnumDefinition {
+// enum Enums {
+// ValueDefault,
+// Value1,
+// ...
+// ValueN
+// };
+// };
+// typedef EnumClass<MyEnumDefinition, MyEnumDefinition::ValueDefault> MyEnum;
+//
+// With that, you can now use MyEnum enum values as follows:
+//
+// MyEnum value1; // value1 is assigned MyEnum::ValueDefault by default.
+// MyEnum value2 = MyEnum::Value1; // value2 is assigned MyEnum::Value1;
+
+template <typename Definition>
+class EnumClass : public Definition {
+ typedef enum Definition::Enums Value;
+public:
+ ALWAYS_INLINE EnumClass() { }
+ ALWAYS_INLINE EnumClass(Value value) : m_value(value) { }
+
+ ALWAYS_INLINE Value value() const { return m_value; }
+
+ ALWAYS_INLINE bool operator==(const EnumClass other) { return m_value == other.m_value; }
+ ALWAYS_INLINE bool operator!=(const EnumClass other) { return m_value != other.m_value; }
+ ALWAYS_INLINE bool operator<(const EnumClass other) { return m_value < other.m_value; }
+ ALWAYS_INLINE bool operator<=(const EnumClass other) { return m_value <= other.m_value; }
+ ALWAYS_INLINE bool operator>(const EnumClass other) { return m_value > other.m_value; }
+ ALWAYS_INLINE bool operator>=(const EnumClass other) { return m_value >= other.m_value; }
+
+ ALWAYS_INLINE bool operator==(const Value value) { return m_value == value; }
+ ALWAYS_INLINE bool operator!=(const Value value) { return m_value != value; }
+ ALWAYS_INLINE bool operator<(const Value value) { return m_value < value; }
+ ALWAYS_INLINE bool operator<=(const Value value) { return m_value <= value; }
+ ALWAYS_INLINE bool operator>(const Value value) { return m_value > value; }
+ ALWAYS_INLINE bool operator>=(const Value value) { return m_value >= value; }
+
+ ALWAYS_INLINE operator Value() { return m_value; }
+
+private:
+ Value m_value;
+};
+
+#define ENUM_CLASS(__enumName) \
+ struct __enumName ## Definition { \
+ enum Enums
+
+#define ENUM_CLASS_END(__enumName) \
+ ; \
+ }; \
+ typedef EnumClass< __enumName ## Definition > __enumName
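+
+// For illustration, in this fallback path
+//     ENUM_CLASS(MyEnums) { Value1, Value2 } ENUM_CLASS_END(MyEnums);
+// expands to:
+//     struct MyEnumsDefinition { enum Enums { Value1, Value2 }; };
+//     typedef EnumClass<MyEnumsDefinition> MyEnums;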
+
+#endif // !COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+
+} // namespace WTF
+
+#if !COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+using WTF::EnumClass;
+#endif
+
+#endif // WTF_EnumClass_h
diff --git a/src/3rdparty/masm/wtf/FeatureDefines.h b/src/3rdparty/masm/wtf/FeatureDefines.h
new file mode 100644
index 0000000000..afad174658
--- /dev/null
+++ b/src/3rdparty/masm/wtf/FeatureDefines.h
@@ -0,0 +1,874 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ * Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
+ * Copyright (C) 2013 Samsung Electronics. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_FeatureDefines_h
+#define WTF_FeatureDefines_h
+
+/* Use this file to list _all_ ENABLE() macros. Define the macros to be one of the following values:
+ * - "0" disables the feature by default. The feature can still be enabled for a specific port or environment.
+ * - "1" enables the feature by default. The feature can still be disabled for a specific port or environment.
+ *
+ * The feature defaults in this file are only taken into account if the (port specific) build system
+ * has not enabled or disabled a particular feature.
+ *
+ * Use this file to define ENABLE() macros only. Do not use this file to define USE() or other macros!
+ *
+ * Only define a macro if it was not defined before - always check for !defined first.
+ *
+ * Keep the file sorted by the name of the defines. As an exception you can change the order
+ * to allow interdependencies between the default values.
+ *
+ * Below are a few useful commands for working with this file, to be run from the Source/WTF directory:
+ *
+ * Get the list of feature defines: grep -o "ENABLE_\(\w\+\)" wtf/FeatureDefines.h | sort | uniq
+ * Get the list of features enabled by default for a PLATFORM(XXX): gcc -E -dM -I. -DWTF_PLATFORM_XXX "wtf/Platform.h" | grep "ENABLE_\w\+ 1" | cut -d' ' -f2 | sort
+ */
+
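+/* For example, a port's build system can pre-empt a default below by defining
+ * the macro itself (e.g. -DENABLE_WEBGL=1 on the compiler command line, or
+ * DEFINES += ENABLE_WEBGL=1 in qmake); the !defined guards then skip it. */
+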
+/* FIXME: Move out the PLATFORM specific rules into platform specific files. */
+
+/* --------- Apple IOS (but not MAC) port --------- */
+/* PLATFORM(IOS) is a specialization of PLATFORM(MAC). */
+/* PLATFORM(MAC) is always enabled when PLATFORM(IOS) is enabled. */
+#if PLATFORM(IOS)
+
+#if !defined(ENABLE_8BIT_TEXTRUN)
+#define ENABLE_8BIT_TEXTRUN 1
+#endif
+
+#if !defined(ENABLE_CONTEXT_MENUS)
+#define ENABLE_CONTEXT_MENUS 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_SET)
+#define ENABLE_CSS_IMAGE_SET 1
+#endif
+
+#if !defined(ENABLE_DRAG_SUPPORT)
+#define ENABLE_DRAG_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 1
+#endif
+
+#if !defined(ENABLE_ICONDATABASE)
+#define ENABLE_ICONDATABASE 0
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_API)
+#define ENABLE_NETSCAPE_PLUGIN_API 0
+#endif
+
+#if !defined(ENABLE_ORIENTATION_EVENTS)
+#define ENABLE_ORIENTATION_EVENTS 1
+#endif
+
+#if !defined(ENABLE_REPAINT_THROTTLING)
+#define ENABLE_REPAINT_THROTTLING 1
+#endif
+
+#if !defined(ENABLE_TEXT_CARET)
+#define ENABLE_TEXT_CARET 0
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
+#define ENABLE_VIEW_MODE_CSS_MEDIA 0
+#endif
+
+#if !defined(ENABLE_WEBGL)
+#define ENABLE_WEBGL 1
+#endif
+
+#endif /* PLATFORM(IOS) */
+
+/* --------- Apple MAC port (not IOS) --------- */
+#if PLATFORM(MAC) && !PLATFORM(IOS)
+
+#if !defined(ENABLE_8BIT_TEXTRUN)
+#define ENABLE_8BIT_TEXTRUN 1
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_SET)
+#define ENABLE_CSS_IMAGE_SET 1
+#endif
+
+#if !defined(ENABLE_DASHBOARD_SUPPORT)
+#define ENABLE_DASHBOARD_SUPPORT 1
+#endif
+
+#if !defined(ENABLE_DELETION_UI)
+#define ENABLE_DELETION_UI 1
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#if !defined(ENABLE_ENCRYPTED_MEDIA)
+#define ENABLE_ENCRYPTED_MEDIA 1
+#endif
+#if !defined(ENABLE_ENCRYPTED_MEDIA_V2)
+#define ENABLE_ENCRYPTED_MEDIA_V2 1
+#endif
+#endif
+
+#if !defined(ENABLE_FULLSCREEN_API)
+#define ENABLE_FULLSCREEN_API 1
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#if !defined(ENABLE_GESTURE_EVENTS)
+#define ENABLE_GESTURE_EVENTS 1
+#endif
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#if !defined(ENABLE_RUBBER_BANDING)
+#define ENABLE_RUBBER_BANDING 1
+#endif
+#endif
+
+#if !defined(ENABLE_SMOOTH_SCROLLING)
+#define ENABLE_SMOOTH_SCROLLING 1
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+#if !defined(ENABLE_THREADED_SCROLLING)
+#define ENABLE_THREADED_SCROLLING 1
+#endif
+#endif
+
+#if ENABLE(VIDEO)
+#if !defined(ENABLE_VIDEO_TRACK)
+#define ENABLE_VIDEO_TRACK 1
+#endif
+#endif
+
+#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
+#define ENABLE_VIEW_MODE_CSS_MEDIA 0
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#if !defined(ENABLE_WEB_AUDIO)
+#define ENABLE_WEB_AUDIO 1
+#endif
+
+#if !defined(ENABLE_CURSOR_VISIBILITY)
+#define ENABLE_CURSOR_VISIBILITY 1
+#endif
+
+#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
+
+/* --------- Apple Windows port --------- */
+#if PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)
+
+#if !defined(ENABLE_FULLSCREEN_API)
+#define ENABLE_FULLSCREEN_API 1
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#endif /* PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO) */
+
+/* --------- WinCE port --------- */
+/* WinCE port is a specialization of PLATFORM(WIN). */
+/* PLATFORM(WIN) is always enabled when building for the WinCE port. */
+#if PLATFORM(WIN) && OS(WINCE)
+
+#if !defined(ENABLE_DRAG_SUPPORT)
+#define ENABLE_DRAG_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_FTPDIR)
+#define ENABLE_FTPDIR 0
+#endif
+
+#if !defined(ENABLE_INSPECTOR)
+#define ENABLE_INSPECTOR 0
+#endif
+
+#endif /* PLATFORM(WIN) && OS(WINCE) */
+
+/* --------- Windows CAIRO port --------- */
+/* PLATFORM(WIN_CAIRO) is a specialization of PLATFORM(WIN). */
+/* PLATFORM(WIN) is always enabled when PLATFORM(WIN_CAIRO) is enabled. */
+#if PLATFORM(WIN_CAIRO)
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#endif /* PLATFORM(WIN_CAIRO) */
+
+/* --------- WX port (Mac OS and Windows) --------- */
+#if PLATFORM(WX)
+
+#if OS(DARWIN)
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+#endif
+
+#if OS(UNIX)
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+#endif
+
+#endif /* PLATFORM(WX) */
+
+/* --------- EFL port (Unix) --------- */
+#if PLATFORM(EFL)
+
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+
+#if !defined(ENABLE_SUBPIXEL_LAYOUT)
+#define ENABLE_SUBPIXEL_LAYOUT 1
+#endif
+
+#endif /* PLATFORM(EFL) */
+
+/* --------- Gtk port (Unix, Windows, Mac) --------- */
+#if PLATFORM(GTK)
+
+#if OS(UNIX)
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+#endif
+
+#endif /* PLATFORM(GTK) */
+
+/* --------- Qt port (Unix, Windows, Mac, WinCE) --------- */
+#if PLATFORM(QT)
+
+#if OS(UNIX)
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+#endif
+
+#endif /* PLATFORM(QT) */
+
+/* --------- Blackberry port (QNX) --------- */
+#if PLATFORM(BLACKBERRY)
+
+#if !defined(ENABLE_BLACKBERRY_CREDENTIAL_PERSIST)
+#define ENABLE_BLACKBERRY_CREDENTIAL_PERSIST 1
+#endif
+
+#endif /* PLATFORM(BLACKBERRY) */
+
+/* ENABLE macro defaults for WebCore */
+/* Do not use PLATFORM() tests in this section ! */
+
+#if !defined(ENABLE_3D_RENDERING)
+#define ENABLE_3D_RENDERING 0
+#endif
+
+#if !defined(ENABLE_8BIT_TEXTRUN)
+#define ENABLE_8BIT_TEXTRUN 0
+#endif
+
+#if !defined(ENABLE_ACCELERATED_2D_CANVAS)
+#define ENABLE_ACCELERATED_2D_CANVAS 0
+#endif
+
+#if !defined(ENABLE_ACCELERATED_OVERFLOW_SCROLLING)
+#define ENABLE_ACCELERATED_OVERFLOW_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_BATTERY_STATUS)
+#define ENABLE_BATTERY_STATUS 0
+#endif
+
+#if !defined(ENABLE_BLOB)
+#define ENABLE_BLOB 0
+#endif
+
+#if !defined(ENABLE_CALENDAR_PICKER)
+#define ENABLE_CALENDAR_PICKER 0
+#endif
+
+#if !defined(ENABLE_CANVAS_PATH)
+#define ENABLE_CANVAS_PATH 1
+#endif
+
+#if !defined(ENABLE_CANVAS_PROXY)
+#define ENABLE_CANVAS_PROXY 0
+#endif
+
+#if !defined(ENABLE_CHANNEL_MESSAGING)
+#define ENABLE_CHANNEL_MESSAGING 1
+#endif
+
+#if !defined(ENABLE_CONTEXT_MENUS)
+#define ENABLE_CONTEXT_MENUS 1
+#endif
+
+#if !defined(ENABLE_CSP_NEXT)
+#define ENABLE_CSP_NEXT 0
+#endif
+
+#if !defined(ENABLE_CSS3_CONDITIONAL_RULES)
+#define ENABLE_CSS3_CONDITIONAL_RULES 0
+#endif
+
+#if !defined(ENABLE_CSS3_TEXT)
+#define ENABLE_CSS3_TEXT 0
+#endif
+
+#if !defined(ENABLE_CSS_BOX_DECORATION_BREAK)
+#define ENABLE_CSS_BOX_DECORATION_BREAK 1
+#endif
+
+#if !defined(ENABLE_CSS_DEVICE_ADAPTATION)
+#define ENABLE_CSS_DEVICE_ADAPTATION 0
+#endif
+
+#if !defined(ENABLE_CSS_COMPOSITING)
+#define ENABLE_CSS_COMPOSITING 0
+#endif
+
+#if !defined(ENABLE_CSS_FILTERS)
+#define ENABLE_CSS_FILTERS 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_ORIENTATION)
+#define ENABLE_CSS_IMAGE_ORIENTATION 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_RESOLUTION)
+#define ENABLE_CSS_IMAGE_RESOLUTION 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_SET)
+#define ENABLE_CSS_IMAGE_SET 0
+#endif
+
+#if !defined(ENABLE_CSS_SHADERS)
+#define ENABLE_CSS_SHADERS 0
+#endif
+
+#if !defined(ENABLE_CSS_STICKY_POSITION)
+#define ENABLE_CSS_STICKY_POSITION 0
+#endif
+
+#if !defined(ENABLE_CSS_TRANSFORMS_ANIMATIONS_TRANSITIONS_UNPREFIXED)
+#define ENABLE_CSS_TRANSFORMS_ANIMATIONS_TRANSITIONS_UNPREFIXED 0
+#endif
+
+#if !defined(ENABLE_CSS_VARIABLES)
+#define ENABLE_CSS_VARIABLES 0
+#endif
+
+#if !defined(ENABLE_CUSTOM_SCHEME_HANDLER)
+#define ENABLE_CUSTOM_SCHEME_HANDLER 0
+#endif
+
+#if !defined(ENABLE_DASHBOARD_SUPPORT)
+#define ENABLE_DASHBOARD_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_DATALIST_ELEMENT)
+#define ENABLE_DATALIST_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_DATA_TRANSFER_ITEMS)
+#define ENABLE_DATA_TRANSFER_ITEMS 0
+#endif
+
+#if !defined(ENABLE_DELETION_UI)
+#define ENABLE_DELETION_UI 0
+#endif
+
+#if !defined(ENABLE_DETAILS_ELEMENT)
+#define ENABLE_DETAILS_ELEMENT 1
+#endif
+
+#if !defined(ENABLE_DEVICE_ORIENTATION)
+#define ENABLE_DEVICE_ORIENTATION 0
+#endif
+
+#if !defined(ENABLE_DIALOG_ELEMENT)
+#define ENABLE_DIALOG_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_DIRECTORY_UPLOAD)
+#define ENABLE_DIRECTORY_UPLOAD 0
+#endif
+
+#if !defined(ENABLE_DOWNLOAD_ATTRIBUTE)
+#define ENABLE_DOWNLOAD_ATTRIBUTE 0
+#endif
+
+#if !defined(ENABLE_DRAGGABLE_REGION)
+#define ENABLE_DRAGGABLE_REGION 0
+#endif
+
+#if !defined(ENABLE_DRAG_SUPPORT)
+#define ENABLE_DRAG_SUPPORT 1
+#endif
+
+#if !defined(ENABLE_ENCRYPTED_MEDIA)
+#define ENABLE_ENCRYPTED_MEDIA 0
+#endif
+
+#if !defined(ENABLE_ENCRYPTED_MEDIA_V2)
+#define ENABLE_ENCRYPTED_MEDIA_V2 0
+#endif
+
+#if !defined(ENABLE_FAST_MOBILE_SCROLLING)
+#define ENABLE_FAST_MOBILE_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_FILE_SYSTEM)
+#define ENABLE_FILE_SYSTEM 0
+#endif
+
+#if !defined(ENABLE_FILTERS)
+#define ENABLE_FILTERS 0
+#endif
+
+#if !defined(ENABLE_FONT_LOAD_EVENTS)
+#define ENABLE_FONT_LOAD_EVENTS 0
+#endif
+
+#if !defined(ENABLE_FTPDIR)
+#define ENABLE_FTPDIR 1
+#endif
+
+#if !defined(ENABLE_FULLSCREEN_API)
+#define ENABLE_FULLSCREEN_API 0
+#endif
+
+#if !defined(ENABLE_GAMEPAD)
+#define ENABLE_GAMEPAD 0
+#endif
+
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 0
+#endif
+
+#if !defined(ENABLE_GESTURE_EVENTS)
+#define ENABLE_GESTURE_EVENTS 0
+#endif
+
+#if !defined(ENABLE_GLIB_SUPPORT)
+#define ENABLE_GLIB_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING)
+#define ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING 0
+#endif
+
+#if !defined(ENABLE_HIGH_DPI_CANVAS)
+#define ENABLE_HIGH_DPI_CANVAS 0
+#endif
+
+#if !defined(ENABLE_ICONDATABASE)
+#define ENABLE_ICONDATABASE 1
+#endif
+
+#if !defined(ENABLE_IFRAME_SEAMLESS)
+#define ENABLE_IFRAME_SEAMLESS 1
+#endif
+
+#if !defined(ENABLE_IMAGE_DECODER_DOWN_SAMPLING)
+#define ENABLE_IMAGE_DECODER_DOWN_SAMPLING 0
+#endif
+
+#if !defined(ENABLE_INDEXED_DATABASE)
+#define ENABLE_INDEXED_DATABASE 0
+#endif
+
+#if !defined(ENABLE_INPUT_MULTIPLE_FIELDS_UI)
+#define ENABLE_INPUT_MULTIPLE_FIELDS_UI 0
+#endif
+
+#if !defined(ENABLE_INPUT_SPEECH)
+#define ENABLE_INPUT_SPEECH 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_COLOR)
+#define ENABLE_INPUT_TYPE_COLOR 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_DATE)
+#define ENABLE_INPUT_TYPE_DATE 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_DATETIME_INCOMPLETE)
+#define ENABLE_INPUT_TYPE_DATETIME_INCOMPLETE 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_DATETIMELOCAL)
+#define ENABLE_INPUT_TYPE_DATETIMELOCAL 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_MONTH)
+#define ENABLE_INPUT_TYPE_MONTH 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_TIME)
+#define ENABLE_INPUT_TYPE_TIME 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_WEEK)
+#define ENABLE_INPUT_TYPE_WEEK 0
+#endif
+
+#if ENABLE(INPUT_TYPE_DATE) || ENABLE(INPUT_TYPE_DATETIME_INCOMPLETE) || ENABLE(INPUT_TYPE_DATETIMELOCAL) || ENABLE(INPUT_TYPE_MONTH) || ENABLE(INPUT_TYPE_TIME) || ENABLE(INPUT_TYPE_WEEK)
+#if !defined(ENABLE_DATE_AND_TIME_INPUT_TYPES)
+#define ENABLE_DATE_AND_TIME_INPUT_TYPES 1
+#endif
+#endif
+
+#if !defined(ENABLE_INSPECTOR)
+#define ENABLE_INSPECTOR 1
+#endif
+
+#if !defined(ENABLE_JAVASCRIPT_DEBUGGER)
+#define ENABLE_JAVASCRIPT_DEBUGGER 1
+#endif
+
+#if !defined(ENABLE_JAVASCRIPT_I18N_API)
+#define ENABLE_JAVASCRIPT_I18N_API 0
+#endif
+
+#if !defined(ENABLE_LEGACY_CSS_VENDOR_PREFIXES)
+#define ENABLE_LEGACY_CSS_VENDOR_PREFIXES 0
+#endif
+
+#if !defined(ENABLE_LEGACY_NOTIFICATIONS)
+#define ENABLE_LEGACY_NOTIFICATIONS 0
+#endif
+
+#if !defined(ENABLE_LEGACY_VENDOR_PREFIXES)
+#define ENABLE_LEGACY_VENDOR_PREFIXES 0
+#endif
+
+#if !defined(ENABLE_LEGACY_VIEWPORT_ADAPTION)
+#define ENABLE_LEGACY_VIEWPORT_ADAPTION 0
+#endif
+
+#if !defined(ENABLE_LINK_PREFETCH)
+#define ENABLE_LINK_PREFETCH 0
+#endif
+
+#if !defined(ENABLE_LINK_PRERENDER)
+#define ENABLE_LINK_PRERENDER 0
+#endif
+
+#if !defined(ENABLE_MATHML)
+#define ENABLE_MATHML 1
+#endif
+
+#if !defined(ENABLE_MEDIA_CAPTURE)
+#define ENABLE_MEDIA_CAPTURE 0
+#endif
+
+#if !defined(ENABLE_MEDIA_SOURCE)
+#define ENABLE_MEDIA_SOURCE 0
+#endif
+
+#if !defined(ENABLE_MEDIA_STATISTICS)
+#define ENABLE_MEDIA_STATISTICS 0
+#endif
+
+#if !defined(ENABLE_MEDIA_STREAM)
+#define ENABLE_MEDIA_STREAM 0
+#endif
+
+#if !defined(ENABLE_METER_ELEMENT)
+#define ENABLE_METER_ELEMENT 1
+#endif
+
+#if !defined(ENABLE_MHTML)
+#define ENABLE_MHTML 0
+#endif
+
+#if !defined(ENABLE_MICRODATA)
+#define ENABLE_MICRODATA 0
+#endif
+
+#if !defined(ENABLE_MOUSE_CURSOR_SCALE)
+#define ENABLE_MOUSE_CURSOR_SCALE 0
+#endif
+
+#if !defined(ENABLE_NAVIGATOR_CONTENT_UTILS)
+#define ENABLE_NAVIGATOR_CONTENT_UTILS 0
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_API)
+#define ENABLE_NETSCAPE_PLUGIN_API 1
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_METADATA_CACHE)
+#define ENABLE_NETSCAPE_PLUGIN_METADATA_CACHE 0
+#endif
+
+#if !defined(ENABLE_NETWORK_INFO)
+#define ENABLE_NETWORK_INFO 0
+#endif
+
+#if !defined(ENABLE_NOTIFICATIONS)
+#define ENABLE_NOTIFICATIONS 0
+#endif
+
+#if !defined(ENABLE_OBJECT_MARK_LOGGING)
+#define ENABLE_OBJECT_MARK_LOGGING 0
+#endif
+
+#if !defined(ENABLE_OPENCL)
+#define ENABLE_OPENCL 0
+#endif
+
+#if !defined(ENABLE_OPENTYPE_VERTICAL)
+#define ENABLE_OPENTYPE_VERTICAL 0
+#endif
+
+#if !defined(ENABLE_ORIENTATION_EVENTS)
+#define ENABLE_ORIENTATION_EVENTS 0
+#endif
+
+#if !defined(ENABLE_PAGE_POPUP)
+#define ENABLE_PAGE_POPUP 0
+#endif
+
+#if !defined(ENABLE_PAGE_VISIBILITY_API)
+#define ENABLE_PAGE_VISIBILITY_API 0
+#endif
+
+#if OS(WINDOWS)
+#if !defined(ENABLE_PAN_SCROLLING)
+#define ENABLE_PAN_SCROLLING 1
+#endif
+#endif
+
+#if !defined(ENABLE_PARSED_STYLE_SHEET_CACHING)
+#define ENABLE_PARSED_STYLE_SHEET_CACHING 1
+#endif
+
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 0
+#endif
+
+#if !defined(ENABLE_PLUGIN_PROXY_FOR_VIDEO)
+#define ENABLE_PLUGIN_PROXY_FOR_VIDEO 0
+#endif
+
+#if !defined(ENABLE_POINTER_LOCK)
+#define ENABLE_POINTER_LOCK 0
+#endif
+
+#if !defined(ENABLE_PROGRESS_ELEMENT)
+#define ENABLE_PROGRESS_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_PROXIMITY_EVENTS)
+#define ENABLE_PROXIMITY_EVENTS 0
+#endif
+
+#if !defined(ENABLE_QUOTA)
+#define ENABLE_QUOTA 0
+#endif
+
+#if !defined(ENABLE_REPAINT_THROTTLING)
+#define ENABLE_REPAINT_THROTTLING 0
+#endif
+
+#if !defined(ENABLE_REQUEST_ANIMATION_FRAME)
+#define ENABLE_REQUEST_ANIMATION_FRAME 0
+#endif
+
+#if !defined(ENABLE_REQUEST_AUTOCOMPLETE)
+#define ENABLE_REQUEST_AUTOCOMPLETE 0
+#endif
+
+#if !defined(ENABLE_RUBBER_BANDING)
+#define ENABLE_RUBBER_BANDING 0
+#endif
+
+#if !defined(ENABLE_SATURATED_LAYOUT_ARITHMETIC)
+#define ENABLE_SATURATED_LAYOUT_ARITHMETIC 0
+#endif
+
+#if !defined(ENABLE_SCRIPTED_SPEECH)
+#define ENABLE_SCRIPTED_SPEECH 0
+#endif
+
+#if !defined(ENABLE_SHADOW_DOM)
+#define ENABLE_SHADOW_DOM 0
+#endif
+
+#if !defined(ENABLE_SHARED_WORKERS)
+#define ENABLE_SHARED_WORKERS 0
+#endif
+
+#if !defined(ENABLE_SMOOTH_SCROLLING)
+#define ENABLE_SMOOTH_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_SPEECH_SYNTHESIS)
+#define ENABLE_SPEECH_SYNTHESIS 0
+#endif
+
+#if !defined(ENABLE_SPELLCHECK)
+#define ENABLE_SPELLCHECK 0
+#endif
+
+#if !defined(ENABLE_SQL_DATABASE)
+#define ENABLE_SQL_DATABASE 1
+#endif
+
+#if !defined(ENABLE_STYLE_SCOPED)
+#define ENABLE_STYLE_SCOPED 0
+#endif
+
+#if !defined(ENABLE_SUBPIXEL_LAYOUT)
+#define ENABLE_SUBPIXEL_LAYOUT 0
+#endif
+
+#if !defined(ENABLE_SVG)
+#define ENABLE_SVG 1
+#endif
+
+#if ENABLE(SVG)
+#if !defined(ENABLE_SVG_FONTS)
+#define ENABLE_SVG_FONTS 1
+#endif
+#endif
+
+#if !defined(ENABLE_TEMPLATE_ELEMENT)
+#define ENABLE_TEMPLATE_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_TEXT_AUTOSIZING)
+#define ENABLE_TEXT_AUTOSIZING 0
+#endif
+
+#if !defined(ENABLE_TEXT_CARET)
+#define ENABLE_TEXT_CARET 1
+#endif
+
+#if !defined(ENABLE_THREADED_HTML_PARSER)
+#define ENABLE_THREADED_HTML_PARSER 0
+#endif
+
+#if !defined(ENABLE_THREADED_SCROLLING)
+#define ENABLE_THREADED_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_TOUCH_EVENTS)
+#define ENABLE_TOUCH_EVENTS 0
+#endif
+
+#if !defined(ENABLE_TOUCH_ICON_LOADING)
+#define ENABLE_TOUCH_ICON_LOADING 0
+#endif
+
+#if !defined(ENABLE_VIBRATION)
+#define ENABLE_VIBRATION 0
+#endif
+
+#if !defined(ENABLE_VIDEO)
+#define ENABLE_VIDEO 0
+#endif
+
+#if !defined(ENABLE_VIDEO_TRACK)
+#define ENABLE_VIDEO_TRACK 0
+#endif
+
+#if !defined(ENABLE_VIEWPORT)
+#define ENABLE_VIEWPORT 0
+#endif
+
+#if !defined(ENABLE_VIEWSOURCE_ATTRIBUTE)
+#define ENABLE_VIEWSOURCE_ATTRIBUTE 1
+#endif
+
+#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
+#define ENABLE_VIEW_MODE_CSS_MEDIA 1
+#endif
+
+#if !defined(ENABLE_WEBGL)
+#define ENABLE_WEBGL 0
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 0
+#endif
+
+#if !defined(ENABLE_WEB_AUDIO)
+#define ENABLE_WEB_AUDIO 0
+#endif
+
+#if !defined(ENABLE_WEB_SOCKETS)
+#define ENABLE_WEB_SOCKETS 1
+#endif
+
+#if !defined(ENABLE_WEB_TIMING)
+#define ENABLE_WEB_TIMING 0
+#endif
+
+#if !defined(ENABLE_WORKERS)
+#define ENABLE_WORKERS 0
+#endif
+
+#if !defined(ENABLE_XHR_TIMEOUT)
+#define ENABLE_XHR_TIMEOUT 0
+#endif
+
+#if !defined(ENABLE_XSLT)
+#define ENABLE_XSLT 1
+#endif
+
+/* Asserts, invariants for macro definitions */
+
+#if ENABLE(SATURATED_LAYOUT_ARITHMETIC) && !ENABLE(SUBPIXEL_LAYOUT)
+#error "ENABLE(SATURATED_LAYOUT_ARITHMETIC) requires ENABLE(SUBPIXEL_LAYOUT)"
+#endif
+
+#if ENABLE(SVG_FONTS) && !ENABLE(SVG)
+#error "ENABLE(SVG_FONTS) requires ENABLE(SVG)"
+#endif
+
+#if ENABLE(VIDEO_TRACK) && !ENABLE(VIDEO)
+#error "ENABLE(VIDEO_TRACK) requires ENABLE(VIDEO)"
+#endif
+
+#endif /* WTF_FeatureDefines_h */
diff --git a/src/3rdparty/masm/wtf/FilePrintStream.cpp b/src/3rdparty/masm/wtf/FilePrintStream.cpp
new file mode 100644
index 0000000000..b5ab25e0bf
--- /dev/null
+++ b/src/3rdparty/masm/wtf/FilePrintStream.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FilePrintStream.h"
+
+namespace WTF {
+
+FilePrintStream::FilePrintStream(FILE* file, AdoptionMode adoptionMode)
+ : m_file(file)
+ , m_adoptionMode(adoptionMode)
+{
+}
+
+FilePrintStream::~FilePrintStream()
+{
+ if (m_adoptionMode == Borrow)
+ return;
+ fclose(m_file);
+}
+
+PassOwnPtr<FilePrintStream> FilePrintStream::open(const char* filename, const char* mode)
+{
+ FILE* file = fopen(filename, mode);
+ if (!file)
+ return PassOwnPtr<FilePrintStream>();
+
+ return adoptPtr(new FilePrintStream(file));
+}
+
+void FilePrintStream::vprintf(const char* format, va_list argList)
+{
+ vfprintf(m_file, format, argList);
+}
+
+void FilePrintStream::flush()
+{
+ fflush(m_file);
+}
+
+} // namespace WTF
+
diff --git a/src/3rdparty/masm/wtf/FilePrintStream.h b/src/3rdparty/masm/wtf/FilePrintStream.h
new file mode 100644
index 0000000000..bdeab4c479
--- /dev/null
+++ b/src/3rdparty/masm/wtf/FilePrintStream.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FilePrintStream_h
+#define FilePrintStream_h
+
+#include <stdio.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+class FilePrintStream : public PrintStream {
+public:
+ enum AdoptionMode {
+ Adopt,
+ Borrow
+ };
+
+ FilePrintStream(FILE*, AdoptionMode = Adopt);
+ virtual ~FilePrintStream();
+
+ static PassOwnPtr<FilePrintStream> open(const char* filename, const char* mode);
+
+ FILE* file() { return m_file; }
+
+ void vprintf(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(2, 0);
+ void flush();
+
+private:
+ FILE* m_file;
+ AdoptionMode m_adoptionMode;
+};
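+
+// Usage sketch ("dump.txt" is an invented name; print() is inherited from
+// PrintStream, the same interface used by dataLog()):
+//   OwnPtr<FilePrintStream> out = FilePrintStream::open("dump.txt", "w");
+//   if (out)
+//       out->print("pass ", 1, " done\n");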
+
+} // namespace WTF
+
+using WTF::FilePrintStream;
+
+#endif // FilePrintStream_h
+
diff --git a/src/3rdparty/masm/wtf/Locker.h b/src/3rdparty/masm/wtf/Locker.h
new file mode 100644
index 0000000000..c465b99ea4
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Locker.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef Locker_h
+#define Locker_h
+
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+template <typename T> class Locker {
+ WTF_MAKE_NONCOPYABLE(Locker);
+public:
+ Locker(T& lockable) : m_lockable(lockable) { m_lockable.lock(); }
+ ~Locker() { m_lockable.unlock(); }
+private:
+ T& m_lockable;
+};
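+
+// Locker works with any type exposing lock()/unlock(), e.g.:
+//   { Locker<Mutex> locker(m_mutex); /* critical section */ }
+// The lock is released when the scope exits.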
+
+}
+
+using WTF::Locker;
+
+#endif
diff --git a/src/3rdparty/masm/wtf/MathExtras.h b/src/3rdparty/masm/wtf/MathExtras.h
new file mode 100644
index 0000000000..b70e468dfa
--- /dev/null
+++ b/src/3rdparty/masm/wtf/MathExtras.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MathExtras_h
+#define WTF_MathExtras_h
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <limits>
+#include <stdint.h>
+#include <stdlib.h>
+#include <wtf/StdLibExtras.h>
+
+#if OS(SOLARIS)
+#include <ieeefp.h>
+#endif
+
+#if OS(OPENBSD)
+#include <sys/types.h>
+#include <machine/ieee.h>
+#endif
+
+#if OS(QNX)
+// FIXME: Look into a way to have cmath import its functions into both the standard and global
+// namespace. For now, we include math.h since the QNX cmath header only imports its functions
+// into the standard namespace.
+#include <math.h>
+// These macros from math.h conflict with the real functions in the std namespace.
+#undef signbit
+#undef isnan
+#undef isinf
+#undef isfinite
+#endif
+
+#ifndef M_PI
+const double piDouble = 3.14159265358979323846;
+const float piFloat = 3.14159265358979323846f;
+#else
+const double piDouble = M_PI;
+const float piFloat = static_cast<float>(M_PI);
+#endif
+
+#ifndef M_PI_2
+const double piOverTwoDouble = 1.57079632679489661923;
+const float piOverTwoFloat = 1.57079632679489661923f;
+#else
+const double piOverTwoDouble = M_PI_2;
+const float piOverTwoFloat = static_cast<float>(M_PI_2);
+#endif
+
+#ifndef M_PI_4
+const double piOverFourDouble = 0.785398163397448309616;
+const float piOverFourFloat = 0.785398163397448309616f;
+#else
+const double piOverFourDouble = M_PI_4;
+const float piOverFourFloat = static_cast<float>(M_PI_4);
+#endif
+
+#if OS(DARWIN)
+
+// Work around a bug in the Mac OS X libc where ceil(-0.1) returns +0 instead of -0.
+inline double wtf_ceil(double x) { return copysign(ceil(x), x); }
+
+#define ceil(x) wtf_ceil(x)
+
+#endif
+
+#if OS(SOLARIS)
+
+namespace std {
+
+#ifndef isfinite
+inline bool isfinite(double x) { return finite(x) && !isnand(x); }
+#endif
+#ifndef signbit
+inline bool signbit(double x) { return copysign(1.0, x) < 0; }
+#endif
+#ifndef isinf
+inline bool isinf(double x) { return !finite(x) && !isnand(x); }
+#endif
+
+} // namespace std
+
+#endif
+
+#if OS(OPENBSD)
+
+namespace std {
+
+#ifndef isfinite
+inline bool isfinite(double x) { return finite(x); }
+#endif
+#ifndef signbit
+inline bool signbit(double x) { struct ieee_double *p = (struct ieee_double *)&x; return p->dbl_sign; }
+#endif
+
+} // namespace std
+
+#endif
+
+#if COMPILER(MSVC)
+
+// We must not do 'num + 0.5' or 'num - 0.5' because they can cause precision loss.
+static double round(double num)
+{
+ double integer = ceil(num);
+ if (num > 0)
+ return integer - num > 0.5 ? integer - 1.0 : integer;
+ return integer - num >= 0.5 ? integer - 1.0 : integer;
+}
+static float roundf(float num)
+{
+ float integer = ceilf(num);
+ if (num > 0)
+ return integer - num > 0.5f ? integer - 1.0f : integer;
+ return integer - num >= 0.5f ? integer - 1.0f : integer;
+}
+inline long long llround(double num) { return static_cast<long long>(round(num)); }
+inline long long llroundf(float num) { return static_cast<long long>(roundf(num)); }
+inline long lround(double num) { return static_cast<long>(round(num)); }
+inline long lroundf(float num) { return static_cast<long>(roundf(num)); }
+inline double trunc(double num) { return num > 0 ? floor(num) : ceil(num); }
+
+#endif
+
+#if COMPILER(GCC) && OS(QNX)
+// The stdlib on QNX doesn't contain long abs(long). See PR #104666.
+inline long long abs(long num) { return labs(num); }
+#endif
+
+#if COMPILER(MSVC)
+// MSVC's math.h does not currently supply log2 or log2f.
+inline double log2(double num)
+{
+ // This constant is roughly M_LN2, which is not provided by default on Windows.
+ return log(num) / 0.693147180559945309417232121458176568;
+}
+
+inline float log2f(float num)
+{
+ // This constant is roughly M_LN2, which is not provided by default on Windows.
+ return logf(num) / 0.693147180559945309417232121458176568f;
+}
+#endif
+
+#if COMPILER(MSVC)
+// The 64-bit version of abs() is already defined in the stdlib.h that ships with VC10.
+#if COMPILER(MSVC9_OR_LOWER)
+inline long long abs(long long num) { return _abs64(num); }
+#endif
+
+namespace std {
+
+inline bool isinf(double num) { return !_finite(num) && !_isnan(num); }
+inline bool isnan(double num) { return !!_isnan(num); }
+inline bool isfinite(double x) { return _finite(x); }
+inline bool signbit(double num) { return _copysign(1.0, num) < 0; }
+
+} // namespace std
+
+inline double nextafter(double x, double y) { return _nextafter(x, y); }
+inline float nextafterf(float x, float y) { return x > y ? x - FLT_EPSILON : x + FLT_EPSILON; }
+
+inline double copysign(double x, double y) { return _copysign(x, y); }
+
+// Work around a bug on Windows, where atan2(+-infinity, +-infinity) yields NaN instead of specific values.
+inline double wtf_atan2(double x, double y)
+{
+ double posInf = std::numeric_limits<double>::infinity();
+ double negInf = -std::numeric_limits<double>::infinity();
+ double nan = std::numeric_limits<double>::quiet_NaN();
+
+ double result = nan;
+
+ if (x == posInf && y == posInf)
+ result = piOverFourDouble;
+ else if (x == posInf && y == negInf)
+ result = 3 * piOverFourDouble;
+ else if (x == negInf && y == posInf)
+ result = -piOverFourDouble;
+ else if (x == negInf && y == negInf)
+ result = -3 * piOverFourDouble;
+ else
+ result = ::atan2(x, y);
+
+ return result;
+}
+
+// Work around a bug in the Microsoft CRT, where fmod(x, +-infinity) yields NaN instead of x.
+inline double wtf_fmod(double x, double y) { return (!std::isinf(x) && std::isinf(y)) ? x : fmod(x, y); }
+
+// Work around a bug in the Microsoft CRT, where pow(NaN, 0) yields NaN instead of 1.
+inline double wtf_pow(double x, double y) { return y == 0 ? 1 : pow(x, y); }
+
+#define atan2(x, y) wtf_atan2(x, y)
+#define fmod(x, y) wtf_fmod(x, y)
+#define pow(x, y) wtf_pow(x, y)
+
+// MSVC's math.h does not provide lrint, so emulate it here.
+inline long int lrint(double flt)
+{
+ int64_t intgr;
+#if CPU(X86)
+ __asm {
+ fld flt
+ fistp intgr
+ };
+#else
+ ASSERT(std::isfinite(flt));
+ double rounded = round(flt);
+ intgr = static_cast<int64_t>(rounded);
+    // If the fractional part is exactly 0.5, we need to check whether the
+    // rounded result is even. If it is not, we need to add 1 to negative
+    // values and subtract 1 from positive values.
+ if ((fabs(intgr - flt) == 0.5) & intgr)
+ intgr -= ((intgr >> 62) | 1); // 1 with the sign of result, i.e. -1 or 1.
+#endif
+ return static_cast<long int>(intgr);
+}
+
+#endif // COMPILER(MSVC)
+
+inline double deg2rad(double d) { return d * piDouble / 180.0; }
+inline double rad2deg(double r) { return r * 180.0 / piDouble; }
+inline double deg2grad(double d) { return d * 400.0 / 360.0; }
+inline double grad2deg(double g) { return g * 360.0 / 400.0; }
+inline double turn2deg(double t) { return t * 360.0; }
+inline double deg2turn(double d) { return d / 360.0; }
+inline double rad2grad(double r) { return r * 200.0 / piDouble; }
+inline double grad2rad(double g) { return g * piDouble / 200.0; }
+
+inline float deg2rad(float d) { return d * piFloat / 180.0f; }
+inline float rad2deg(float r) { return r * 180.0f / piFloat; }
+inline float deg2grad(float d) { return d * 400.0f / 360.0f; }
+inline float grad2deg(float g) { return g * 360.0f / 400.0f; }
+inline float turn2deg(float t) { return t * 360.0f; }
+inline float deg2turn(float d) { return d / 360.0f; }
+inline float rad2grad(float r) { return r * 200.0f / piFloat; }
+inline float grad2rad(float g) { return g * piFloat / 200.0f; }
+
+// std::numeric_limits<T>::min() returns the smallest positive value for floating point types
+template<typename T> inline T defaultMinimumForClamp() { return std::numeric_limits<T>::min(); }
+template<> inline float defaultMinimumForClamp() { return -std::numeric_limits<float>::max(); }
+template<> inline double defaultMinimumForClamp() { return -std::numeric_limits<double>::max(); }
+template<typename T> inline T defaultMaximumForClamp() { return std::numeric_limits<T>::max(); }
+
+template<typename T> inline T clampTo(double value, T min = defaultMinimumForClamp<T>(), T max = defaultMaximumForClamp<T>())
+{
+ if (value >= static_cast<double>(max))
+ return max;
+ if (value <= static_cast<double>(min))
+ return min;
+ return static_cast<T>(value);
+}
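+// e.g. clampTo<int>(5e9) == INT_MAX, clampTo<int>(-1.5, 0) == 0, and
+// clampTo<int>(42.7) == 42 (the cast truncates once the value is in range).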
+template<> inline long long int clampTo(double, long long int, long long int); // clampTo does not support long long ints.
+
+inline int clampToInteger(double value)
+{
+ return clampTo<int>(value);
+}
+
+inline float clampToFloat(double value)
+{
+ return clampTo<float>(value);
+}
+
+inline int clampToPositiveInteger(double value)
+{
+ return clampTo<int>(value, 0);
+}
+
+inline int clampToInteger(float value)
+{
+ return clampTo<int>(value);
+}
+
+inline int clampToInteger(unsigned x)
+{
+ const unsigned intMax = static_cast<unsigned>(std::numeric_limits<int>::max());
+
+ if (x >= intMax)
+ return std::numeric_limits<int>::max();
+ return static_cast<int>(x);
+}
+
+inline bool isWithinIntRange(float x)
+{
+ return x > static_cast<float>(std::numeric_limits<int>::min()) && x < static_cast<float>(std::numeric_limits<int>::max());
+}
+
+template<typename T> inline bool hasOneBitSet(T value)
+{
+ return !((value - 1) & value) && value;
+}
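+// value & (value - 1) clears the lowest set bit, so it is zero exactly when
+// value is 0 or a power of two; e.g. hasOneBitSet(8u) is true while
+// hasOneBitSet(12u) and hasOneBitSet(0u) are false.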
+
+template<typename T> inline bool hasZeroOrOneBitsSet(T value)
+{
+ return !((value - 1) & value);
+}
+
+template<typename T> inline bool hasTwoOrMoreBitsSet(T value)
+{
+ return !hasZeroOrOneBitsSet(value);
+}
+
+template <typename T> inline unsigned getLSBSet(T value)
+{
+ unsigned result = 0;
+
+ while (value >>= 1)
+ ++result;
+
+ return result;
+}
+
+template<typename T> inline T timesThreePlusOneDividedByTwo(T value)
+{
+ // Mathematically equivalent to:
+ // (value * 3 + 1) / 2;
+ // or:
+ // (unsigned)ceil(value * 1.5));
+ // This form is not prone to internal overflow.
+ return value + (value >> 1) + (value & 1);
+}
+
+#ifndef UINT64_C
+#if COMPILER(MSVC)
+#define UINT64_C(c) c ## ui64
+#else
+#define UINT64_C(c) c ## ull
+#endif
+#endif
+
+#if COMPILER(MINGW64) && (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
+inline double wtf_pow(double x, double y)
+{
+    // MinGW-w64 has a custom implementation of pow that handles certain
+    // special cases differently from C99; work around those cases here.
+ if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
+ double f;
+ if (modf(y, &f) != 0.0)
+ return ((x == 0.0) ^ (y > 0.0)) ? std::numeric_limits<double>::infinity() : 0.0;
+ }
+
+ if (x == 2.0) {
+ int yInt = static_cast<int>(y);
+ if (y == yInt)
+ return ldexp(1.0, yInt);
+ }
+
+ return pow(x, y);
+}
+#define pow(x, y) wtf_pow(x, y)
+#endif // COMPILER(MINGW64) && (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
+
+
+// Decompose 'number' into its sign, exponent, and mantissa components.
+// The result is interpreted as:
+// (sign ? -1 : 1) * pow(2, exponent) * (mantissa / (1 << 52))
+inline void decomposeDouble(double number, bool& sign, int32_t& exponent, uint64_t& mantissa)
+{
+ ASSERT(std::isfinite(number));
+
+ sign = std::signbit(number);
+
+ uint64_t bits = WTF::bitwise_cast<uint64_t>(number);
+ exponent = (static_cast<int32_t>(bits >> 52) & 0x7ff) - 0x3ff;
+ mantissa = bits & 0xFFFFFFFFFFFFFull;
+
+    // Check for zero/denormal values; if so, adjust the exponent;
+    // if not, insert the implicit, omitted leading 1 bit.
+ if (exponent == -0x3ff)
+ exponent = mantissa ? -0x3fe : 0;
+ else
+ mantissa |= 0x10000000000000ull;
+}
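+
+// Worked example: decomposeDouble(6.5, ...) yields sign == false, exponent == 2,
+// mantissa == 0x1A000000000000, since 6.5 == +(0x1A000000000000 / 2^52) * 2^2.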
+
+// Calculate d % 2^{64}.
+inline void doubleToInteger(double d, unsigned long long& value)
+{
+ if (std::isnan(d) || std::isinf(d))
+ value = 0;
+ else {
+ // -2^{64} < fmodValue < 2^{64}.
+ double fmodValue = fmod(trunc(d), std::numeric_limits<unsigned long long>::max() + 1.0);
+ if (fmodValue >= 0) {
+ // 0 <= fmodValue < 2^{64}.
+ // 0 <= value < 2^{64}. This cast causes no loss.
+ value = static_cast<unsigned long long>(fmodValue);
+ } else {
+ // -2^{64} < fmodValue < 0.
+ // 0 < fmodValueInUnsignedLongLong < 2^{64}. This cast causes no loss.
+ unsigned long long fmodValueInUnsignedLongLong = static_cast<unsigned long long>(-fmodValue);
+ // -1 < (std::numeric_limits<unsigned long long>::max() - fmodValueInUnsignedLongLong) < 2^{64} - 1.
+ // 0 < value < 2^{64}.
+ value = std::numeric_limits<unsigned long long>::max() - fmodValueInUnsignedLongLong + 1;
+ }
+ }
+}
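+// For example (editorial): doubleToInteger(-1.0, value) takes the negative branch;
+// fmodValue == -1.0, fmodValueInUnsignedLongLong == 1, and value becomes
+// 0xffffffffffffffff, i.e. -1 modulo 2^{64}.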
+
+namespace WTF {
+
+// From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+inline uint32_t roundUpToPowerOfTwo(uint32_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
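+// Worked example (editorial): roundUpToPowerOfTwo(37) decrements to 36 (0b100100),
+// the shift/OR cascade smears the top bit downwards to give 63 (0b111111), and the
+// final increment yields 64.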
+
+inline unsigned fastLog2(unsigned i)
+{
+ unsigned log2 = 0;
+ if (i & (i - 1))
+ log2 += 1;
+ if (i >> 16)
+ log2 += 16, i >>= 16;
+ if (i >> 8)
+ log2 += 8, i >>= 8;
+ if (i >> 4)
+ log2 += 4, i >>= 4;
+ if (i >> 2)
+ log2 += 2, i >>= 2;
+ if (i >> 1)
+ log2 += 1;
+ return log2;
+}
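+// Note (editorial): the initial (i & (i - 1)) test makes this a rounded-up log2.
+// For example fastLog2(5) adds 1 for the non-power-of-two, then 2 when 0b101 is
+// shifted down to 1, giving ceil(log2(5)) == 3.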
+
+} // namespace WTF
+
+#endif // #ifndef WTF_MathExtras_h
diff --git a/src/3rdparty/masm/wtf/NotFound.h b/src/3rdparty/masm/wtf/NotFound.h
new file mode 100644
index 0000000000..4263bcecab
--- /dev/null
+++ b/src/3rdparty/masm/wtf/NotFound.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef NotFound_h
+#define NotFound_h
+
+namespace WTF {
+
+ const size_t notFound = static_cast<size_t>(-1);
+
+} // namespace WTF
+
+using WTF::notFound;
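+// Typical use (editorial, illustrative): WTF search functions such as String::find
+// return notFound when no match exists:
+//
+//     size_t index = string.find('x');
+//     if (index == notFound)
+//         return;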
+
+#endif // NotFound_h
diff --git a/src/3rdparty/masm/wtf/NullPtr.h b/src/3rdparty/masm/wtf/NullPtr.h
new file mode 100644
index 0000000000..98c05140d8
--- /dev/null
+++ b/src/3rdparty/masm/wtf/NullPtr.h
@@ -0,0 +1,56 @@
+/*
+
+Copyright (C) 2010 Apple Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef NullPtr_h
+#define NullPtr_h
+
+// For compilers and standard libraries that do not yet include it, this adds the
+// nullptr_t type and nullptr object. They are defined in the same namespaces they
+// would be in a compiler and library that have the support.
+
+#include <ciso646>
+
+#if COMPILER_SUPPORTS(CXX_NULLPTR) || defined(_LIBCPP_VERSION)
+
+#include <cstddef>
+
+// libstdc++ supports nullptr_t starting with gcc 4.6.
+#if defined(__GLIBCXX__) && __GLIBCXX__ < 20110325
+namespace std {
+typedef decltype(nullptr) nullptr_t;
+}
+#endif
+
+#else
+
+namespace std {
+class WTF_EXPORT_PRIVATE nullptr_t { };
+}
+extern WTF_EXPORT_PRIVATE std::nullptr_t nullptr;
+
+#endif
+
+#endif
diff --git a/src/3rdparty/masm/wtf/OSAllocator.h b/src/3rdparty/masm/wtf/OSAllocator.h
new file mode 100644
index 0000000000..a12a467497
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocator.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef OSAllocator_h
+#define OSAllocator_h
+
+#include <algorithm>
+#include <wtf/UnusedParam.h>
+#include <wtf/VMTags.h>
+
+namespace WTF {
+
+class OSAllocator {
+public:
+ enum Usage {
+ UnknownUsage = -1,
+ FastMallocPages = VM_TAG_FOR_TCMALLOC_MEMORY,
+ JSGCHeapPages = VM_TAG_FOR_COLLECTOR_MEMORY,
+ JSVMStackPages = VM_TAG_FOR_REGISTERFILE_MEMORY,
+ JSJITCodePages = VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY,
+ };
+
+ // These methods are symmetric: reserveUncommitted allocates VM in an uncommitted state;
+ // releaseDecommitted should be called on a region of VM allocated by a single reservation,
+ // and the memory must all currently be in a decommitted state.
+ static void* reserveUncommitted(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false, bool includesGuardPages = false);
+ WTF_EXPORT_PRIVATE static void releaseDecommitted(void*, size_t);
+
+ // These methods are symmetric; they commit or decommit a region of VM (uncommitted VM should
+ // never be accessed, since the OS may not have attached physical memory for these regions).
+ // Clients should only call commit on uncommitted regions and decommit on committed regions.
+ static void commit(void*, size_t, bool writable, bool executable);
+ static void decommit(void*, size_t);
+
+ // These methods are symmetric: reserveAndCommit allocates VM in a committed state;
+ // decommitAndRelease should be called on a region of VM allocated by a single reservation,
+ // and the memory must all currently be in a committed state.
+ WTF_EXPORT_PRIVATE static void* reserveAndCommit(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false, bool includesGuardPages = false);
+ static void decommitAndRelease(void* base, size_t size);
+
+ // These methods are akin to reserveAndCommit/decommitAndRelease above; however, rather
+ // than committing/decommitting the entire region, additional parameters allow a
+ // subregion to be specified.
+ static void* reserveAndCommit(size_t reserveSize, size_t commitSize, Usage = UnknownUsage, bool writable = true, bool executable = false);
+ static void decommitAndRelease(void* releaseBase, size_t releaseSize, void* decommitBase, size_t decommitSize);
+
+ // Reallocate an existing, committed allocation.
+ // The prior allocation must be fully committed, and the new size will also be fully committed.
+ // This interface is provided since it may be possible to optimize this operation on some platforms.
+ template<typename T>
+ static T* reallocateCommitted(T*, size_t oldSize, size_t newSize, Usage = UnknownUsage, bool writable = true, bool executable = false);
+};
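+
+// Usage sketch (editorial, illustrative only; pageSize() comes from PageBlock.h):
+//
+//     void* base = OSAllocator::reserveUncommitted(2 * pageSize());
+//     OSAllocator::commit(base, pageSize(), /*writable*/ true, /*executable*/ false);
+//     // ... use the first page ...
+//     OSAllocator::decommit(base, pageSize());
+//     OSAllocator::releaseDecommitted(base, 2 * pageSize());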
+
+inline void* OSAllocator::reserveAndCommit(size_t reserveSize, size_t commitSize, Usage usage, bool writable, bool executable)
+{
+ void* base = reserveUncommitted(reserveSize, usage, writable, executable);
+ commit(base, commitSize, writable, executable);
+ return base;
+}
+
+inline void OSAllocator::decommitAndRelease(void* releaseBase, size_t releaseSize, void* decommitBase, size_t decommitSize)
+{
+ ASSERT(decommitBase >= releaseBase && (static_cast<char*>(decommitBase) + decommitSize) <= (static_cast<char*>(releaseBase) + releaseSize));
+#if OS(WINCE)
+ // On most platforms we can actually skip this final decommit; releasing the VM will
+ // implicitly decommit any physical memory in the region. This is not true on WINCE.
+ decommit(decommitBase, decommitSize);
+#else
+ UNUSED_PARAM(decommitBase);
+ UNUSED_PARAM(decommitSize);
+#endif
+ releaseDecommitted(releaseBase, releaseSize);
+}
+
+inline void OSAllocator::decommitAndRelease(void* base, size_t size)
+{
+ decommitAndRelease(base, size, base, size);
+}
+
+template<typename T>
+inline T* OSAllocator::reallocateCommitted(T* oldBase, size_t oldSize, size_t newSize, Usage usage, bool writable, bool executable)
+{
+ void* newBase = reserveAndCommit(newSize, usage, writable, executable);
+ memcpy(newBase, oldBase, std::min(oldSize, newSize));
+ decommitAndRelease(oldBase, oldSize);
+ return static_cast<T*>(newBase);
+}
+
+} // namespace WTF
+
+using WTF::OSAllocator;
+
+#endif // OSAllocator_h
diff --git a/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp b/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
new file mode 100644
index 0000000000..7b2a55c6b6
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "OSAllocator.h"
+
+#if OS(UNIX)
+
+#include "PageAllocation.h"
+#include <errno.h>
+#include <sys/mman.h>
+#include <wtf/Assertions.h>
+#include <wtf/UnusedParam.h>
+
+namespace WTF {
+
+void* OSAllocator::reserveUncommitted(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+#if OS(QNX)
+ // Reserve memory with PROT_NONE and MAP_LAZY so it isn't committed now.
+ void* result = mmap(0, bytes, PROT_NONE, MAP_LAZY | MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (result == MAP_FAILED)
+ CRASH();
+#elif OS(LINUX)
+ UNUSED_PARAM(usage);
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+ UNUSED_PARAM(includesGuardPages);
+
+ void* result = mmap(0, bytes, PROT_NONE, MAP_NORESERVE | MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (result == MAP_FAILED)
+ CRASH();
+ madvise(result, bytes, MADV_DONTNEED);
+#else
+ void* result = reserveAndCommit(bytes, usage, writable, executable, includesGuardPages);
+#if HAVE(MADV_FREE_REUSE)
+ // To support the "reserve then commit" model, we have to initially decommit.
+ while (madvise(result, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#endif
+
+#endif // OS(QNX)
+
+ return result;
+}
+
+void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+ // All POSIX reservations start out logically committed.
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+
+ int flags = MAP_PRIVATE | MAP_ANON;
+#if PLATFORM(IOS)
+ if (executable)
+ flags |= MAP_JIT;
+#endif
+
+#if OS(DARWIN)
+ int fd = usage;
+#else
+ UNUSED_PARAM(usage);
+ int fd = -1;
+#endif
+
+ void* result = 0;
+#if (OS(DARWIN) && CPU(X86_64))
+ if (executable) {
+ ASSERT(includesGuardPages);
+ // Cook up an address to allocate at, using the following recipe:
+ // 17 bits of zero, stay in userspace kids.
+ // 26 bits of randomness for ASLR.
+ // 21 bits of zero, at least stay aligned within one level of the pagetables.
+ //
+ // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
+ // for now instead of 2^26 bits of ASLR let's stick with 25 bits of randomization plus
+ // 2^24, which should put us somewhere in the middle of userspace (in the address range
+ // 0x200000000000 .. 0x5fffffffffff).
+ intptr_t randomLocation = 0;
+ randomLocation = arc4random() & ((1 << 25) - 1);
+ randomLocation += (1 << 24);
+ randomLocation <<= 21;
+ result = reinterpret_cast<void*>(randomLocation);
+ }
+#endif
+
+ result = mmap(result, bytes, protection, flags, fd, 0);
+ if (result == MAP_FAILED) {
+#if ENABLE(LLINT)
+ if (executable)
+ result = 0;
+ else
+#endif
+ CRASH();
+ }
+ if (result && includesGuardPages) {
+ // We use mmap to remap the guardpages rather than using mprotect as
+ // mprotect results in multiple references to the code region. This
+ // breaks the madvise based mechanism we use to return physical memory
+ // to the OS.
+ mmap(result, pageSize(), PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, fd, 0);
+ mmap(static_cast<char*>(result) + bytes - pageSize(), pageSize(), PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, fd, 0);
+ }
+ return result;
+}
+
+void OSAllocator::commit(void* address, size_t bytes, bool writable, bool executable)
+{
+#if OS(QNX)
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+ if (MAP_FAILED == mmap(address, bytes, protection, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0))
+ CRASH();
+#elif OS(LINUX)
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+ if (mprotect(address, bytes, protection))
+ CRASH();
+ madvise(address, bytes, MADV_WILLNEED);
+#elif HAVE(MADV_FREE_REUSE)
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+ while (madvise(address, bytes, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+#else
+ // Non-MADV_FREE_REUSE reservations automatically commit on demand.
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+#endif
+}
+
+void OSAllocator::decommit(void* address, size_t bytes)
+{
+#if OS(QNX)
+ // Use PROT_NONE and MAP_LAZY to decommit the pages.
+ mmap(address, bytes, PROT_NONE, MAP_FIXED | MAP_LAZY | MAP_PRIVATE | MAP_ANON, -1, 0);
+#elif OS(LINUX)
+ madvise(address, bytes, MADV_DONTNEED);
+ if (mprotect(address, bytes, PROT_NONE))
+ CRASH();
+#elif HAVE(MADV_FREE_REUSE)
+ while (madvise(address, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_FREE)
+ while (madvise(address, bytes, MADV_FREE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_DONTNEED)
+ while (madvise(address, bytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+#else
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+#endif
+}
+
+void OSAllocator::releaseDecommitted(void* address, size_t bytes)
+{
+ int result = munmap(address, bytes);
+ if (result == -1)
+ CRASH();
+}
+
+} // namespace WTF
+
+#endif // OS(UNIX)
diff --git a/src/3rdparty/masm/wtf/OSAllocatorWin.cpp b/src/3rdparty/masm/wtf/OSAllocatorWin.cpp
new file mode 100644
index 0000000000..78300dc715
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocatorWin.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "OSAllocator.h"
+
+#if OS(WINDOWS)
+
+#include "windows.h"
+#include <wtf/Assertions.h>
+
+namespace WTF {
+
+static inline DWORD protection(bool writable, bool executable)
+{
+ return executable ?
+ (writable ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ) :
+ (writable ? PAGE_READWRITE : PAGE_READONLY);
+}
+
+void* OSAllocator::reserveUncommitted(size_t bytes, Usage, bool writable, bool executable, bool)
+{
+ void* result = VirtualAlloc(0, bytes, MEM_RESERVE, protection(writable, executable));
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void* OSAllocator::reserveAndCommit(size_t bytes, Usage, bool writable, bool executable, bool)
+{
+ void* result = VirtualAlloc(0, bytes, MEM_RESERVE | MEM_COMMIT, protection(writable, executable));
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void OSAllocator::commit(void* address, size_t bytes, bool writable, bool executable)
+{
+ void* result = VirtualAlloc(address, bytes, MEM_COMMIT, protection(writable, executable));
+ if (!result)
+ CRASH();
+}
+
+void OSAllocator::decommit(void* address, size_t bytes)
+{
+ bool result = VirtualFree(address, bytes, MEM_DECOMMIT);
+ if (!result)
+ CRASH();
+}
+
+void OSAllocator::releaseDecommitted(void* address, size_t bytes)
+{
+ // According to http://msdn.microsoft.com/en-us/library/aa366892(VS.85).aspx,
+ // dwSize must be 0 if dwFreeType is MEM_RELEASE.
+ bool result = VirtualFree(address, 0, MEM_RELEASE);
+ if (!result)
+ CRASH();
+}
+
+} // namespace WTF
+
+#endif // OS(WINDOWS)
diff --git a/src/3rdparty/masm/wtf/PageAllocation.h b/src/3rdparty/masm/wtf/PageAllocation.h
new file mode 100644
index 0000000000..18d31880c0
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageAllocation.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageAllocation_h
+#define PageAllocation_h
+
+#include <wtf/Assertions.h>
+#include <wtf/OSAllocator.h>
+#include <wtf/PageBlock.h>
+#include <wtf/UnusedParam.h>
+#include <wtf/VMTags.h>
+#include <algorithm>
+
+#if OS(DARWIN)
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#if OS(WINDOWS)
+#include <malloc.h>
+#include <windows.h>
+#endif
+
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#endif
+
+#if HAVE(MMAP)
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+namespace WTF {
+
+/*
+ PageAllocation
+
+ The PageAllocation class provides a cross-platform memory allocation interface
+ with similar capabilities to posix mmap/munmap. Memory is allocated by calling
+ PageAllocation::allocate, and deallocated by calling deallocate on the
+ PageAllocation object. The PageAllocation holds the allocation's base pointer
+ and size.
+
+ The allocate method is passed the size required (which must be a multiple of
+ the system page size, which can be accessed using PageAllocation::pageSize).
+ Callers may also optionally provide a flag indicating the usage (for use by
+ system memory usage tracking tools, where implemented), and boolean values
+ specifying the required protection (defaulting to writable, non-executable).
+*/
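+
+/* Usage sketch (editorial, illustrative only):
+
+       PageAllocation allocation = PageAllocation::allocate(2 * pageSize());
+       if (allocation)
+           memset(allocation.base(), 0, allocation.size());
+       allocation.deallocate();
+*/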
+
+class PageAllocation : private PageBlock {
+public:
+ PageAllocation()
+ {
+ }
+
+ using PageBlock::size;
+ using PageBlock::base;
+
+#ifndef __clang__
+ using PageBlock::operator bool;
+#else
+ // FIXME: This is a workaround for <rdar://problem/8876150>, wherein Clang incorrectly emits an access
+ // control warning when a client tries to use operator bool exposed above via "using PageBlock::operator bool".
+ operator bool() const { return PageBlock::operator bool(); }
+#endif
+
+ static PageAllocation allocate(size_t size, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return PageAllocation(OSAllocator::reserveAndCommit(size, usage, writable, executable), size);
+ }
+
+ void deallocate()
+ {
+ // Clear base & size before calling release; if this is *inside* the allocation
+ // then we won't be able to clear them after deallocating the memory.
+ PageAllocation tmp;
+ std::swap(tmp, *this);
+
+ ASSERT(tmp);
+ ASSERT(!*this);
+
+ OSAllocator::decommitAndRelease(tmp.base(), tmp.size());
+ }
+
+private:
+ PageAllocation(void* base, size_t size)
+ : PageBlock(base, size, false)
+ {
+ }
+};
+
+} // namespace WTF
+
+using WTF::PageAllocation;
+
+#endif // PageAllocation_h
diff --git a/src/3rdparty/masm/wtf/PageAllocationAligned.cpp b/src/3rdparty/masm/wtf/PageAllocationAligned.cpp
new file mode 100644
index 0000000000..bdb976b1b7
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageAllocationAligned.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PageAllocationAligned.h"
+
+namespace WTF {
+
+PageAllocationAligned PageAllocationAligned::allocate(size_t size, size_t alignment, OSAllocator::Usage usage, bool writable)
+{
+ ASSERT(isPageAligned(size));
+ ASSERT(isPageAligned(alignment));
+ ASSERT(isPowerOfTwo(alignment));
+ ASSERT(size >= alignment);
+ size_t alignmentMask = alignment - 1;
+
+#if OS(DARWIN)
+ int flags = VM_FLAGS_ANYWHERE;
+ if (usage != OSAllocator::UnknownUsage)
+ flags |= usage;
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+
+ vm_address_t address = 0;
+ vm_map(current_task(), &address, size, alignmentMask, flags, MEMORY_OBJECT_NULL, 0, FALSE, protection, PROT_READ | PROT_WRITE, VM_INHERIT_DEFAULT);
+ return PageAllocationAligned(reinterpret_cast<void*>(address), size);
+#else
+ size_t alignmentDelta = alignment - pageSize();
+
+ // Reserve with sufficient additional VM to correctly align.
+ size_t reservationSize = size + alignmentDelta;
+ void* reservationBase = OSAllocator::reserveUncommitted(reservationSize, usage, writable, false);
+
+ // Select an aligned region within the reservation and commit.
+ void* alignedBase = reinterpret_cast<uintptr_t>(reservationBase) & alignmentMask
+ ? reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(reservationBase) & ~alignmentMask) + alignment)
+ : reservationBase;
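+ // Worked example (editorial): with pageSize 0x1000, alignment 0x4000, and a
+ // reservation at 0x5000, 0x5000 & 0x3fff is non-zero, so alignedBase becomes
+ // (0x5000 & ~0x3fff) + 0x4000 == 0x8000, which still fits in the padded reservation.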
+ OSAllocator::commit(alignedBase, size, writable, false);
+
+ return PageAllocationAligned(alignedBase, size, reservationBase, reservationSize);
+#endif
+}
+
+void PageAllocationAligned::deallocate()
+{
+ // Clear base & size before calling release; if this is *inside* the allocation
+ // then we won't be able to clear them after deallocating the memory.
+ PageAllocationAligned tmp;
+ std::swap(tmp, *this);
+
+ ASSERT(tmp);
+ ASSERT(!*this);
+
+#if OS(DARWIN)
+ vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(tmp.base()), tmp.size());
+#else
+ ASSERT(tmp.m_reservation.contains(tmp.base(), tmp.size()));
+ OSAllocator::decommitAndRelease(tmp.m_reservation.base(), tmp.m_reservation.size(), tmp.base(), tmp.size());
+#endif
+}
+
+} // namespace WTF
diff --git a/src/3rdparty/masm/wtf/PageAllocationAligned.h b/src/3rdparty/masm/wtf/PageAllocationAligned.h
new file mode 100644
index 0000000000..211a61b8b5
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageAllocationAligned.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageAllocationAligned_h
+#define PageAllocationAligned_h
+
+#include <wtf/OSAllocator.h>
+#include <wtf/PageReservation.h>
+
+namespace WTF {
+
+class PageAllocationAligned : private PageBlock {
+public:
+ PageAllocationAligned()
+ {
+ }
+
+ using PageBlock::operator bool;
+ using PageBlock::size;
+ using PageBlock::base;
+
+ static PageAllocationAligned allocate(size_t size, size_t alignment, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true);
+
+ void deallocate();
+
+private:
+#if OS(DARWIN)
+ PageAllocationAligned(void* base, size_t size)
+ : PageBlock(base, size, false)
+ {
+ }
+#else
+ PageAllocationAligned(void* base, size_t size, void* reservationBase, size_t reservationSize)
+ : PageBlock(base, size, false)
+ , m_reservation(reservationBase, reservationSize, false)
+ {
+ }
+
+ PageBlock m_reservation;
+#endif
+};
+
+
+} // namespace WTF
+
+using WTF::PageAllocationAligned;
+
+#endif // PageAllocationAligned_h
diff --git a/src/3rdparty/masm/wtf/PageBlock.cpp b/src/3rdparty/masm/wtf/PageBlock.cpp
new file mode 100644
index 0000000000..8bbd7eb600
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageBlock.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PageBlock.h"
+
+#if OS(UNIX)
+#include <unistd.h>
+#endif
+
+#if OS(WINDOWS)
+#include <malloc.h>
+#include <windows.h>
+#endif
+
+namespace WTF {
+
+static size_t s_pageSize;
+static size_t s_pageMask;
+
+#if OS(UNIX)
+
+inline size_t systemPageSize()
+{
+ return getpagesize();
+}
+
+#elif OS(WINDOWS)
+
+inline size_t systemPageSize()
+{
+ static size_t size = 0;
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ size = system_info.dwPageSize;
+ return size;
+}
+
+#endif
+
+size_t pageSize()
+{
+ if (!s_pageSize)
+ s_pageSize = systemPageSize();
+ ASSERT(isPowerOfTwo(s_pageSize));
+ return s_pageSize;
+}
+
+size_t pageMask()
+{
+ if (!s_pageMask)
+ s_pageMask = ~(pageSize() - 1);
+ return s_pageMask;
+}
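+
+// Editorial note: because pageSize() is asserted to be a power of two, callers can
+// round a size up to a whole number of pages with a mask, e.g. (illustrative):
+//     size_t roundedSize = (size + pageSize() - 1) & pageMask();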
+
+} // namespace WTF
diff --git a/src/3rdparty/masm/wtf/PageBlock.h b/src/3rdparty/masm/wtf/PageBlock.h
new file mode 100644
index 0000000000..56e5570178
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageBlock.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageBlock_h
+#define PageBlock_h
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE size_t pageSize();
+WTF_EXPORT_PRIVATE size_t pageMask();
+inline bool isPageAligned(void* address) { return !(reinterpret_cast<intptr_t>(address) & (pageSize() - 1)); }
+inline bool isPageAligned(size_t size) { return !(size & (pageSize() - 1)); }
+inline bool isPowerOfTwo(size_t size) { return !(size & (size - 1)); }
+
+class PageBlock {
+public:
+ PageBlock();
+ PageBlock(const PageBlock&);
+ PageBlock(void*, size_t, bool hasGuardPages);
+
+ void* base() const { return m_base; }
+ size_t size() const { return m_size; }
+
+ operator bool() const { return !!m_realBase; }
+
+ bool contains(void* containedBase, size_t containedSize)
+ {
+ return containedBase >= m_base
+ && (static_cast<char*>(containedBase) + containedSize) <= (static_cast<char*>(m_base) + m_size);
+ }
+
+private:
+ void* m_realBase;
+ void* m_base;
+ size_t m_size;
+};
+
+inline PageBlock::PageBlock()
+ : m_realBase(0)
+ , m_base(0)
+ , m_size(0)
+{
+}
+
+inline PageBlock::PageBlock(const PageBlock& other)
+ : m_realBase(other.m_realBase)
+ , m_base(other.m_base)
+ , m_size(other.m_size)
+{
+}
+
+inline PageBlock::PageBlock(void* base, size_t size, bool hasGuardPages)
+ : m_realBase(base)
+ , m_base(static_cast<char*>(base) + ((base && hasGuardPages) ? pageSize() : 0))
+ , m_size(size)
+{
+}
+
+} // namespace WTF
+
+using WTF::pageSize;
+using WTF::isPageAligned;
+using WTF::isPowerOfTwo;
+
+#endif // PageBlock_h
diff --git a/src/3rdparty/masm/wtf/PageReservation.h b/src/3rdparty/masm/wtf/PageReservation.h
new file mode 100644
index 0000000000..77783ebcc4
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageReservation.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageReservation_h
+#define PageReservation_h
+
+#include <wtf/PageAllocation.h>
+
+namespace WTF {
+
+/*
+ PageReservation
+
+ Like PageAllocation, the PageReservation class provides a cross-platform memory
+ allocation interface, but with a set of capabilities more similar to that of
+ VirtualAlloc than posix mmap. PageReservation can be used to allocate virtual
+ memory without committing physical memory pages using PageReservation::reserve.
+ Following a call to reserve, all memory in the region is in a decommitted state,
+ in which the memory should not be used (accessing the memory may cause a fault).
+
+ Before using memory it must be committed by calling commit, which is passed start
+ and size values (both of which require system page size granularity). Once the
+ committed memory is no longer needed 'decommit' may be called to return the
+ memory to its decommitted state. Commit should only be called on memory that is
+ currently decommitted, and decommit should only be called on memory regions that
+ are currently committed. All memory should be decommitted before the reservation
+ is deallocated. Values in memory may not be retained across a pair of calls if
+ the region of memory is decommitted and then committed again.
+
+ Memory protection should not be changed on decommitted memory, and if protection
+ is changed on memory while it is committed it should be returned to the original
+ protection before decommit is called.
+*/
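+
+/* Usage sketch (editorial, illustrative only):
+
+       PageReservation reservation = PageReservation::reserve(4 * pageSize());
+       reservation.commit(reservation.base(), pageSize());
+       // ... use the first page ...
+       reservation.decommit(reservation.base(), pageSize());
+       reservation.deallocate();
+*/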
+
+class PageReservation : private PageBlock {
+public:
+ PageReservation()
+ : m_committed(0)
+ , m_writable(false)
+ , m_executable(false)
+ {
+ }
+
+ using PageBlock::base;
+ using PageBlock::size;
+
+#ifndef __clang__
+ using PageBlock::operator bool;
+#else
+ // FIXME: This is a workaround for <rdar://problem/8876150>, wherein Clang incorrectly emits an access
+ // control warning when a client tries to use operator bool exposed above via "using PageBlock::operator bool".
+ operator bool() const { return PageBlock::operator bool(); }
+#endif
+
+ void commit(void* start, size_t size)
+ {
+ ASSERT(*this);
+ ASSERT(isPageAligned(start));
+ ASSERT(isPageAligned(size));
+ ASSERT(contains(start, size));
+
+ m_committed += size;
+ OSAllocator::commit(start, size, m_writable, m_executable);
+ }
+
+ void decommit(void* start, size_t size)
+ {
+ ASSERT(*this);
+ ASSERT(isPageAligned(start));
+ ASSERT(isPageAligned(size));
+ ASSERT(contains(start, size));
+
+ m_committed -= size;
+ OSAllocator::decommit(start, size);
+ }
+
+ size_t committed()
+ {
+ return m_committed;
+ }
+
+ static PageReservation reserve(size_t size, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return PageReservation(OSAllocator::reserveUncommitted(size, usage, writable, executable), size, writable, executable, false);
+ }
+
+ static PageReservation reserveWithGuardPages(size_t size, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return PageReservation(OSAllocator::reserveUncommitted(size + pageSize() * 2, usage, writable, executable, true), size, writable, executable, true);
+ }
+
+ void deallocate()
+ {
+ ASSERT(!m_committed);
+
+ // Clear base & size before calling release; if this is *inside* the allocation
+ // then we won't be able to clear them after deallocating the memory.
+ PageReservation tmp;
+ std::swap(tmp, *this);
+
+ ASSERT(tmp);
+ ASSERT(!*this);
+
+ OSAllocator::releaseDecommitted(tmp.base(), tmp.size());
+ }
+
+private:
+ PageReservation(void* base, size_t size, bool writable, bool executable, bool hasGuardPages)
+ : PageBlock(base, size, hasGuardPages)
+ , m_committed(0)
+ , m_writable(writable)
+ , m_executable(executable)
+ {
+ }
+
+ size_t m_committed;
+ bool m_writable;
+ bool m_executable;
+};
+
+}
+
+using WTF::PageReservation;
+
+#endif // PageReservation_h
diff --git a/src/3rdparty/masm/wtf/Platform.h b/src/3rdparty/masm/wtf/Platform.h
new file mode 100644
index 0000000000..5c85c15634
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Platform.h
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ * Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Platform_h
+#define WTF_Platform_h
+
+/* Include compiler specific macros */
+#include <wtf/Compiler.h>
+
+/* ==== PLATFORM handles OS, operating environment, graphics API, and
+ CPU. This macro will be phased out in favor of platform adaptation
+ macros, policy decision macros, and top-level port definitions. ==== */
+#define PLATFORM(WTF_FEATURE) (defined WTF_PLATFORM_##WTF_FEATURE && WTF_PLATFORM_##WTF_FEATURE)
+
+
+/* ==== Platform adaptation macros: these describe properties of the target environment. ==== */
+
+/* CPU() - the target CPU architecture */
+#define CPU(WTF_FEATURE) (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE)
+/* HAVE() - specific system features (headers, functions or similar) that are present or not */
+#define HAVE(WTF_FEATURE) (defined HAVE_##WTF_FEATURE && HAVE_##WTF_FEATURE)
+/* OS() - underlying operating system; only to be used for mandated low-level services like
+ virtual memory, not to choose a GUI toolkit */
+#define OS(WTF_FEATURE) (defined WTF_OS_##WTF_FEATURE && WTF_OS_##WTF_FEATURE)
+
+
+/* ==== Policy decision macros: these define policy choices for a particular port. ==== */
+
+/* USE() - use a particular third-party library or optional OS service */
+#define USE(WTF_FEATURE) (defined WTF_USE_##WTF_FEATURE && WTF_USE_##WTF_FEATURE)
+/* ENABLE() - turn on a specific feature of WebKit */
+#define ENABLE(WTF_FEATURE) (defined ENABLE_##WTF_FEATURE && ENABLE_##WTF_FEATURE)
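+
+/* Usage sketch (editorial, illustrative): each function-style macro above expands to
+   a defined-and-nonzero test, so it is safe to use even when the underlying WTF_*
+   macro was never defined, e.g.:
+
+       #if OS(LINUX) && CPU(X86_64)
+       ...
+       #endif
+*/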
+
+
+/* ==== CPU() - the target CPU architecture ==== */
+
+/* This also defines CPU(BIG_ENDIAN) or CPU(MIDDLE_ENDIAN) or neither, as appropriate. */
+
+/* CPU(ALPHA) - DEC Alpha */
+#if defined(__alpha__)
+#define WTF_CPU_ALPHA 1
+#endif
+
+/* CPU(IA64) - Itanium / IA-64 */
+#if defined(__ia64__)
+#define WTF_CPU_IA64 1
+/* 32-bit mode on Itanium */
+#if !defined(__LP64__)
+#define WTF_CPU_IA64_32 1
+#endif
+#endif
+
+/* CPU(MIPS) - MIPS 32-bit */
+/* Note: only the O32 ABI is tested, so we enable this only for O32 for now. */
+#if (defined(mips) || defined(__mips__) || defined(MIPS) || defined(_MIPS_)) \
+ && defined(_ABIO32)
+#define WTF_CPU_MIPS 1
+#if defined(__MIPSEB__)
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+#define WTF_MIPS_PIC (defined __PIC__)
+#define WTF_MIPS_ARCH __mips
+#define WTF_MIPS_ISA(v) (defined WTF_MIPS_ARCH && WTF_MIPS_ARCH == v)
+#define WTF_MIPS_ISA_AT_LEAST(v) (defined WTF_MIPS_ARCH && WTF_MIPS_ARCH >= v)
+#define WTF_MIPS_ARCH_REV __mips_isa_rev
+#define WTF_MIPS_ISA_REV(v) (defined WTF_MIPS_ARCH_REV && WTF_MIPS_ARCH_REV == v)
+#define WTF_MIPS_DOUBLE_FLOAT (defined __mips_hard_float && !defined __mips_single_float)
+#define WTF_MIPS_FP64 (defined __mips_fpr && __mips_fpr == 64)
+/* MIPS requires allocators to use aligned memory */
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#endif /* MIPS */
+
+/* CPU(PPC) - PowerPC 32-bit */
+#if defined(__ppc__) \
+ || defined(__PPC__) \
+ || defined(__powerpc__) \
+ || defined(__powerpc) \
+ || defined(__POWERPC__) \
+ || defined(_M_PPC) \
+ || defined(__PPC)
+#define WTF_CPU_PPC 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(PPC64) - PowerPC 64-bit */
+#if defined(__ppc64__) \
+ || defined(__PPC64__)
+#define WTF_CPU_PPC64 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SH4) - SuperH SH-4 */
+#if defined(__SH4__)
+#define WTF_CPU_SH4 1
+#endif
+
+/* CPU(SPARC32) - SPARC 32-bit */
+#if defined(__sparc) && !defined(__arch64__) || defined(__sparcv8)
+#define WTF_CPU_SPARC32 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SPARC64) - SPARC 64-bit */
+#if defined(__sparc__) && defined(__arch64__) || defined (__sparcv9)
+#define WTF_CPU_SPARC64 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SPARC) - any SPARC, true for CPU(SPARC32) and CPU(SPARC64) */
+#if CPU(SPARC32) || CPU(SPARC64)
+#define WTF_CPU_SPARC 1
+#endif
+
+/* CPU(S390X) - S390 64-bit */
+#if defined(__s390x__)
+#define WTF_CPU_S390X 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(S390) - S390 32-bit */
+#if defined(__s390__)
+#define WTF_CPU_S390 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(X86) - i386 / x86 32-bit */
+#if defined(__i386__) \
+ || defined(i386) \
+ || defined(_M_IX86) \
+ || defined(_X86_) \
+ || defined(__THW_INTEL)
+#define WTF_CPU_X86 1
+#endif
+
+/* CPU(X86_64) - AMD64 / Intel64 / x86_64 64-bit */
+#if defined(__x86_64__) \
+ || defined(_M_X64)
+#define WTF_CPU_X86_64 1
+#endif
+
+/* CPU(ARM) - ARM, any version */
+#define WTF_ARM_ARCH_AT_LEAST(N) (CPU(ARM) && WTF_ARM_ARCH_VERSION >= N)
+
+#if defined(arm) \
+ || defined(__arm__) \
+ || defined(ARM) \
+ || defined(_ARM_)
+#define WTF_CPU_ARM 1
+
+#if defined(__ARM_PCS_VFP)
+#define WTF_CPU_ARM_HARDFP 1
+#endif
+
+#if defined(__ARMEB__) || (COMPILER(RVCT) && defined(__BIG_ENDIAN))
+#define WTF_CPU_BIG_ENDIAN 1
+
+#elif !defined(__ARM_EABI__) \
+ && !defined(__EABI__) \
+ && !defined(__VFP_FP__) \
+ && !defined(_WIN32_WCE)
+#define WTF_CPU_MIDDLE_ENDIAN 1
+
+#endif
+
+/* Set WTF_ARM_ARCH_VERSION */
+#if defined(__ARM_ARCH_4__) \
+ || defined(__ARM_ARCH_4T__) \
+ || defined(__MARM_ARMV4__)
+#define WTF_ARM_ARCH_VERSION 4
+
+#elif defined(__ARM_ARCH_5__) \
+ || defined(__ARM_ARCH_5T__) \
+ || defined(__MARM_ARMV5__)
+#define WTF_ARM_ARCH_VERSION 5
+
+#elif defined(__ARM_ARCH_5E__) \
+ || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#define WTF_ARM_ARCH_VERSION 5
+/* ARMv5TE requires allocators to use aligned memory */
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+
+#elif defined(__ARM_ARCH_6__) \
+ || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARMV6__)
+#define WTF_ARM_ARCH_VERSION 6
+
+#elif defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7S__)
+#define WTF_ARM_ARCH_VERSION 7
+
+/* MSVC sets _M_ARM */
+#elif defined(_M_ARM)
+#define WTF_ARM_ARCH_VERSION _M_ARM
+
+/* RVCT sets _TARGET_ARCH_ARM */
+#elif defined(__TARGET_ARCH_ARM)
+#define WTF_ARM_ARCH_VERSION __TARGET_ARCH_ARM
+
+#if defined(__TARGET_ARCH_5E) \
+ || defined(__TARGET_ARCH_5TE) \
+ || defined(__TARGET_ARCH_5TEJ)
+/* ARMv5TE requires allocators to use aligned memory */
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#endif
+
+#else
+#define WTF_ARM_ARCH_VERSION 0
+
+#endif
+
+/* Set WTF_THUMB_ARCH_VERSION */
+#if defined(__ARM_ARCH_4T__)
+#define WTF_THUMB_ARCH_VERSION 1
+
+#elif defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#define WTF_THUMB_ARCH_VERSION 2
+
+#elif defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6M__)
+#define WTF_THUMB_ARCH_VERSION 3
+
+#elif defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_7__) \
+ || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7S__)
+#define WTF_THUMB_ARCH_VERSION 4
+
+/* RVCT sets __TARGET_ARCH_THUMB */
+#elif defined(__TARGET_ARCH_THUMB)
+#define WTF_THUMB_ARCH_VERSION __TARGET_ARCH_THUMB
+
+#else
+#define WTF_THUMB_ARCH_VERSION 0
+#endif
+
+
+/* CPU(ARMV5_OR_LOWER) - ARM instruction set v5 or earlier */
+/* On ARMv5 and below the natural alignment is required.
+ And there are some other differences for v5 or earlier. */
+#if !defined(ARMV5_OR_LOWER) && !WTF_ARM_ARCH_AT_LEAST(6)
+#define WTF_CPU_ARMV5_OR_LOWER 1
+#endif
+
+
+/* CPU(ARM_TRADITIONAL) - Thumb2 is not available, only traditional ARM (v4 or greater) */
+/* CPU(ARM_THUMB2) - Thumb2 instruction set is available */
+/* Only one of these will be defined. */
+#if !defined(WTF_CPU_ARM_TRADITIONAL) && !defined(WTF_CPU_ARM_THUMB2)
+# if defined(thumb2) || defined(__thumb2__) \
+ || ((defined(__thumb) || defined(__thumb__)) && WTF_THUMB_ARCH_VERSION == 4)
+# define WTF_CPU_ARM_TRADITIONAL 0
+# define WTF_CPU_ARM_THUMB2 1
+# elif WTF_ARM_ARCH_AT_LEAST(4)
+# define WTF_CPU_ARM_TRADITIONAL 1
+# define WTF_CPU_ARM_THUMB2 0
+# else
+# error "Not supported ARM architecture"
+# endif
+#elif CPU(ARM_TRADITIONAL) && CPU(ARM_THUMB2) /* Sanity Check */
+# error "Cannot use both of WTF_CPU_ARM_TRADITIONAL and WTF_CPU_ARM_THUMB2 platforms"
+#endif /* !defined(WTF_CPU_ARM_TRADITIONAL) && !defined(WTF_CPU_ARM_THUMB2) */
+
+#if defined(__ARM_NEON__) && !defined(WTF_CPU_ARM_NEON)
+#define WTF_CPU_ARM_NEON 1
+#endif
+
+#if CPU(ARM_NEON) && (!COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 7, 0))
+/* All NEON intrinsics usage can be disabled by this macro. */
+#define HAVE_ARM_NEON_INTRINSICS 1
+#endif
+
+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
+#define WTF_CPU_ARM_VFP 1
+#endif
+
+#if defined(__ARM_ARCH_7S__)
+#define WTF_CPU_APPLE_ARMV7S 1
+#endif
+
+#endif /* ARM */
+
+#if CPU(ARM) || CPU(MIPS) || CPU(SH4) || CPU(SPARC)
+#define WTF_CPU_NEEDS_ALIGNED_ACCESS 1
+#endif
+
+/* ==== OS() - underlying operating system; only to be used for mandated low-level services like
+ virtual memory, not to choose a GUI toolkit ==== */
+
+/* OS(AIX) - AIX */
+#ifdef _AIX
+#define WTF_OS_AIX 1
+#endif
+
+/* OS(DARWIN) - Any Darwin-based OS, including Mac OS X and iPhone OS */
+#ifdef __APPLE__
+#define WTF_OS_DARWIN 1
+
+#include <Availability.h>
+#include <AvailabilityMacros.h>
+#include <TargetConditionals.h>
+#endif
+
+/* OS(IOS) - iOS */
+/* OS(MAC_OS_X) - Mac OS X (not including iOS) */
+#if OS(DARWIN) && ((defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) \
+ || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) \
+ || (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR))
+#define WTF_OS_IOS 1
+#elif OS(DARWIN) && defined(TARGET_OS_MAC) && TARGET_OS_MAC
+#define WTF_OS_MAC_OS_X 1
+
+/* FIXME: These can be removed after sufficient time has passed since the removal of BUILDING_ON / TARGETING macros. */
+
+#define ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED 0 / 0
+#define ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED 0 / 0
+
+#define BUILDING_ON_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
+#define BUILDING_ON_SNOW_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
+#define BUILDING_ON_LION ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
+
+#define TARGETING_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
+#define TARGETING_SNOW_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
+#define TARGETING_LION ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
+#endif
+
+/* OS(FREEBSD) - FreeBSD */
+#if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
+#define WTF_OS_FREEBSD 1
+#endif
+
+/* OS(HURD) - GNU/Hurd */
+#ifdef __GNU__
+#define WTF_OS_HURD 1
+#endif
+
+/* OS(LINUX) - Linux */
+#ifdef __linux__
+#define WTF_OS_LINUX 1
+#endif
+
+/* OS(NETBSD) - NetBSD */
+#if defined(__NetBSD__)
+#define WTF_OS_NETBSD 1
+#endif
+
+/* OS(OPENBSD) - OpenBSD */
+#ifdef __OpenBSD__
+#define WTF_OS_OPENBSD 1
+#endif
+
+/* OS(QNX) - QNX */
+#if defined(__QNXNTO__)
+#define WTF_OS_QNX 1
+#endif
+
+/* OS(SOLARIS) - Solaris */
+#if defined(sun) || defined(__sun)
+#define WTF_OS_SOLARIS 1
+#endif
+
+/* OS(WINCE) - Windows CE; note that for this platform OS(WINDOWS) is also defined */
+#if defined(_WIN32_WCE)
+#define WTF_OS_WINCE 1
+#endif
+
+/* OS(WINDOWS) - Any version of Windows */
+#if defined(WIN32) || defined(_WIN32)
+#define WTF_OS_WINDOWS 1
+#endif
+
+#define WTF_OS_WIN ERROR "USE WINDOWS WITH OS NOT WIN"
+#define WTF_OS_MAC ERROR "USE MAC_OS_X WITH OS NOT MAC"
+
+/* OS(UNIX) - Any Unix-like system */
+#if OS(AIX) \
+ || OS(DARWIN) \
+ || OS(FREEBSD) \
+ || OS(HURD) \
+ || OS(LINUX) \
+ || OS(NETBSD) \
+ || OS(OPENBSD) \
+ || OS(QNX) \
+ || OS(SOLARIS) \
+ || defined(unix) \
+ || defined(__unix) \
+ || defined(__unix__)
+#define WTF_OS_UNIX 1
+#endif
+
+/* Operating environments */
+
+/* FIXME: these are all mixes of OS, operating environment and policy choices. */
+/* PLATFORM(QT) */
+/* PLATFORM(WX) */
+/* PLATFORM(EFL) */
+/* PLATFORM(GTK) */
+/* PLATFORM(BLACKBERRY) */
+/* PLATFORM(MAC) */
+/* PLATFORM(WIN) */
+#if defined(BUILDING_QT__)
+#define WTF_PLATFORM_QT 1
+#elif defined(BUILDING_WX__)
+#define WTF_PLATFORM_WX 1
+#elif defined(BUILDING_EFL__)
+#define WTF_PLATFORM_EFL 1
+#elif defined(BUILDING_GTK__)
+#define WTF_PLATFORM_GTK 1
+#elif defined(BUILDING_BLACKBERRY__)
+#define WTF_PLATFORM_BLACKBERRY 1
+#elif OS(DARWIN)
+#define WTF_PLATFORM_MAC 1
+#elif OS(WINDOWS)
+#define WTF_PLATFORM_WIN 1
+#endif
+
+/* PLATFORM(IOS) */
+/* FIXME: this is sometimes used as an OS switch and sometimes for higher-level things */
+#if (defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE)
+#define WTF_PLATFORM_IOS 1
+#endif
+
+/* PLATFORM(IOS_SIMULATOR) */
+#if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+#define WTF_PLATFORM_IOS 1
+#define WTF_PLATFORM_IOS_SIMULATOR 1
+#endif
+
+/* Graphics engines */
+
+/* USE(CG) and PLATFORM(CI) */
+#if PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_USE_CG 1
+#endif
+#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WIN) && USE(CG))
+#define WTF_USE_CA 1
+#endif
+
+#if PLATFORM(BLACKBERRY)
+#define WTF_USE_SKIA 1
+#define WTF_USE_LOW_QUALITY_IMAGE_INTERPOLATION 1
+#define WTF_USE_LOW_QUALITY_IMAGE_NO_JPEG_DITHERING 1
+#define WTF_USE_LOW_QUALITY_IMAGE_NO_JPEG_FANCY_UPSAMPLING 1
+#endif
+
+#if PLATFORM(GTK)
+#define WTF_USE_CAIRO 1
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#endif
+
+/* On Windows, use QueryPerformanceCounter by default */
+#if OS(WINDOWS)
+#define WTF_USE_QUERY_PERFORMANCE_COUNTER 1
+#endif
+
+#if OS(WINCE) && !PLATFORM(QT)
+#define NOSHLWAPI /* shlwapi.h not available on WinCE */
+
+/* MSDN documentation says these functions are provided with uspce.lib, but we cannot find this file. */
+#define __usp10__ /* disable "usp10.h" */
+
+#define _INC_ASSERT /* disable "assert.h" */
+#define assert(x)
+
+#endif /* OS(WINCE) && !PLATFORM(QT) */
+
+#if OS(WINCE) && !PLATFORM(QT)
+#define WTF_USE_WCHAR_UNICODE 1
+#elif PLATFORM(GTK)
+/* The GTK+ Unicode backend is configurable */
+#else
+#define WTF_USE_ICU_UNICODE 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS)
+#if CPU(X86_64)
+#define WTF_USE_PLUGIN_HOST_PROCESS 1
+#endif
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#define WTF_USE_SCROLLBAR_PAINTER 1
+#define HAVE_XPC 1
+#endif
+#define WTF_USE_CF 1
+#define HAVE_READLINE 1
+#define HAVE_RUNLOOP_TIMER 1
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+#define HAVE_LAYER_HOSTING_IN_WINDOW_SERVER 1
+#endif
+#define WTF_USE_APPKIT 1
+#define WTF_USE_SECURITY_FRAMEWORK 1
+#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
+
+#if PLATFORM(IOS)
+#define DONT_FINALIZE_ON_MAIN_THREAD 1
+#endif
+
+#if PLATFORM(QT) && OS(DARWIN)
+#define WTF_USE_CF 1
+#endif
+
+#if OS(DARWIN) && !PLATFORM(GTK) && !PLATFORM(QT)
+#define ENABLE_PURGEABLE_MEMORY 1
+#endif
+
+#if PLATFORM(IOS)
+#define HAVE_READLINE 1
+#define WTF_USE_APPKIT 0
+#define WTF_USE_CF 1
+#define WTF_USE_CFNETWORK 1
+#define WTF_USE_NETWORK_CFDATA_ARRAY_CALLBACK 1
+#define WTF_USE_SECURITY_FRAMEWORK 0
+#define WTF_USE_WEB_THREAD 1
+#endif /* PLATFORM(IOS) */
+
+#if PLATFORM(WIN) && !OS(WINCE)
+#define WTF_USE_CF 1
+#endif
+
+#if PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)
+#define WTF_USE_CFNETWORK 1
+#endif
+
+#if USE(CFNETWORK) || PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_USE_CFURLCACHE 1
+#endif
+
+#if PLATFORM(WX)
+#if !CPU(PPC)
+#if !defined(ENABLE_ASSEMBLER)
+#define ENABLE_ASSEMBLER 1
+#endif
+#define ENABLE_JIT 1
+#endif
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#define ENABLE_LLINT 0
+#if OS(DARWIN)
+#define WTF_USE_CF 1
+#endif
+#endif
+
+#if !defined(HAVE_ACCESSIBILITY)
+#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(EFL)
+#define HAVE_ACCESSIBILITY 1
+#endif
+#endif /* !defined(HAVE_ACCESSIBILITY) */
+
+#if OS(UNIX)
+#define HAVE_ERRNO_H 1
+#define HAVE_MMAP 1
+#define HAVE_SIGNAL_H 1
+#define HAVE_STRINGS_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_TIME_H 1
+#define WTF_USE_OS_RANDOMNESS 1
+#define WTF_USE_PTHREADS 1
+#endif /* OS(UNIX) */
+
+#if OS(UNIX) && !OS(QNX)
+#define HAVE_LANGINFO_H 1
+#endif
+
+#if (OS(FREEBSD) || OS(OPENBSD)) && !defined(__GLIBC__)
+#define HAVE_PTHREAD_NP_H 1
+#endif
+
+#if !defined(HAVE_VASPRINTF)
+#if !COMPILER(MSVC) && !COMPILER(RVCT) && !COMPILER(MINGW) && !(COMPILER(GCC) && OS(QNX))
+#define HAVE_VASPRINTF 1
+#endif
+#endif
+
+#if !defined(HAVE_STRNSTR)
+#if OS(DARWIN) || (OS(FREEBSD) && !defined(__GLIBC__))
+#define HAVE_STRNSTR 1
+#endif
+#endif
+
+#if !OS(WINDOWS) && !OS(SOLARIS)
+#define HAVE_TM_GMTOFF 1
+#define HAVE_TM_ZONE 1
+#define HAVE_TIMEGM 1
+#endif
+
+#if OS(DARWIN)
+
+#define HAVE_DISPATCH_H 1
+#define HAVE_MADV_FREE 1
+#define HAVE_MADV_FREE_REUSE 1
+#define HAVE_MERGESORT 1
+#define HAVE_PTHREAD_SETNAME_NP 1
+#define HAVE_SYS_TIMEB_H 1
+#define WTF_USE_ACCELERATE 1
+
+#if !PLATFORM(IOS)
+#define HAVE_HOSTED_CORE_ANIMATION 1
+#endif /* !PLATFORM(IOS) */
+
+#endif /* OS(DARWIN) */
+
+#if OS(WINDOWS) && !OS(WINCE)
+#define HAVE_SYS_TIMEB_H 1
+#define HAVE_ALIGNED_MALLOC 1
+#define HAVE_ISDEBUGGERPRESENT 1
+#endif
+
+#if OS(WINDOWS)
+#define HAVE_VIRTUALALLOC 1
+#define WTF_USE_OS_RANDOMNESS 1
+#endif
+
+#if OS(QNX)
+#define HAVE_MADV_FREE_REUSE 1
+#define HAVE_MADV_FREE 1
+#endif
+
+/* ENABLE macro defaults */
+
+/* FIXME: move out all ENABLE() defines from here to FeatureDefines.h */
+
+/* Include feature macros */
+#include <wtf/FeatureDefines.h>
+
+#if PLATFORM(QT)
+/* We must not customize the global operator new and delete for the Qt port. */
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#if !OS(UNIX)
+#define USE_SYSTEM_MALLOC 1
+#endif
+#endif
+
+#if PLATFORM(EFL)
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#endif
+
+#if !defined(ENABLE_GLOBAL_FASTMALLOC_NEW)
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 1
+#endif
+
+#define ENABLE_DEBUG_WITH_BREAKPOINT 0
+#define ENABLE_SAMPLING_COUNTERS 0
+#define ENABLE_SAMPLING_FLAGS 0
+#define ENABLE_SAMPLING_REGIONS 0
+#define ENABLE_OPCODE_SAMPLING 0
+#define ENABLE_CODEBLOCK_SAMPLING 0
+#if ENABLE(CODEBLOCK_SAMPLING) && !ENABLE(OPCODE_SAMPLING)
+#error "CODEBLOCK_SAMPLING requires OPCODE_SAMPLING"
+#endif
+#if ENABLE(OPCODE_SAMPLING) || ENABLE(SAMPLING_FLAGS) || ENABLE(SAMPLING_REGIONS)
+#define ENABLE_SAMPLING_THREAD 1
+#endif
+
+#if !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32_64)
+#if (CPU(X86_64) && (OS(UNIX) || OS(WINDOWS))) \
+ || (CPU(IA64) && !CPU(IA64_32)) \
+ || CPU(ALPHA) \
+ || CPU(SPARC64) \
+ || CPU(S390X) \
+ || CPU(PPC64)
+#define WTF_USE_JSVALUE64 1
+#else
+#define WTF_USE_JSVALUE32_64 1
+#endif
+#endif /* !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32_64) */
+
+/* Disable the JIT on versions of GCC prior to 4.1 */
+#if !defined(ENABLE_JIT) && COMPILER(GCC) && !GCC_VERSION_AT_LEAST(4, 1, 0)
+#define ENABLE_JIT 0
+#endif
+
+#if !defined(ENABLE_JIT) && CPU(SH4) && PLATFORM(QT)
+#define ENABLE_JIT 1
+#endif
+
+/* The JIT is enabled by default on all x86, x86-64, ARM & MIPS platforms. */
+#if !defined(ENABLE_JIT) \
+ && (CPU(X86) || CPU(X86_64) || CPU(ARM) || CPU(MIPS)) \
+ && (OS(DARWIN) || !COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 1, 0)) \
+ && !OS(WINCE) \
+ && !(OS(QNX) && !PLATFORM(QT)) /* We use JIT in QNX Qt */
+#define ENABLE_JIT 1
+#endif
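+
+/* Every branch above is guarded by !defined(ENABLE_JIT), so a port can force
+   the decision from its build system; an illustrative qmake line:
+
+       DEFINES += ENABLE_JIT=0
+*/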
+
+/* If possible, try to enable a disassembler. This is optional. We proceed in two
+ steps: first we try to find some disassembler that we can use, and then we
+ decide if the high-level disassembler API can be enabled. */
+#if !defined(WTF_USE_UDIS86) && ENABLE(JIT) && (PLATFORM(MAC) || (PLATFORM(QT) && OS(LINUX))) \
+ && (CPU(X86) || CPU(X86_64))
+#define WTF_USE_UDIS86 1
+#endif
+
+#if !defined(ENABLE_DISASSEMBLER) && USE(UDIS86)
+#define ENABLE_DISASSEMBLER 1
+#endif
+
+/* On the GTK+ port we take an extra precaution for LLINT support:
+ * We disable it on x86 builds if the build target doesn't support SSE2
+ * instructions (LLINT requires SSE2 on this platform). */
+#if !defined(ENABLE_LLINT) && PLATFORM(GTK) && CPU(X86) && COMPILER(GCC) \
+ && !defined(__SSE2__)
+#define ENABLE_LLINT 0
+#endif
+
+/* On some of the platforms where we have a JIT, we want to also have the
+ low-level interpreter. */
+#if !defined(ENABLE_LLINT) \
+ && ENABLE(JIT) \
+ && (OS(DARWIN) || OS(LINUX)) \
+ && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(GTK) || PLATFORM(QT)) \
+ && (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(MIPS))
+#define ENABLE_LLINT 1
+#endif
+
+#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT) && !COMPILER(MSVC)
+/* Enable the DFG JIT on X86 and X86_64. Only tested on Mac and GNU/Linux. */
+#if (CPU(X86) || CPU(X86_64)) && (OS(DARWIN) || OS(LINUX))
+#define ENABLE_DFG_JIT 1
+#endif
+/* Enable the DFG JIT on ARMv7. Only tested on iOS and Qt Linux. */
+#if CPU(ARM_THUMB2) && (PLATFORM(IOS) || PLATFORM(BLACKBERRY) || PLATFORM(QT))
+#define ENABLE_DFG_JIT 1
+#endif
+/* Enable the DFG JIT on ARM. */
+#if CPU(ARM_TRADITIONAL)
+#define ENABLE_DFG_JIT 1
+#endif
+/* Enable the DFG JIT on MIPS. */
+#if CPU(MIPS)
+#define ENABLE_DFG_JIT 1
+#endif
+#endif
+
+/* If the JIT is not available, enable the LLInt C Loop: */
+#if !ENABLE(JIT)
+#undef ENABLE_LLINT /* Undef so that we can redefine it. */
+#undef ENABLE_LLINT_C_LOOP /* Undef so that we can redefine it. */
+#undef ENABLE_DFG_JIT /* Undef so that we can redefine it. */
+#define ENABLE_LLINT 1
+#define ENABLE_LLINT_C_LOOP 1
+#define ENABLE_DFG_JIT 0
+#endif
+
+/* Do a sanity check to make sure that we at least have one execution engine in
+ use: */
+#if !(ENABLE(JIT) || ENABLE(LLINT))
+#error You have to have at least one execution model enabled to build JSC
+#endif
+
+/* Profiling of types and values used by JIT code. DFG_JIT depends on it, but you
+ can enable it manually with DFG turned off if you want to use it as a standalone
+ profiler. In that case, you probably want to also enable VERBOSE_VALUE_PROFILE
+ below. */
+#if !defined(ENABLE_VALUE_PROFILER) && ENABLE(DFG_JIT)
+#define ENABLE_VALUE_PROFILER 1
+#endif
+
+#if !defined(ENABLE_VERBOSE_VALUE_PROFILE) && ENABLE(VALUE_PROFILER)
+#define ENABLE_VERBOSE_VALUE_PROFILE 0
+#endif
+
+#if !defined(ENABLE_SIMPLE_HEAP_PROFILING)
+#define ENABLE_SIMPLE_HEAP_PROFILING 0
+#endif
+
+/* Counts uses of write barriers using sampling counters. Be sure to also
+ set ENABLE_SAMPLING_COUNTERS to 1. */
+#if !defined(ENABLE_WRITE_BARRIER_PROFILING)
+#define ENABLE_WRITE_BARRIER_PROFILING 0
+#endif
+
+/* Enable verification that register allocations are not made within generated control flow.
+   Turned on for debug builds. */
+#if !defined(ENABLE_DFG_REGISTER_ALLOCATION_VALIDATION) && ENABLE(DFG_JIT)
+#if !defined(NDEBUG)
+#define ENABLE_DFG_REGISTER_ALLOCATION_VALIDATION 1
+#else
+#define ENABLE_DFG_REGISTER_ALLOCATION_VALIDATION 0
+#endif
+#endif
+
+/* Configure the JIT */
+#if CPU(X86) && COMPILER(MSVC)
+#define JSC_HOST_CALL __fastcall
+#elif CPU(X86) && COMPILER(GCC)
+#define JSC_HOST_CALL __attribute__ ((fastcall))
+#else
+#define JSC_HOST_CALL
+#endif
+
+/* Configure the interpreter */
+#if COMPILER(GCC) || (COMPILER(RVCT) && defined(__GNUC__))
+#define HAVE_COMPUTED_GOTO 1
+#endif
+
+/* Determine if we need to enable Computed Goto Opcodes or not: */
+#if HAVE(COMPUTED_GOTO) && ENABLE(LLINT)
+#define ENABLE_COMPUTED_GOTO_OPCODES 1
+#endif
+
+/* Regular Expression Tracing - Set to 1 to trace RegExp's in jsc. Results dumped at exit */
+#define ENABLE_REGEXP_TRACING 0
+
+/* Yet Another Regex Runtime - turned on by default for JIT-enabled ports. */
+#if !defined(ENABLE_YARR_JIT) && (ENABLE(JIT) || ENABLE(LLINT_C_LOOP)) && !(OS(QNX) && PLATFORM(QT))
+#define ENABLE_YARR_JIT 1
+
+/* Setting this flag compares JIT results with interpreter results. */
+#define ENABLE_YARR_JIT_DEBUG 0
+#endif
+
+/* If either the JIT or the RegExp JIT is enabled, then the Assembler must be
+ enabled as well: */
+#if ENABLE(JIT) || ENABLE(YARR_JIT)
+#if defined(ENABLE_ASSEMBLER) && !ENABLE_ASSEMBLER
+#error "Cannot enable the JIT or RegExp JIT without enabling the Assembler"
+#else
+#undef ENABLE_ASSEMBLER
+#define ENABLE_ASSEMBLER 1
+#endif
+#endif
+
+/* Pick which allocator to use; we only need an executable allocator if the assembler is compiled in.
+ On x86-64 we use a single fixed mmap, on other platforms we mmap on demand. */
+#if ENABLE(ASSEMBLER)
+#if (CPU(X86_64) && !OS(WINDOWS)) || PLATFORM(IOS)
+#define ENABLE_EXECUTABLE_ALLOCATOR_FIXED 1
+#else
+#define ENABLE_EXECUTABLE_ALLOCATOR_DEMAND 1
+#endif
+#endif
+
+/* Use the QXmlStreamReader implementation for XMLDocumentParser */
+/* Use the QXmlQuery implementation for XSLTProcessor */
+#if PLATFORM(QT)
+#if !USE(LIBXML2)
+#define WTF_USE_QXMLSTREAM 1
+#define WTF_USE_QXMLQUERY 1
+#endif
+#endif
+
+/* Accelerated compositing */
+#if PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(QT) || (PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO))
+#define WTF_USE_ACCELERATED_COMPOSITING 1
+#endif
+
+#if ENABLE(WEBGL) && !defined(WTF_USE_3D_GRAPHICS)
+#define WTF_USE_3D_GRAPHICS 1
+#endif
+
+/* Qt always uses Texture Mapper */
+#if PLATFORM(QT)
+#define WTF_USE_TEXTURE_MAPPER 1
+#endif
+
+#if USE(TEXTURE_MAPPER) && USE(3D_GRAPHICS) && !defined(WTF_USE_TEXTURE_MAPPER_GL)
+#define WTF_USE_TEXTURE_MAPPER_GL 1
+#endif
+
+/* Compositing on the UI-process in WebKit2 */
+#if USE(3D_GRAPHICS) && PLATFORM(QT)
+#define WTF_USE_COORDINATED_GRAPHICS 1
+#endif
+
+#if PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_USE_PROTECTION_SPACE_AUTH_CALLBACK 1
+#endif
+
+/* Set up a define for a common error that is intended to cause a build error -- thus the space after Error. */
+#define WTF_PLATFORM_CFNETWORK Error USE_macro_should_be_used_with_CFNETWORK
+
+/* FIXME: Eventually we should enable this for all platforms and get rid of the define. */
+#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(QT) || PLATFORM(GTK) || PLATFORM(EFL)
+#define WTF_USE_PLATFORM_STRATEGIES 1
+#endif
+
+#if PLATFORM(WIN)
+#define WTF_USE_CROSS_PLATFORM_CONTEXT_MENUS 1
+#endif
+
+#if PLATFORM(MAC) && HAVE(ACCESSIBILITY)
+#define WTF_USE_ACCESSIBILITY_CONTEXT_MENUS 1
+#endif
+
+#if CPU(ARM_THUMB2)
+#define ENABLE_BRANCH_COMPACTION 1
+#endif
+
+#if !defined(ENABLE_THREADING_LIBDISPATCH) && HAVE(DISPATCH_H)
+#define ENABLE_THREADING_LIBDISPATCH 1
+#elif !defined(ENABLE_THREADING_OPENMP) && defined(_OPENMP)
+#define ENABLE_THREADING_OPENMP 1
+#elif !defined(THREADING_GENERIC)
+#define ENABLE_THREADING_GENERIC 1
+#endif
+
+#if USE(GLIB)
+#include <wtf/gobject/GTypedefs.h>
+#endif
+
+/* FIXME: This define won't be needed once #27551 is fully landed. However,
+ since most ports try to support sub-project independence, adding new headers
+ to WTF causes many ports to break, and so this way we can address the build
+ breakages one port at a time. */
+#if !defined(WTF_USE_EXPORT_MACROS) && (PLATFORM(MAC) || PLATFORM(QT) || PLATFORM(WX))
+#define WTF_USE_EXPORT_MACROS 1
+#endif
+
+#if !defined(WTF_USE_EXPORT_MACROS_FOR_TESTING) && (PLATFORM(GTK) || PLATFORM(WIN))
+#define WTF_USE_EXPORT_MACROS_FOR_TESTING 1
+#endif
+
+#if (PLATFORM(QT) && !OS(DARWIN) && !OS(WINDOWS)) || PLATFORM(GTK) || PLATFORM(EFL)
+#define WTF_USE_UNIX_DOMAIN_SOCKETS 1
+#endif
+
+#if !defined(ENABLE_COMPARE_AND_SWAP) && (OS(WINDOWS) || (COMPILER(GCC) && (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2))))
+#define ENABLE_COMPARE_AND_SWAP 1
+#endif
+
+#define ENABLE_OBJECT_MARK_LOGGING 0
+
+#if !defined(ENABLE_PARALLEL_GC) && !ENABLE(OBJECT_MARK_LOGGING) && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(BLACKBERRY) || PLATFORM(GTK)) && ENABLE(COMPARE_AND_SWAP)
+#define ENABLE_PARALLEL_GC 1
+#elif PLATFORM(QT)
+/* Parallel GC is temporarily disabled on Qt because of regular crashes; see https://bugs.webkit.org/show_bug.cgi?id=90957 for details. */
+#define ENABLE_PARALLEL_GC 0
+#endif
+
+#if !defined(ENABLE_GC_VALIDATION) && !defined(NDEBUG)
+#define ENABLE_GC_VALIDATION 1
+#endif
+
+#if !defined(ENABLE_BINDING_INTEGRITY)
+#define ENABLE_BINDING_INTEGRITY 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#define WTF_USE_AVFOUNDATION 1
+#endif
+
+#if (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 60000) || (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080)
+#define WTF_USE_COREMEDIA 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#define HAVE_AVFOUNDATION_TEXT_TRACK_SUPPORT 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#define HAVE_MEDIA_ACCESSIBILITY_FRAMEWORK 1
+#endif
+
+#if PLATFORM(MAC) || PLATFORM(GTK) || (PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)) || PLATFORM(BLACKBERRY)
+#define WTF_USE_REQUEST_ANIMATION_FRAME_TIMER 1
+#endif
+
+#if PLATFORM(MAC) || PLATFORM(BLACKBERRY)
+#define WTF_USE_REQUEST_ANIMATION_FRAME_DISPLAY_MONITOR 1
+#endif
+
+#if PLATFORM(MAC) && (PLATFORM(IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070)
+#define HAVE_INVERTED_WHEEL_EVENTS 1
+#endif
+
+#if PLATFORM(MAC)
+#define WTF_USE_COREAUDIO 1
+#endif
+
+#if !defined(WTF_USE_ZLIB) && !PLATFORM(QT)
+#define WTF_USE_ZLIB 1
+#endif
+
+#if PLATFORM(QT)
+#include <qglobal.h>
+#if defined(QT_OPENGL_ES_2) && !defined(WTF_USE_OPENGL_ES_2)
+#define WTF_USE_OPENGL_ES_2 1
+#endif
+#endif
+
+#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+#define WTF_USE_CONTENT_FILTERING 1
+#endif
+
+#endif /* WTF_Platform_h */
diff --git a/src/3rdparty/masm/wtf/PossiblyNull.h b/src/3rdparty/masm/wtf/PossiblyNull.h
new file mode 100644
index 0000000000..46a7d713be
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PossiblyNull.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PossiblyNull_h
+#define PossiblyNull_h
+
+#include <wtf/Assertions.h>
+
+namespace WTF {
+
+template <typename T> struct PossiblyNull {
+ PossiblyNull(T data)
+ : m_data(data)
+ {
+ }
+ // Copying transfers the value: the source is cleared (through the mutable
+ // member) so exactly one instance remains responsible for consuming it.
+ PossiblyNull(const PossiblyNull<T>& source)
+ : m_data(source.m_data)
+ {
+ source.m_data = 0;
+ }
+ ~PossiblyNull() { ASSERT(!m_data); }
+ bool getValue(T& out) WARN_UNUSED_RETURN;
+private:
+ mutable T m_data;
+};
+
+template <typename T> bool PossiblyNull<T>::getValue(T& out)
+{
+ out = m_data;
+ bool result = !!m_data;
+ m_data = 0;
+ return result;
+}
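+
+/* Illustrative usage sketch (not part of this header): the wrapped value must
+   be consumed exactly once via getValue(), or the destructor's ASSERT fires in
+   debug builds.
+
+       PossiblyNull<char*> buffer(static_cast<char*>(malloc(64)));
+       char* data;
+       if (buffer.getValue(data))
+           free(data); // getValue() returned true, so data is non-null
+*/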
+
+}
+
+#endif
diff --git a/src/3rdparty/masm/wtf/PrintStream.cpp b/src/3rdparty/masm/wtf/PrintStream.cpp
new file mode 100644
index 0000000000..3bf362e281
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PrintStream.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PrintStream.h"
+
+#include <stdio.h>
+#include <wtf/text/CString.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+PrintStream::PrintStream() { }
+PrintStream::~PrintStream() { } // Force the vtable to be in this module
+
+void PrintStream::printf(const char* format, ...)
+{
+ va_list argList;
+ va_start(argList, format);
+ vprintf(format, argList);
+ va_end(argList);
+}
+
+void PrintStream::flush()
+{
+}
+
+void printInternal(PrintStream& out, const char* string)
+{
+ out.printf("%s", string);
+}
+
+void printInternal(PrintStream& out, bool value)
+{
+ if (value)
+ out.print("true");
+ else
+ out.print("false");
+}
+
+void printInternal(PrintStream& out, int value)
+{
+ out.printf("%d", value);
+}
+
+void printInternal(PrintStream& out, unsigned value)
+{
+ out.printf("%u", value);
+}
+
+void printInternal(PrintStream& out, long value)
+{
+ out.printf("%ld", value);
+}
+
+void printInternal(PrintStream& out, unsigned long value)
+{
+ out.printf("%lu", value);
+}
+
+void printInternal(PrintStream& out, long long value)
+{
+ out.printf("%lld", value);
+}
+
+void printInternal(PrintStream& out, unsigned long long value)
+{
+ out.printf("%llu", value);
+}
+
+void printInternal(PrintStream& out, float value)
+{
+ out.print(static_cast<double>(value));
+}
+
+void printInternal(PrintStream& out, double value)
+{
+ out.printf("%lf", value);
+}
+
+void printInternal(PrintStream& out, RawPointer value)
+{
+ out.printf("%p", value.value());
+}
+
+void dumpCharacter(PrintStream& out, char value)
+{
+ out.printf("%c", value);
+}
+
+} // namespace WTF
+
diff --git a/src/3rdparty/masm/wtf/PrintStream.h b/src/3rdparty/masm/wtf/PrintStream.h
new file mode 100644
index 0000000000..6fcf9c1567
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PrintStream.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PrintStream_h
+#define PrintStream_h
+
+#include <stdarg.h>
+#include <wtf/FastAllocBase.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Platform.h>
+#include <wtf/RawPointer.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+class CString;
+class String;
+
+class PrintStream {
+ WTF_MAKE_FAST_ALLOCATED; WTF_MAKE_NONCOPYABLE(PrintStream);
+public:
+ PrintStream();
+ virtual ~PrintStream();
+
+ void printf(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+ virtual void vprintf(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(2, 0) = 0;
+
+ // A no-op in many subclasses of PrintStream; this is a hint that the
+ // implementation should flush its buffers if it has not done so already.
+ virtual void flush();
+
+ template<typename T>
+ void print(const T& value)
+ {
+ printInternal(*this, value);
+ }
+
+ template<typename T1, typename T2>
+ void print(const T1& value1, const T2& value2)
+ {
+ print(value1);
+ print(value2);
+ }
+
+ template<typename T1, typename T2, typename T3>
+ void print(const T1& value1, const T2& value2, const T3& value3)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ print(value11);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ print(value11);
+ print(value12);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ print(value11);
+ print(value12);
+ print(value13);
+ }
+};
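+
+// PrintStream is abstract (vprintf is pure virtual); a minimal concrete
+// subclass is enough to use the variadic print() helpers. An illustrative
+// sketch, not part of WTF:
+//
+//     class StderrPrintStream : public PrintStream {
+//     public:
+//         void vprintf(const char* format, va_list argList) WTF_ATTRIBUTE_PRINTF(2, 0)
+//         {
+//             vfprintf(stderr, format, argList);
+//         }
+//     };
+//
+//     StderrPrintStream out;
+//     out.print("loaded ", 42, " items\n"); // dispatches through printInternal() overloads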
+
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const char*);
+inline void printInternal(PrintStream& out, char* value) { printInternal(out, static_cast<const char*>(value)); }
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, bool);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, int);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, long long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned long long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, float);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, double);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, RawPointer);
+
+template<typename T>
+void printInternal(PrintStream& out, const T& value)
+{
+ value.dump(out);
+}
+
+#define MAKE_PRINT_ADAPTOR(Name, Type, function) \
+ class Name { \
+ public: \
+ Name(const Type& value) \
+ : m_value(value) \
+ { \
+ } \
+ void dump(PrintStream& out) const \
+ { \
+ function(out, m_value); \
+ } \
+ private: \
+ Type m_value; \
+ }
+
+#define MAKE_PRINT_METHOD_ADAPTOR(Name, Type, method) \
+ class Name { \
+ public: \
+ Name(const Type& value) \
+ : m_value(value) \
+ { \
+ } \
+ void dump(PrintStream& out) const \
+ { \
+ m_value.method(out); \
+ } \
+ private: \
+ const Type& m_value; \
+ }
+
+#define MAKE_PRINT_METHOD(Type, dumpMethod, method) \
+ MAKE_PRINT_METHOD_ADAPTOR(DumperFor_##method, Type, dumpMethod); \
+ DumperFor_##method method() const { return DumperFor_##method(*this); }
+
+// Use an adaptor-based dumper for characters to avoid situations where
+// you've "compressed" an integer to a character and it ends up printing
+// as ASCII when you wanted it to print as a number.
+void dumpCharacter(PrintStream&, char);
+MAKE_PRINT_ADAPTOR(CharacterDump, char, dumpCharacter);
+
+template<typename T>
+class PointerDump {
+public:
+ PointerDump(const T* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_ptr)
+ printInternal(out, *m_ptr);
+ else
+ out.print("(null)");
+ }
+private:
+ const T* m_ptr;
+};
+
+template<typename T>
+PointerDump<T> pointerDump(const T* ptr) { return PointerDump<T>(ptr); }
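+
+// Illustrative: for any type T with a dump(PrintStream&) method, pointerDump()
+// makes a possibly-null pointer safe to print, and CharacterDump prints a char
+// as a character rather than as a number:
+//
+//     out.print(pointerDump(maybeNullObject)); // "(null)" when the pointer is 0
+//     out.print(CharacterDump('\n'));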
+
+} // namespace WTF
+
+using WTF::CharacterDump;
+using WTF::PointerDump;
+using WTF::PrintStream;
+using WTF::pointerDump;
+
+#endif // PrintStream_h
+
diff --git a/src/3rdparty/masm/wtf/RawPointer.h b/src/3rdparty/masm/wtf/RawPointer.h
new file mode 100644
index 0000000000..6dc7292fb4
--- /dev/null
+++ b/src/3rdparty/masm/wtf/RawPointer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RawPointer_h
+#define RawPointer_h
+
+namespace WTF {
+
+class RawPointer {
+public:
+ RawPointer()
+ : m_value(0)
+ {
+ }
+
+ explicit RawPointer(void* value)
+ : m_value(value)
+ {
+ }
+
+ explicit RawPointer(const void* value)
+ : m_value(value)
+ {
+ }
+
+ const void* value() const { return m_value; }
+
+private:
+ const void* m_value;
+};
+
+} // namespace WTF
+
+using WTF::RawPointer;
+
+#endif // RawPointer_h
diff --git a/src/3rdparty/masm/wtf/StdLibExtras.h b/src/3rdparty/masm/wtf/StdLibExtras.h
new file mode 100644
index 0000000000..605f98ec82
--- /dev/null
+++ b/src/3rdparty/masm/wtf/StdLibExtras.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_StdLibExtras_h
+#define WTF_StdLibExtras_h
+
+#include <wtf/Assertions.h>
+#include <wtf/CheckedArithmetic.h>
+
+// Use these to declare and define a static local variable (static T;) so that
+// it is leaked and its destructor is not called at exit. Using this macro
+// also works around a compiler bug present in Apple's version of GCC 4.0.1.
+#ifndef DEFINE_STATIC_LOCAL
+#if COMPILER(GCC) && defined(__APPLE_CC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 0 && __GNUC_PATCHLEVEL__ == 1
+#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type* name##Ptr = new type arguments; \
+ type& name = *name##Ptr
+#else
+#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type& name = *new type arguments
+#endif
+#endif
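+
+// Illustrative use: a process-lifetime singleton that is intentionally leaked
+// (SharedCache is a hypothetical type):
+//
+//     static SharedCache& sharedCache()
+//     {
+//         DEFINE_STATIC_LOCAL(SharedCache, cache, ());
+//         return cache; // never destructed at exit
+//     }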
+
+// Use this macro to declare and define a debug-only global variable that may have a
+// non-trivial constructor and destructor. When building with clang, this will suppress
+// warnings about global constructors and exit-time destructors.
+#ifndef NDEBUG
+#if COMPILER(CLANG)
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
+ _Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
+ static type name arguments; \
+ _Pragma("clang diagnostic pop")
+#else
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) \
+ static type name arguments;
+#endif // COMPILER(CLANG)
+#else
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments)
+#endif // NDEBUG
+
+// OBJECT_OFFSETOF: Like the C++ offsetof macro, but you can use it with classes.
+// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
+// NULL can cause compiler problems, especially in cases of multiple inheritance.
+#define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
+
+// STRINGIZE: Can convert any value to quoted string, even expandable macros
+#define STRINGIZE(exp) #exp
+#define STRINGIZE_VALUE_OF(exp) STRINGIZE(exp)
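+
+// Illustrative: STRINGIZE(__LINE__) yields "__LINE__", while
+// STRINGIZE_VALUE_OF(__LINE__) expands the macro first, yielding e.g. "42".
+// OBJECT_OFFSETOF is typically fed to JIT code that loads fields by offset:
+//
+//     struct Cell { void* structure; int length; }; // hypothetical layout
+//     ptrdiff_t offset = OBJECT_OFFSETOF(Cell, length);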
+
+/*
+ * The reinterpret_cast<Type1*>([pointer to Type2]) expressions - where
+ * sizeof(Type1) > sizeof(Type2) - cause the following warning on ARM with GCC:
+ * increases required alignment of target type.
+ *
+ * An implicit or an extra static_cast<void*> bypasses the warning.
+ * For more info see the following bugzilla entries:
+ * - https://bugs.webkit.org/show_bug.cgi?id=38045
+ * - http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43976
+ */
+#if (CPU(ARM) || CPU(MIPS)) && COMPILER(GCC)
+template<typename Type>
+bool isPointerTypeAlignmentOkay(Type* ptr)
+{
+ return !(reinterpret_cast<intptr_t>(ptr) % __alignof__(Type));
+}
+
+template<typename TypePtr>
+TypePtr reinterpret_cast_ptr(void* ptr)
+{
+ ASSERT(isPointerTypeAlignmentOkay(reinterpret_cast<TypePtr>(ptr)));
+ return reinterpret_cast<TypePtr>(ptr);
+}
+
+template<typename TypePtr>
+TypePtr reinterpret_cast_ptr(const void* ptr)
+{
+ ASSERT(isPointerTypeAlignmentOkay(reinterpret_cast<TypePtr>(ptr)));
+ return reinterpret_cast<TypePtr>(ptr);
+}
+#else
+template<typename Type>
+bool isPointerTypeAlignmentOkay(Type*)
+{
+ return true;
+}
+#define reinterpret_cast_ptr reinterpret_cast
+#endif
+
+namespace WTF {
+
+static const size_t KB = 1024;
+static const size_t MB = 1024 * 1024;
+
+inline bool isPointerAligned(void* p)
+{
+ return !((intptr_t)(p) & (sizeof(char*) - 1));
+}
+
+inline bool is8ByteAligned(void* p)
+{
+ return !((uintptr_t)(p) & (sizeof(double) - 1));
+}
+
+/*
+ * C++'s idea of a reinterpret_cast lacks sufficient cojones.
+ */
+template<typename TO, typename FROM>
+inline TO bitwise_cast(FROM from)
+{
+ COMPILE_ASSERT(sizeof(TO) == sizeof(FROM), WTF_bitwise_cast_sizeof_casted_types_is_equal);
+ union {
+ FROM from;
+ TO to;
+ } u;
+ u.from = from;
+ return u.to;
+}
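+
+// Illustrative: inspect the bit pattern of a double without the strict-aliasing
+// hazards of a pointer cast:
+//
+//     uint64_t bits = bitwise_cast<uint64_t>(1.0); // 0x3ff0000000000000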
+
+template<typename To, typename From>
+inline To safeCast(From value)
+{
+ ASSERT(isInBounds<To>(value));
+ return static_cast<To>(value);
+}
+
+// Returns a count of the number of bits set in 'bits'.
+inline size_t bitCount(unsigned bits)
+{
+ bits = bits - ((bits >> 1) & 0x55555555);
+ bits = (bits & 0x33333333) + ((bits >> 2) & 0x33333333);
+ return (((bits + (bits >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
+}
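+
+// Illustrative: bitCount(0) == 0, bitCount(0xF0) == 4, bitCount(0xFFFFFFFFu) == 32.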
+
+// Macro that returns a compile-time constant with the length of an array, but gives an error if passed a non-array.
+template<typename T, size_t Size> char (&ArrayLengthHelperFunction(T (&)[Size]))[Size];
+// GCC needs some help to deduce a 0-length array.
+#if COMPILER(GCC)
+template<typename T> char (&ArrayLengthHelperFunction(T (&)[0]))[0];
+#endif
+#define WTF_ARRAY_LENGTH(array) sizeof(::WTF::ArrayLengthHelperFunction(array))
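+
+// Illustrative: given 'static const int primes[] = { 2, 3, 5 };',
+// WTF_ARRAY_LENGTH(primes) is the compile-time constant 3, whereas passing a
+// plain pointer fails to compile.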
+
+// Efficient implementation that takes advantage of powers of two.
+inline size_t roundUpToMultipleOf(size_t divisor, size_t x)
+{
+ ASSERT(divisor && !(divisor & (divisor - 1)));
+ size_t remainderMask = divisor - 1;
+ return (x + remainderMask) & ~remainderMask;
+}
+template<size_t divisor> inline size_t roundUpToMultipleOf(size_t x)
+{
+ COMPILE_ASSERT(divisor && !(divisor & (divisor - 1)), divisor_is_a_power_of_two);
+ return roundUpToMultipleOf(divisor, x);
+}
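+
+// Illustrative: roundUpToMultipleOf<8>(13) == 16 and roundUpToMultipleOf<8>(16) == 16;
+// the divisor must be a power of two for the mask arithmetic to hold.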
+
+enum BinarySearchMode {
+ KeyMustBePresentInArray,
+ KeyMightNotBePresentInArray,
+ ReturnAdjacentElementIfKeyIsNotPresent
+};
+
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey, BinarySearchMode mode>
+inline ArrayElementType* binarySearchImpl(ArrayType& array, size_t size, KeyType key, const ExtractKey& extractKey = ExtractKey())
+{
+ size_t offset = 0;
+ while (size > 1) {
+ size_t pos = (size - 1) >> 1;
+ KeyType val = extractKey(&array[offset + pos]);
+
+ if (val == key)
+ return &array[offset + pos];
+ // The item we are looking for is smaller than the item being checked; reduce the value of 'size',
+ // chopping off the right-hand half of the array.
+ if (key < val)
+ size = pos;
+ // Discard all values in the left hand half of the array, up to and including the item at pos.
+ else {
+ size -= (pos + 1);
+ offset += (pos + 1);
+ }
+
+ ASSERT(mode != KeyMustBePresentInArray || size);
+ }
+
+ if (mode == KeyMightNotBePresentInArray && !size)
+ return 0;
+
+ ArrayElementType* result = &array[offset];
+
+ if (mode == KeyMightNotBePresentInArray && key != extractKey(result))
+ return 0;
+
+ if (mode == KeyMustBePresentInArray) {
+ ASSERT(size == 1);
+ ASSERT(key == extractKey(result));
+ }
+
+ return result;
+}
+
+// If the element is not found, crash if asserts are enabled, and behave like approximateBinarySearch in release builds.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* binarySearch(ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMustBePresentInArray>(array, size, key, extractKey);
+}
+
+// Return zero if the element is not found.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* tryBinarySearch(ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMightNotBePresentInArray>(array, size, key, extractKey);
+}
+
+// Return the element that is either to the left, or the right, of where the element would have been found.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* approximateBinarySearch(ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, ReturnAdjacentElementIfKeyIsNotPresent>(array, size, key, extractKey);
+}
+
+// Variants of the above that use const.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* binarySearch(const ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMustBePresentInArray>(const_cast<ArrayType&>(array), size, key, extractKey);
+}
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* tryBinarySearch(const ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMightNotBePresentInArray>(const_cast<ArrayType&>(array), size, key, extractKey);
+}
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* approximateBinarySearch(const ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, ReturnAdjacentElementIfKeyIsNotPresent>(const_cast<ArrayType&>(array), size, key, extractKey);
+}
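+
+// Illustrative use of the search helpers (Entry and its key layout are
+// hypothetical; the array must be sorted by the extracted key):
+//
+//     struct Entry { unsigned key; int value; };
+//     struct ExtractKey {
+//         unsigned operator()(const Entry* entry) const { return entry->key; }
+//     };
+//     Entry* match = tryBinarySearch<Entry, unsigned>(entries, entriesSize, 42u, ExtractKey());
+//     // match is 0 if no entry has key 42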
+
+} // namespace WTF
+
+// This version of placement new omits a 0 check.
+enum NotNullTag { NotNull };
+inline void* operator new(size_t, NotNullTag, void* location)
+{
+ ASSERT(location);
+ return location;
+}
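+
+// Illustrative: when the address is already known to be valid, this variant
+// skips the null check that the standard placement new performs:
+//
+//     void* slot = fastMalloc(sizeof(Widget)); // hypothetical allocation and type
+//     Widget* widget = new (NotNull, slot) Widget();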
+
+using WTF::KB;
+using WTF::MB;
+using WTF::isPointerAligned;
+using WTF::is8ByteAligned;
+using WTF::binarySearch;
+using WTF::tryBinarySearch;
+using WTF::approximateBinarySearch;
+using WTF::bitwise_cast;
+using WTF::safeCast;
+
+#endif // WTF_StdLibExtras_h
diff --git a/src/3rdparty/masm/wtf/VMTags.h b/src/3rdparty/masm/wtf/VMTags.h
new file mode 100644
index 0000000000..117bc3721e
--- /dev/null
+++ b/src/3rdparty/masm/wtf/VMTags.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VMTags_h
+#define VMTags_h
+
+// On Mac OS X, the VM subsystem allows tagging memory requested from mmap and vm_map
+// in order to aid tools that inspect system memory use.
+#if OS(DARWIN)
+
+#include <mach/vm_statistics.h>
+
+#if defined(VM_MEMORY_TCMALLOC)
+#define VM_TAG_FOR_TCMALLOC_MEMORY VM_MAKE_TAG(VM_MEMORY_TCMALLOC)
+#else
+#define VM_TAG_FOR_TCMALLOC_MEMORY VM_MAKE_TAG(53)
+#endif // defined(VM_MEMORY_TCMALLOC)
+
+#if defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+#else
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(64)
+#endif // defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+
+#if defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+#else
+#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(65)
+#endif // defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+
+#if defined(VM_MEMORY_JAVASCRIPT_CORE)
+#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_CORE)
+#else
+#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(63)
+#endif // defined(VM_MEMORY_JAVASCRIPT_CORE)
+
+#if defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
+#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
+#else
+#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(69)
+#endif // defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
+
+#else // OS(DARWIN)
+
+#define VM_TAG_FOR_TCMALLOC_MEMORY -1
+#define VM_TAG_FOR_COLLECTOR_MEMORY -1
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1
+#define VM_TAG_FOR_REGISTERFILE_MEMORY -1
+#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY -1
+
+#endif // OS(DARWIN)
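+
+// Illustrative: for anonymous mappings on Darwin the tag is passed in place of
+// the file descriptor, letting tools such as vmmap attribute the memory:
+//
+//     void* base = mmap(0, size, PROT_READ | PROT_WRITE,
+//         MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_COLLECTOR_MEMORY, 0);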
+
+#endif // VMTags_h
diff --git a/src/3rdparty/masm/yarr/Yarr.h b/src/3rdparty/masm/yarr/Yarr.h
new file mode 100644
index 0000000000..d393e9fa90
--- /dev/null
+++ b/src/3rdparty/masm/yarr/Yarr.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Yarr_h
+#define Yarr_h
+
+#include "YarrInterpreter.h"
+#include "YarrPattern.h"
+
+namespace JSC { namespace Yarr {
+
+#define YarrStackSpaceForBackTrackInfoPatternCharacter 1 // Only for !fixed quantifiers.
+#define YarrStackSpaceForBackTrackInfoCharacterClass 1 // Only for !fixed quantifiers.
+#define YarrStackSpaceForBackTrackInfoBackReference 2
+#define YarrStackSpaceForBackTrackInfoAlternative 1 // One per alternative.
+#define YarrStackSpaceForBackTrackInfoParentheticalAssertion 1
+#define YarrStackSpaceForBackTrackInfoParenthesesOnce 1 // Only for !fixed quantifiers.
+#define YarrStackSpaceForBackTrackInfoParenthesesTerminal 1
+#define YarrStackSpaceForBackTrackInfoParentheses 2
+
+static const unsigned quantifyInfinite = UINT_MAX;
+static const unsigned offsetNoMatch = (unsigned)-1;
+
+// The limit below restricts the number of "recursive" match calls in order to
+// avoid spending exponential time on complex regular expressions.
+static const unsigned matchLimit = 1000000;
+
+enum JSRegExpResult {
+ JSRegExpMatch = 1,
+ JSRegExpNoMatch = 0,
+ JSRegExpErrorNoMatch = -1,
+ JSRegExpErrorHitLimit = -2,
+ JSRegExpErrorNoMemory = -3,
+ JSRegExpErrorInternal = -4
+};
+
+enum YarrCharSize {
+ Char8,
+ Char16
+};
+
+} } // namespace JSC::Yarr
+
+#endif // Yarr_h
+
diff --git a/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp
new file mode 100644
index 0000000000..7bb3d08eb5
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// DO NOT EDIT! - this file autogenerated by YarrCanonicalizeUCS2.js
+
+#include "config.h"
+#include "YarrCanonicalizeUCS2.h"
+
+#include <stdint.h>
+
+namespace JSC { namespace Yarr {
+
+uint16_t ucs2CharacterSet0[] = { 0x01c4u, 0x01c5u, 0x01c6u, 0 };
+uint16_t ucs2CharacterSet1[] = { 0x01c7u, 0x01c8u, 0x01c9u, 0 };
+uint16_t ucs2CharacterSet2[] = { 0x01cau, 0x01cbu, 0x01ccu, 0 };
+uint16_t ucs2CharacterSet3[] = { 0x01f1u, 0x01f2u, 0x01f3u, 0 };
+uint16_t ucs2CharacterSet4[] = { 0x0392u, 0x03b2u, 0x03d0u, 0 };
+uint16_t ucs2CharacterSet5[] = { 0x0395u, 0x03b5u, 0x03f5u, 0 };
+uint16_t ucs2CharacterSet6[] = { 0x0398u, 0x03b8u, 0x03d1u, 0 };
+uint16_t ucs2CharacterSet7[] = { 0x0345u, 0x0399u, 0x03b9u, 0x1fbeu, 0 };
+uint16_t ucs2CharacterSet8[] = { 0x039au, 0x03bau, 0x03f0u, 0 };
+uint16_t ucs2CharacterSet9[] = { 0x00b5u, 0x039cu, 0x03bcu, 0 };
+uint16_t ucs2CharacterSet10[] = { 0x03a0u, 0x03c0u, 0x03d6u, 0 };
+uint16_t ucs2CharacterSet11[] = { 0x03a1u, 0x03c1u, 0x03f1u, 0 };
+uint16_t ucs2CharacterSet12[] = { 0x03a3u, 0x03c2u, 0x03c3u, 0 };
+uint16_t ucs2CharacterSet13[] = { 0x03a6u, 0x03c6u, 0x03d5u, 0 };
+uint16_t ucs2CharacterSet14[] = { 0x1e60u, 0x1e61u, 0x1e9bu, 0 };
+
+static const size_t UCS2_CANONICALIZATION_SETS = 15;
+uint16_t* characterSetInfo[UCS2_CANONICALIZATION_SETS] = {
+ ucs2CharacterSet0,
+ ucs2CharacterSet1,
+ ucs2CharacterSet2,
+ ucs2CharacterSet3,
+ ucs2CharacterSet4,
+ ucs2CharacterSet5,
+ ucs2CharacterSet6,
+ ucs2CharacterSet7,
+ ucs2CharacterSet8,
+ ucs2CharacterSet9,
+ ucs2CharacterSet10,
+ ucs2CharacterSet11,
+ ucs2CharacterSet12,
+ ucs2CharacterSet13,
+ ucs2CharacterSet14,
+};
+
+const size_t UCS2_CANONICALIZATION_RANGES = 364;
+UCS2CanonicalizationRange rangeInfo[UCS2_CANONICALIZATION_RANGES] = {
+ { 0x0000u, 0x0040u, 0x0000u, CanonicalizeUnique },
+ { 0x0041u, 0x005au, 0x0020u, CanonicalizeRangeLo },
+ { 0x005bu, 0x0060u, 0x0000u, CanonicalizeUnique },
+ { 0x0061u, 0x007au, 0x0020u, CanonicalizeRangeHi },
+ { 0x007bu, 0x00b4u, 0x0000u, CanonicalizeUnique },
+ { 0x00b5u, 0x00b5u, 0x0009u, CanonicalizeSet },
+ { 0x00b6u, 0x00bfu, 0x0000u, CanonicalizeUnique },
+ { 0x00c0u, 0x00d6u, 0x0020u, CanonicalizeRangeLo },
+ { 0x00d7u, 0x00d7u, 0x0000u, CanonicalizeUnique },
+ { 0x00d8u, 0x00deu, 0x0020u, CanonicalizeRangeLo },
+ { 0x00dfu, 0x00dfu, 0x0000u, CanonicalizeUnique },
+ { 0x00e0u, 0x00f6u, 0x0020u, CanonicalizeRangeHi },
+ { 0x00f7u, 0x00f7u, 0x0000u, CanonicalizeUnique },
+ { 0x00f8u, 0x00feu, 0x0020u, CanonicalizeRangeHi },
+ { 0x00ffu, 0x00ffu, 0x0079u, CanonicalizeRangeLo },
+ { 0x0100u, 0x012fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0130u, 0x0131u, 0x0000u, CanonicalizeUnique },
+ { 0x0132u, 0x0137u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0138u, 0x0138u, 0x0000u, CanonicalizeUnique },
+ { 0x0139u, 0x0148u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0149u, 0x0149u, 0x0000u, CanonicalizeUnique },
+ { 0x014au, 0x0177u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0178u, 0x0178u, 0x0079u, CanonicalizeRangeHi },
+ { 0x0179u, 0x017eu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x017fu, 0x017fu, 0x0000u, CanonicalizeUnique },
+ { 0x0180u, 0x0180u, 0x00c3u, CanonicalizeRangeLo },
+ { 0x0181u, 0x0181u, 0x00d2u, CanonicalizeRangeLo },
+ { 0x0182u, 0x0185u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0186u, 0x0186u, 0x00ceu, CanonicalizeRangeLo },
+ { 0x0187u, 0x0188u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0189u, 0x018au, 0x00cdu, CanonicalizeRangeLo },
+ { 0x018bu, 0x018cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x018du, 0x018du, 0x0000u, CanonicalizeUnique },
+ { 0x018eu, 0x018eu, 0x004fu, CanonicalizeRangeLo },
+ { 0x018fu, 0x018fu, 0x00cau, CanonicalizeRangeLo },
+ { 0x0190u, 0x0190u, 0x00cbu, CanonicalizeRangeLo },
+ { 0x0191u, 0x0192u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0193u, 0x0193u, 0x00cdu, CanonicalizeRangeLo },
+ { 0x0194u, 0x0194u, 0x00cfu, CanonicalizeRangeLo },
+ { 0x0195u, 0x0195u, 0x0061u, CanonicalizeRangeLo },
+ { 0x0196u, 0x0196u, 0x00d3u, CanonicalizeRangeLo },
+ { 0x0197u, 0x0197u, 0x00d1u, CanonicalizeRangeLo },
+ { 0x0198u, 0x0199u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x019au, 0x019au, 0x00a3u, CanonicalizeRangeLo },
+ { 0x019bu, 0x019bu, 0x0000u, CanonicalizeUnique },
+ { 0x019cu, 0x019cu, 0x00d3u, CanonicalizeRangeLo },
+ { 0x019du, 0x019du, 0x00d5u, CanonicalizeRangeLo },
+ { 0x019eu, 0x019eu, 0x0082u, CanonicalizeRangeLo },
+ { 0x019fu, 0x019fu, 0x00d6u, CanonicalizeRangeLo },
+ { 0x01a0u, 0x01a5u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01a6u, 0x01a6u, 0x00dau, CanonicalizeRangeLo },
+ { 0x01a7u, 0x01a8u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01a9u, 0x01a9u, 0x00dau, CanonicalizeRangeLo },
+ { 0x01aau, 0x01abu, 0x0000u, CanonicalizeUnique },
+ { 0x01acu, 0x01adu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01aeu, 0x01aeu, 0x00dau, CanonicalizeRangeLo },
+ { 0x01afu, 0x01b0u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01b1u, 0x01b2u, 0x00d9u, CanonicalizeRangeLo },
+ { 0x01b3u, 0x01b6u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01b7u, 0x01b7u, 0x00dbu, CanonicalizeRangeLo },
+ { 0x01b8u, 0x01b9u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01bau, 0x01bbu, 0x0000u, CanonicalizeUnique },
+ { 0x01bcu, 0x01bdu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01beu, 0x01beu, 0x0000u, CanonicalizeUnique },
+ { 0x01bfu, 0x01bfu, 0x0038u, CanonicalizeRangeLo },
+ { 0x01c0u, 0x01c3u, 0x0000u, CanonicalizeUnique },
+ { 0x01c4u, 0x01c6u, 0x0000u, CanonicalizeSet },
+ { 0x01c7u, 0x01c9u, 0x0001u, CanonicalizeSet },
+ { 0x01cau, 0x01ccu, 0x0002u, CanonicalizeSet },
+ { 0x01cdu, 0x01dcu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01ddu, 0x01ddu, 0x004fu, CanonicalizeRangeHi },
+ { 0x01deu, 0x01efu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01f0u, 0x01f0u, 0x0000u, CanonicalizeUnique },
+ { 0x01f1u, 0x01f3u, 0x0003u, CanonicalizeSet },
+ { 0x01f4u, 0x01f5u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01f6u, 0x01f6u, 0x0061u, CanonicalizeRangeHi },
+ { 0x01f7u, 0x01f7u, 0x0038u, CanonicalizeRangeHi },
+ { 0x01f8u, 0x021fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0220u, 0x0220u, 0x0082u, CanonicalizeRangeHi },
+ { 0x0221u, 0x0221u, 0x0000u, CanonicalizeUnique },
+ { 0x0222u, 0x0233u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0234u, 0x0239u, 0x0000u, CanonicalizeUnique },
+ { 0x023au, 0x023au, 0x2a2bu, CanonicalizeRangeLo },
+ { 0x023bu, 0x023cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x023du, 0x023du, 0x00a3u, CanonicalizeRangeHi },
+ { 0x023eu, 0x023eu, 0x2a28u, CanonicalizeRangeLo },
+ { 0x023fu, 0x0240u, 0x2a3fu, CanonicalizeRangeLo },
+ { 0x0241u, 0x0242u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0243u, 0x0243u, 0x00c3u, CanonicalizeRangeHi },
+ { 0x0244u, 0x0244u, 0x0045u, CanonicalizeRangeLo },
+ { 0x0245u, 0x0245u, 0x0047u, CanonicalizeRangeLo },
+ { 0x0246u, 0x024fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0250u, 0x0250u, 0x2a1fu, CanonicalizeRangeLo },
+ { 0x0251u, 0x0251u, 0x2a1cu, CanonicalizeRangeLo },
+ { 0x0252u, 0x0252u, 0x2a1eu, CanonicalizeRangeLo },
+ { 0x0253u, 0x0253u, 0x00d2u, CanonicalizeRangeHi },
+ { 0x0254u, 0x0254u, 0x00ceu, CanonicalizeRangeHi },
+ { 0x0255u, 0x0255u, 0x0000u, CanonicalizeUnique },
+ { 0x0256u, 0x0257u, 0x00cdu, CanonicalizeRangeHi },
+ { 0x0258u, 0x0258u, 0x0000u, CanonicalizeUnique },
+ { 0x0259u, 0x0259u, 0x00cau, CanonicalizeRangeHi },
+ { 0x025au, 0x025au, 0x0000u, CanonicalizeUnique },
+ { 0x025bu, 0x025bu, 0x00cbu, CanonicalizeRangeHi },
+ { 0x025cu, 0x025fu, 0x0000u, CanonicalizeUnique },
+ { 0x0260u, 0x0260u, 0x00cdu, CanonicalizeRangeHi },
+ { 0x0261u, 0x0262u, 0x0000u, CanonicalizeUnique },
+ { 0x0263u, 0x0263u, 0x00cfu, CanonicalizeRangeHi },
+ { 0x0264u, 0x0264u, 0x0000u, CanonicalizeUnique },
+ { 0x0265u, 0x0265u, 0xa528u, CanonicalizeRangeLo },
+ { 0x0266u, 0x0267u, 0x0000u, CanonicalizeUnique },
+ { 0x0268u, 0x0268u, 0x00d1u, CanonicalizeRangeHi },
+ { 0x0269u, 0x0269u, 0x00d3u, CanonicalizeRangeHi },
+ { 0x026au, 0x026au, 0x0000u, CanonicalizeUnique },
+ { 0x026bu, 0x026bu, 0x29f7u, CanonicalizeRangeLo },
+ { 0x026cu, 0x026eu, 0x0000u, CanonicalizeUnique },
+ { 0x026fu, 0x026fu, 0x00d3u, CanonicalizeRangeHi },
+ { 0x0270u, 0x0270u, 0x0000u, CanonicalizeUnique },
+ { 0x0271u, 0x0271u, 0x29fdu, CanonicalizeRangeLo },
+ { 0x0272u, 0x0272u, 0x00d5u, CanonicalizeRangeHi },
+ { 0x0273u, 0x0274u, 0x0000u, CanonicalizeUnique },
+ { 0x0275u, 0x0275u, 0x00d6u, CanonicalizeRangeHi },
+ { 0x0276u, 0x027cu, 0x0000u, CanonicalizeUnique },
+ { 0x027du, 0x027du, 0x29e7u, CanonicalizeRangeLo },
+ { 0x027eu, 0x027fu, 0x0000u, CanonicalizeUnique },
+ { 0x0280u, 0x0280u, 0x00dau, CanonicalizeRangeHi },
+ { 0x0281u, 0x0282u, 0x0000u, CanonicalizeUnique },
+ { 0x0283u, 0x0283u, 0x00dau, CanonicalizeRangeHi },
+ { 0x0284u, 0x0287u, 0x0000u, CanonicalizeUnique },
+ { 0x0288u, 0x0288u, 0x00dau, CanonicalizeRangeHi },
+ { 0x0289u, 0x0289u, 0x0045u, CanonicalizeRangeHi },
+ { 0x028au, 0x028bu, 0x00d9u, CanonicalizeRangeHi },
+ { 0x028cu, 0x028cu, 0x0047u, CanonicalizeRangeHi },
+ { 0x028du, 0x0291u, 0x0000u, CanonicalizeUnique },
+ { 0x0292u, 0x0292u, 0x00dbu, CanonicalizeRangeHi },
+ { 0x0293u, 0x0344u, 0x0000u, CanonicalizeUnique },
+ { 0x0345u, 0x0345u, 0x0007u, CanonicalizeSet },
+ { 0x0346u, 0x036fu, 0x0000u, CanonicalizeUnique },
+ { 0x0370u, 0x0373u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0374u, 0x0375u, 0x0000u, CanonicalizeUnique },
+ { 0x0376u, 0x0377u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0378u, 0x037au, 0x0000u, CanonicalizeUnique },
+ { 0x037bu, 0x037du, 0x0082u, CanonicalizeRangeLo },
+ { 0x037eu, 0x0385u, 0x0000u, CanonicalizeUnique },
+ { 0x0386u, 0x0386u, 0x0026u, CanonicalizeRangeLo },
+ { 0x0387u, 0x0387u, 0x0000u, CanonicalizeUnique },
+ { 0x0388u, 0x038au, 0x0025u, CanonicalizeRangeLo },
+ { 0x038bu, 0x038bu, 0x0000u, CanonicalizeUnique },
+ { 0x038cu, 0x038cu, 0x0040u, CanonicalizeRangeLo },
+ { 0x038du, 0x038du, 0x0000u, CanonicalizeUnique },
+ { 0x038eu, 0x038fu, 0x003fu, CanonicalizeRangeLo },
+ { 0x0390u, 0x0390u, 0x0000u, CanonicalizeUnique },
+ { 0x0391u, 0x0391u, 0x0020u, CanonicalizeRangeLo },
+ { 0x0392u, 0x0392u, 0x0004u, CanonicalizeSet },
+ { 0x0393u, 0x0394u, 0x0020u, CanonicalizeRangeLo },
+ { 0x0395u, 0x0395u, 0x0005u, CanonicalizeSet },
+ { 0x0396u, 0x0397u, 0x0020u, CanonicalizeRangeLo },
+ { 0x0398u, 0x0398u, 0x0006u, CanonicalizeSet },
+ { 0x0399u, 0x0399u, 0x0007u, CanonicalizeSet },
+ { 0x039au, 0x039au, 0x0008u, CanonicalizeSet },
+ { 0x039bu, 0x039bu, 0x0020u, CanonicalizeRangeLo },
+ { 0x039cu, 0x039cu, 0x0009u, CanonicalizeSet },
+ { 0x039du, 0x039fu, 0x0020u, CanonicalizeRangeLo },
+ { 0x03a0u, 0x03a0u, 0x000au, CanonicalizeSet },
+ { 0x03a1u, 0x03a1u, 0x000bu, CanonicalizeSet },
+ { 0x03a2u, 0x03a2u, 0x0000u, CanonicalizeUnique },
+ { 0x03a3u, 0x03a3u, 0x000cu, CanonicalizeSet },
+ { 0x03a4u, 0x03a5u, 0x0020u, CanonicalizeRangeLo },
+ { 0x03a6u, 0x03a6u, 0x000du, CanonicalizeSet },
+ { 0x03a7u, 0x03abu, 0x0020u, CanonicalizeRangeLo },
+ { 0x03acu, 0x03acu, 0x0026u, CanonicalizeRangeHi },
+ { 0x03adu, 0x03afu, 0x0025u, CanonicalizeRangeHi },
+ { 0x03b0u, 0x03b0u, 0x0000u, CanonicalizeUnique },
+ { 0x03b1u, 0x03b1u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03b2u, 0x03b2u, 0x0004u, CanonicalizeSet },
+ { 0x03b3u, 0x03b4u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03b5u, 0x03b5u, 0x0005u, CanonicalizeSet },
+ { 0x03b6u, 0x03b7u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03b8u, 0x03b8u, 0x0006u, CanonicalizeSet },
+ { 0x03b9u, 0x03b9u, 0x0007u, CanonicalizeSet },
+ { 0x03bau, 0x03bau, 0x0008u, CanonicalizeSet },
+ { 0x03bbu, 0x03bbu, 0x0020u, CanonicalizeRangeHi },
+ { 0x03bcu, 0x03bcu, 0x0009u, CanonicalizeSet },
+ { 0x03bdu, 0x03bfu, 0x0020u, CanonicalizeRangeHi },
+ { 0x03c0u, 0x03c0u, 0x000au, CanonicalizeSet },
+ { 0x03c1u, 0x03c1u, 0x000bu, CanonicalizeSet },
+ { 0x03c2u, 0x03c3u, 0x000cu, CanonicalizeSet },
+ { 0x03c4u, 0x03c5u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03c6u, 0x03c6u, 0x000du, CanonicalizeSet },
+ { 0x03c7u, 0x03cbu, 0x0020u, CanonicalizeRangeHi },
+ { 0x03ccu, 0x03ccu, 0x0040u, CanonicalizeRangeHi },
+ { 0x03cdu, 0x03ceu, 0x003fu, CanonicalizeRangeHi },
+ { 0x03cfu, 0x03cfu, 0x0008u, CanonicalizeRangeLo },
+ { 0x03d0u, 0x03d0u, 0x0004u, CanonicalizeSet },
+ { 0x03d1u, 0x03d1u, 0x0006u, CanonicalizeSet },
+ { 0x03d2u, 0x03d4u, 0x0000u, CanonicalizeUnique },
+ { 0x03d5u, 0x03d5u, 0x000du, CanonicalizeSet },
+ { 0x03d6u, 0x03d6u, 0x000au, CanonicalizeSet },
+ { 0x03d7u, 0x03d7u, 0x0008u, CanonicalizeRangeHi },
+ { 0x03d8u, 0x03efu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x03f0u, 0x03f0u, 0x0008u, CanonicalizeSet },
+ { 0x03f1u, 0x03f1u, 0x000bu, CanonicalizeSet },
+ { 0x03f2u, 0x03f2u, 0x0007u, CanonicalizeRangeLo },
+ { 0x03f3u, 0x03f4u, 0x0000u, CanonicalizeUnique },
+ { 0x03f5u, 0x03f5u, 0x0005u, CanonicalizeSet },
+ { 0x03f6u, 0x03f6u, 0x0000u, CanonicalizeUnique },
+ { 0x03f7u, 0x03f8u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x03f9u, 0x03f9u, 0x0007u, CanonicalizeRangeHi },
+ { 0x03fau, 0x03fbu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x03fcu, 0x03fcu, 0x0000u, CanonicalizeUnique },
+ { 0x03fdu, 0x03ffu, 0x0082u, CanonicalizeRangeHi },
+ { 0x0400u, 0x040fu, 0x0050u, CanonicalizeRangeLo },
+ { 0x0410u, 0x042fu, 0x0020u, CanonicalizeRangeLo },
+ { 0x0430u, 0x044fu, 0x0020u, CanonicalizeRangeHi },
+ { 0x0450u, 0x045fu, 0x0050u, CanonicalizeRangeHi },
+ { 0x0460u, 0x0481u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0482u, 0x0489u, 0x0000u, CanonicalizeUnique },
+ { 0x048au, 0x04bfu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x04c0u, 0x04c0u, 0x000fu, CanonicalizeRangeLo },
+ { 0x04c1u, 0x04ceu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x04cfu, 0x04cfu, 0x000fu, CanonicalizeRangeHi },
+ { 0x04d0u, 0x0527u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0528u, 0x0530u, 0x0000u, CanonicalizeUnique },
+ { 0x0531u, 0x0556u, 0x0030u, CanonicalizeRangeLo },
+ { 0x0557u, 0x0560u, 0x0000u, CanonicalizeUnique },
+ { 0x0561u, 0x0586u, 0x0030u, CanonicalizeRangeHi },
+ { 0x0587u, 0x109fu, 0x0000u, CanonicalizeUnique },
+ { 0x10a0u, 0x10c5u, 0x1c60u, CanonicalizeRangeLo },
+ { 0x10c6u, 0x1d78u, 0x0000u, CanonicalizeUnique },
+ { 0x1d79u, 0x1d79u, 0x8a04u, CanonicalizeRangeLo },
+ { 0x1d7au, 0x1d7cu, 0x0000u, CanonicalizeUnique },
+ { 0x1d7du, 0x1d7du, 0x0ee6u, CanonicalizeRangeLo },
+ { 0x1d7eu, 0x1dffu, 0x0000u, CanonicalizeUnique },
+ { 0x1e00u, 0x1e5fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x1e60u, 0x1e61u, 0x000eu, CanonicalizeSet },
+ { 0x1e62u, 0x1e95u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x1e96u, 0x1e9au, 0x0000u, CanonicalizeUnique },
+ { 0x1e9bu, 0x1e9bu, 0x000eu, CanonicalizeSet },
+ { 0x1e9cu, 0x1e9fu, 0x0000u, CanonicalizeUnique },
+ { 0x1ea0u, 0x1effu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x1f00u, 0x1f07u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f08u, 0x1f0fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f10u, 0x1f15u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f16u, 0x1f17u, 0x0000u, CanonicalizeUnique },
+ { 0x1f18u, 0x1f1du, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f1eu, 0x1f1fu, 0x0000u, CanonicalizeUnique },
+ { 0x1f20u, 0x1f27u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f28u, 0x1f2fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f30u, 0x1f37u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f38u, 0x1f3fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f40u, 0x1f45u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f46u, 0x1f47u, 0x0000u, CanonicalizeUnique },
+ { 0x1f48u, 0x1f4du, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f4eu, 0x1f50u, 0x0000u, CanonicalizeUnique },
+ { 0x1f51u, 0x1f51u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f52u, 0x1f52u, 0x0000u, CanonicalizeUnique },
+ { 0x1f53u, 0x1f53u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f54u, 0x1f54u, 0x0000u, CanonicalizeUnique },
+ { 0x1f55u, 0x1f55u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f56u, 0x1f56u, 0x0000u, CanonicalizeUnique },
+ { 0x1f57u, 0x1f57u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f58u, 0x1f58u, 0x0000u, CanonicalizeUnique },
+ { 0x1f59u, 0x1f59u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f5au, 0x1f5au, 0x0000u, CanonicalizeUnique },
+ { 0x1f5bu, 0x1f5bu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f5cu, 0x1f5cu, 0x0000u, CanonicalizeUnique },
+ { 0x1f5du, 0x1f5du, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f5eu, 0x1f5eu, 0x0000u, CanonicalizeUnique },
+ { 0x1f5fu, 0x1f5fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f60u, 0x1f67u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f68u, 0x1f6fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f70u, 0x1f71u, 0x004au, CanonicalizeRangeLo },
+ { 0x1f72u, 0x1f75u, 0x0056u, CanonicalizeRangeLo },
+ { 0x1f76u, 0x1f77u, 0x0064u, CanonicalizeRangeLo },
+ { 0x1f78u, 0x1f79u, 0x0080u, CanonicalizeRangeLo },
+ { 0x1f7au, 0x1f7bu, 0x0070u, CanonicalizeRangeLo },
+ { 0x1f7cu, 0x1f7du, 0x007eu, CanonicalizeRangeLo },
+ { 0x1f7eu, 0x1fafu, 0x0000u, CanonicalizeUnique },
+ { 0x1fb0u, 0x1fb1u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1fb2u, 0x1fb7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fb8u, 0x1fb9u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1fbau, 0x1fbbu, 0x004au, CanonicalizeRangeHi },
+ { 0x1fbcu, 0x1fbdu, 0x0000u, CanonicalizeUnique },
+ { 0x1fbeu, 0x1fbeu, 0x0007u, CanonicalizeSet },
+ { 0x1fbfu, 0x1fc7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fc8u, 0x1fcbu, 0x0056u, CanonicalizeRangeHi },
+ { 0x1fccu, 0x1fcfu, 0x0000u, CanonicalizeUnique },
+ { 0x1fd0u, 0x1fd1u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1fd2u, 0x1fd7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fd8u, 0x1fd9u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1fdau, 0x1fdbu, 0x0064u, CanonicalizeRangeHi },
+ { 0x1fdcu, 0x1fdfu, 0x0000u, CanonicalizeUnique },
+ { 0x1fe0u, 0x1fe1u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1fe2u, 0x1fe4u, 0x0000u, CanonicalizeUnique },
+ { 0x1fe5u, 0x1fe5u, 0x0007u, CanonicalizeRangeLo },
+ { 0x1fe6u, 0x1fe7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fe8u, 0x1fe9u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1feau, 0x1febu, 0x0070u, CanonicalizeRangeHi },
+ { 0x1fecu, 0x1fecu, 0x0007u, CanonicalizeRangeHi },
+ { 0x1fedu, 0x1ff7u, 0x0000u, CanonicalizeUnique },
+ { 0x1ff8u, 0x1ff9u, 0x0080u, CanonicalizeRangeHi },
+ { 0x1ffau, 0x1ffbu, 0x007eu, CanonicalizeRangeHi },
+ { 0x1ffcu, 0x2131u, 0x0000u, CanonicalizeUnique },
+ { 0x2132u, 0x2132u, 0x001cu, CanonicalizeRangeLo },
+ { 0x2133u, 0x214du, 0x0000u, CanonicalizeUnique },
+ { 0x214eu, 0x214eu, 0x001cu, CanonicalizeRangeHi },
+ { 0x214fu, 0x215fu, 0x0000u, CanonicalizeUnique },
+ { 0x2160u, 0x216fu, 0x0010u, CanonicalizeRangeLo },
+ { 0x2170u, 0x217fu, 0x0010u, CanonicalizeRangeHi },
+ { 0x2180u, 0x2182u, 0x0000u, CanonicalizeUnique },
+ { 0x2183u, 0x2184u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2185u, 0x24b5u, 0x0000u, CanonicalizeUnique },
+ { 0x24b6u, 0x24cfu, 0x001au, CanonicalizeRangeLo },
+ { 0x24d0u, 0x24e9u, 0x001au, CanonicalizeRangeHi },
+ { 0x24eau, 0x2bffu, 0x0000u, CanonicalizeUnique },
+ { 0x2c00u, 0x2c2eu, 0x0030u, CanonicalizeRangeLo },
+ { 0x2c2fu, 0x2c2fu, 0x0000u, CanonicalizeUnique },
+ { 0x2c30u, 0x2c5eu, 0x0030u, CanonicalizeRangeHi },
+ { 0x2c5fu, 0x2c5fu, 0x0000u, CanonicalizeUnique },
+ { 0x2c60u, 0x2c61u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x2c62u, 0x2c62u, 0x29f7u, CanonicalizeRangeHi },
+ { 0x2c63u, 0x2c63u, 0x0ee6u, CanonicalizeRangeHi },
+ { 0x2c64u, 0x2c64u, 0x29e7u, CanonicalizeRangeHi },
+ { 0x2c65u, 0x2c65u, 0x2a2bu, CanonicalizeRangeHi },
+ { 0x2c66u, 0x2c66u, 0x2a28u, CanonicalizeRangeHi },
+ { 0x2c67u, 0x2c6cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2c6du, 0x2c6du, 0x2a1cu, CanonicalizeRangeHi },
+ { 0x2c6eu, 0x2c6eu, 0x29fdu, CanonicalizeRangeHi },
+ { 0x2c6fu, 0x2c6fu, 0x2a1fu, CanonicalizeRangeHi },
+ { 0x2c70u, 0x2c70u, 0x2a1eu, CanonicalizeRangeHi },
+ { 0x2c71u, 0x2c71u, 0x0000u, CanonicalizeUnique },
+ { 0x2c72u, 0x2c73u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x2c74u, 0x2c74u, 0x0000u, CanonicalizeUnique },
+ { 0x2c75u, 0x2c76u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2c77u, 0x2c7du, 0x0000u, CanonicalizeUnique },
+ { 0x2c7eu, 0x2c7fu, 0x2a3fu, CanonicalizeRangeHi },
+ { 0x2c80u, 0x2ce3u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x2ce4u, 0x2ceau, 0x0000u, CanonicalizeUnique },
+ { 0x2cebu, 0x2ceeu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2cefu, 0x2cffu, 0x0000u, CanonicalizeUnique },
+ { 0x2d00u, 0x2d25u, 0x1c60u, CanonicalizeRangeHi },
+ { 0x2d26u, 0xa63fu, 0x0000u, CanonicalizeUnique },
+ { 0xa640u, 0xa66du, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa66eu, 0xa67fu, 0x0000u, CanonicalizeUnique },
+ { 0xa680u, 0xa697u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa698u, 0xa721u, 0x0000u, CanonicalizeUnique },
+ { 0xa722u, 0xa72fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa730u, 0xa731u, 0x0000u, CanonicalizeUnique },
+ { 0xa732u, 0xa76fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa770u, 0xa778u, 0x0000u, CanonicalizeUnique },
+ { 0xa779u, 0xa77cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0xa77du, 0xa77du, 0x8a04u, CanonicalizeRangeHi },
+ { 0xa77eu, 0xa787u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa788u, 0xa78au, 0x0000u, CanonicalizeUnique },
+ { 0xa78bu, 0xa78cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0xa78du, 0xa78du, 0xa528u, CanonicalizeRangeHi },
+ { 0xa78eu, 0xa78fu, 0x0000u, CanonicalizeUnique },
+ { 0xa790u, 0xa791u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa792u, 0xa79fu, 0x0000u, CanonicalizeUnique },
+ { 0xa7a0u, 0xa7a9u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa7aau, 0xff20u, 0x0000u, CanonicalizeUnique },
+ { 0xff21u, 0xff3au, 0x0020u, CanonicalizeRangeLo },
+ { 0xff3bu, 0xff40u, 0x0000u, CanonicalizeUnique },
+ { 0xff41u, 0xff5au, 0x0020u, CanonicalizeRangeHi },
+ { 0xff5bu, 0xffffu, 0x0000u, CanonicalizeUnique },
+};
+
+const size_t LATIN_CANONICALIZATION_RANGES = 20;
+LatinCanonicalizationRange latinRangeInfo[LATIN_CANONICALIZATION_RANGES] = {
+ { 0x0000u, 0x0040u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x0041u, 0x005au, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x005bu, 0x0060u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x0061u, 0x007au, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x007bu, 0x00bfu, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00c0u, 0x00d6u, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00d7u, 0x00d7u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00d8u, 0x00deu, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00dfu, 0x00dfu, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00e0u, 0x00f6u, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00f7u, 0x00f7u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00f8u, 0x00feu, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00ffu, 0x00ffu, 0x0000u, CanonicalizeLatinSelf },
+ { 0x0100u, 0x0177u, 0x0000u, CanonicalizeLatinInvalid },
+ { 0x0178u, 0x0178u, 0x00ffu, CanonicalizeLatinOther },
+ { 0x0179u, 0x039bu, 0x0000u, CanonicalizeLatinInvalid },
+ { 0x039cu, 0x039cu, 0x00b5u, CanonicalizeLatinOther },
+ { 0x039du, 0x03bbu, 0x0000u, CanonicalizeLatinInvalid },
+ { 0x03bcu, 0x03bcu, 0x00b5u, CanonicalizeLatinOther },
+ { 0x03bdu, 0xffffu, 0x0000u, CanonicalizeLatinInvalid },
+};
+
+} } // JSC::Yarr
+
diff --git a/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h
new file mode 100644
index 0000000000..9dce78200c
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrCanonicalizeUCS2_H
+#define YarrCanonicalizeUCS2_H
+
+#include <stdint.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace JSC { namespace Yarr {
+
+// This set of data (autogenerated by YarrCanonicalizeUCS2.js into YarrCanonicalizeUCS2.cpp)
+// provides information for each UCS2 code point as to the set of code points that it should
+// match under the ES5.1 case-insensitive RegExp matching rules, specified in 15.10.2.8.
+enum UCS2CanonicalizationType {
+ CanonicalizeUnique, // No canonically equal values, e.g. 0x0.
+ CanonicalizeSet, // Value indicates a set in characterSetInfo.
+ CanonicalizeRangeLo, // Value is positive delta to pair, e.g. 0x41 has value 0x20, -> 0x61.
+ CanonicalizeRangeHi, // Value is positive delta to pair, e.g. 0x61 has value 0x20, -> 0x41.
+ CanonicalizeAlternatingAligned, // Aligned consecutive pair, e.g. 0x1f4,0x1f5.
+ CanonicalizeAlternatingUnaligned, // Unaligned consecutive pair, e.g. 0x241,0x242.
+};
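+// Decoding examples (editor's note, not part of the original source), drawn
+// from the generated rangeInfo table: { 0x0041u, 0x005au, 0x0020u,
+// CanonicalizeRangeLo } pairs 'A'..'Z' via ch + 0x20 (0x41 <-> 0x61), and the
+// matching lowercase rows use CanonicalizeRangeHi with the same delta.
+// CanonicalizeAlternatingAligned pairs (even, even + 1) via ch ^ 1;
+// CanonicalizeAlternatingUnaligned pairs (odd, odd + 1) via ((ch - 1) ^ 1) + 1,
+// e.g. 0x0139 <-> 0x013a.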
+struct UCS2CanonicalizationRange { uint16_t begin, end, value, type; };
+extern const size_t UCS2_CANONICALIZATION_RANGES;
+extern uint16_t* characterSetInfo[];
+extern UCS2CanonicalizationRange rangeInfo[];
+
+// This table is similar to the full rangeInfo table, however it maps from UCS2 codepoints to
+// the set of Latin1 codepoints that could match.
+enum LatinCanonicalizationType {
+ CanonicalizeLatinSelf, // This character is in the Latin1 range, but has no canonical equivalent in the range.
+ CanonicalizeLatinMask0x20, // One of a pair of characters, under the mask 0x20.
+ CanonicalizeLatinOther, // This character is not in the Latin1 range, but canonicalizes to another that is.
+ CanonicalizeLatinInvalid, // Cannot match against Latin1 input.
+};
+struct LatinCanonicalizationRange { uint16_t begin, end, value, type; };
+extern const size_t LATIN_CANONICALIZATION_RANGES;
+extern LatinCanonicalizationRange latinRangeInfo[];
+
+// This searches in log2 time over ~364 entries, so should typically result in 8 compares.
+inline UCS2CanonicalizationRange* rangeInfoFor(UChar ch)
+{
+ UCS2CanonicalizationRange* info = rangeInfo;
+ size_t entries = UCS2_CANONICALIZATION_RANGES;
+
+ while (true) {
+ size_t candidate = entries >> 1;
+ UCS2CanonicalizationRange* candidateInfo = info + candidate;
+ if (ch < candidateInfo->begin)
+ entries = candidate;
+ else if (ch <= candidateInfo->end)
+ return candidateInfo;
+ else {
+ info = candidateInfo + 1;
+ entries -= (candidate + 1);
+ }
+ }
+}
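+// Editor's note (not part of the original source): the table covers 0x0000
+// through 0xffff with no gaps, so every code unit falls inside some range and
+// the loop above always terminates by returning one; no "not found" path is
+// needed.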
+
+// Should only be called for characters that have one canonically matching value.
+inline UChar getCanonicalPair(UCS2CanonicalizationRange* info, UChar ch)
+{
+ ASSERT(ch >= info->begin && ch <= info->end);
+ switch (info->type) {
+ case CanonicalizeRangeLo:
+ return ch + info->value;
+ case CanonicalizeRangeHi:
+ return ch - info->value;
+ case CanonicalizeAlternatingAligned:
+ return ch ^ 1;
+ case CanonicalizeAlternatingUnaligned:
+ return ((ch - 1) ^ 1) + 1;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
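+
+// Editor's illustration (hypothetical helper, not part of the original source):
+// how rangeInfoFor() and getCanonicalPair() compose for a character with
+// exactly one canonical partner, e.g. illustrativePartner(0x41) == 0x61.
+inline UChar illustrativePartner(UChar ch)
+{
+ UCS2CanonicalizationRange* info = rangeInfoFor(ch);
+ ASSERT(info->type != CanonicalizeUnique && info->type != CanonicalizeSet);
+ return getCanonicalPair(info, ch);
+}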
+
+// Returns true if no other UCS2 codepoint can match this value.
+inline bool isCanonicallyUnique(UChar ch)
+{
+ return rangeInfoFor(ch)->type == CanonicalizeUnique;
+}
+
+// Returns true if values are equal, under the canonicalization rules.
+inline bool areCanonicallyEquivalent(UChar a, UChar b)
+{
+ UCS2CanonicalizationRange* info = rangeInfoFor(a);
+ switch (info->type) {
+ case CanonicalizeUnique:
+ return a == b;
+ case CanonicalizeSet: {
+ for (uint16_t* set = characterSetInfo[info->value]; (a = *set); ++set) {
+ if (a == b)
+ return true;
+ }
+ return false;
+ }
+ case CanonicalizeRangeLo:
+ return (a == b) || (a + info->value == b);
+ case CanonicalizeRangeHi:
+ return (a == b) || (a - info->value == b);
+ case CanonicalizeAlternatingAligned:
+ return (a | 1) == (b | 1);
+ case CanonicalizeAlternatingUnaligned:
+ return ((a - 1) | 1) == ((b - 1) | 1);
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
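+// Worked example (editor's note, not part of the original source): 0x00b5
+// (MICRO SIGN), 0x039c (GREEK CAPITAL MU) and 0x03bc (GREEK SMALL MU) all carry
+// CanonicalizeSet with value 9 in rangeInfo, so any two of them compare equal
+// here by walking the zero-terminated ucs2CharacterSet9 list.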
+
+} } // JSC::Yarr
+
+#endif
diff --git a/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js
new file mode 100644
index 0000000000..00361dd46e
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// See ES 5.1, 15.10.2.8
+function canonicalize(ch)
+{
+ var u = String.fromCharCode(ch).toUpperCase();
+ if (u.length > 1)
+ return ch;
+ var cu = u.charCodeAt(0);
+ if (ch >= 128 && cu < 128)
+ return ch;
+ return cu;
+}
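+// Editor's examples (not part of the original source):
+// canonicalize(0x61) == 0x41 -- 'a' uppercases to 'A'.
+// canonicalize(0xdf) == 0xdf -- 'ß' uppercases to "SS" (length > 1), so it
+// only matches itself.
+// canonicalize(0x17f) == 0x17f -- 'ſ' uppercases to ASCII 'S', but the
+// (ch >= 128 && cu < 128) guard keeps non-ASCII from matching ASCII.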
+
+var MAX_UCS2 = 0xFFFF;
+var MAX_LATIN = 0xFF;
+
+var groupedCanonically = [];
+// Pass 1: populate groupedCanonically - this is a mapping from canonicalized
+// values back to the sets of character codes that canonicalize to them.
+for (var i = 0; i <= MAX_UCS2; ++i) {
+ var ch = canonicalize(i);
+ if (!groupedCanonically[ch])
+ groupedCanonically[ch] = [];
+ groupedCanonically[ch].push(i);
+}
+
+var typeInfo = [];
+var latinTypeInfo = [];
+var characterSetInfo = [];
+// Pass 2: populate typeInfo & characterSetInfo. For every character, calculate
+// a typeInfo value (one of the types described above) and a value payload.
+for (cu in groupedCanonically) {
+ // The set of characters that canonicalize to cu
+ var characters = groupedCanonically[cu];
+
+ // If there is only one, it is unique.
+ if (characters.length == 1) {
+ typeInfo[characters[0]] = "CanonicalizeUnique:0";
+ latinTypeInfo[characters[0]] = characters[0] <= MAX_LATIN ? "CanonicalizeLatinSelf:0" : "CanonicalizeLatinInvalid:0";
+ continue;
+ }
+
+ // Sort the array.
+ characters.sort(function(x,y){return x-y;});
+
+ // If there are more than two characters, create an entry in characterSetInfo.
+ if (characters.length > 2) {
+ for (i in characters)
+ typeInfo[characters[i]] = "CanonicalizeSet:" + characterSetInfo.length;
+ characterSetInfo.push(characters);
+
+ if (characters[1] <= MAX_LATIN)
+ throw new Error("sets with more than one latin character not supported!");
+ if (characters[0] <= MAX_LATIN) {
+ for (i in characters)
+ latinTypeInfo[characters[i]] = "CanonicalizeLatinOther:" + characters[0];
+ latinTypeInfo[characters[0]] = "CanonicalizeLatinSelf:0";
+ } else {
+ for (i in characters)
+ latinTypeInfo[characters[i]] = "CanonicalizeLatinInvalid:0";
+ }
+
+ continue;
+ }
+
+// We have a pair: if the two values are adjacent, mark an alternating range; otherwise record which is the low and which the high partner.
+ var lo = characters[0];
+ var hi = characters[1];
+ var delta = hi - lo;
+ if (delta == 1) {
+ var type = lo & 1 ? "CanonicalizeAlternatingUnaligned:0" : "CanonicalizeAlternatingAligned:0";
+ typeInfo[lo] = type;
+ typeInfo[hi] = type;
+ } else {
+ typeInfo[lo] = "CanonicalizeRangeLo:" + delta;
+ typeInfo[hi] = "CanonicalizeRangeHi:" + delta;
+ }
+
+ if (lo > MAX_LATIN) {
+ latinTypeInfo[lo] = "CanonicalizeLatinInvalid:0";
+ latinTypeInfo[hi] = "CanonicalizeLatinInvalid:0";
+ } else if (hi > MAX_LATIN) {
+ latinTypeInfo[lo] = "CanonicalizeLatinSelf:0";
+ latinTypeInfo[hi] = "CanonicalizeLatinOther:" + lo;
+ } else {
+ if (delta != 0x20 || lo & 0x20)
+ throw new Error("pairs of latin characters that don't mask with 0x20 not supported!");
+ latinTypeInfo[lo] = "CanonicalizeLatinMask0x20:0";
+ latinTypeInfo[hi] = "CanonicalizeLatinMask0x20:0";
+ }
+}
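+// Editor's example (not part of the original source): the pair lo = 0xff ('ÿ'),
+// hi = 0x178 ('Ÿ') has delta 0x79, yielding "CanonicalizeRangeLo:121" /
+// "CanonicalizeRangeHi:121"; and since hi > MAX_LATIN while lo is not, the
+// Latin1 table gets { 0x0178, 0x0178, 0x00ff, CanonicalizeLatinOther }.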
+
+var rangeInfo = [];
+// Pass 3: coalesce types into ranges.
+for (var end = 0; end <= MAX_UCS2; ++end) {
+ var begin = end;
+ var type = typeInfo[end];
+ while (end < MAX_UCS2 && typeInfo[end + 1] == type)
+ ++end;
+ rangeInfo.push({begin:begin, end:end, type:type});
+}
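+// Editor's example (not part of the original source): 0x41..0x5a all carry
+// "CanonicalizeRangeLo:32", so this pass collapses them into the single row
+// { 0x0041u, 0x005au, 0x0020u, CanonicalizeRangeLo } of the generated table.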
+
+var latinRangeInfo = [];
+// Pass 4: coalesce Latin1 types into ranges.
+for (var end = 0; end <= MAX_UCS2; ++end) {
+ var begin = end;
+ var type = latinTypeInfo[end];
+ while (end < MAX_UCS2 && latinTypeInfo[end + 1] == type)
+ ++end;
+ latinRangeInfo.push({begin:begin, end:end, type:type});
+}
+
+
+// Helper function to convert a number to a fixed-width hex representation of a C uint16_t.
+function hex(x)
+{
+ var s = Number(x).toString(16);
+ while (s.length < 4)
+ s = "0" + s;
+ return "0x" + s + "u";
+}
+
+var copyright = (
+ "/*" + "\n" +
+ " * Copyright (C) 2012 Apple Inc. All rights reserved." + "\n" +
+ " *" + "\n" +
+ " * Redistribution and use in source and binary forms, with or without" + "\n" +
+ " * modification, are permitted provided that the following conditions" + "\n" +
+ " * are met:" + "\n" +
+ " * 1. Redistributions of source code must retain the above copyright" + "\n" +
+ " * notice, this list of conditions and the following disclaimer." + "\n" +
+ " * 2. Redistributions in binary form must reproduce the above copyright" + "\n" +
+ " * notice, this list of conditions and the following disclaimer in the" + "\n" +
+ " * documentation and/or other materials provided with the distribution." + "\n" +
+ " *" + "\n" +
+ " * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY" + "\n" +
+ " * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE" + "\n" +
+ " * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" + "\n" +
+ " * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR" + "\n" +
+ " * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL," + "\n" +
+ " * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO," + "\n" +
+ " * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR" + "\n" +
+ " * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY" + "\n" +
+ " * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT" + "\n" +
+ " * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE" + "\n" +
+ " * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. " + "\n" +
+ " */");
+
+print(copyright);
+print();
+print("// DO NOT EDIT! - this file autogenerated by YarrCanonicalizeUCS2.js");
+print();
+print('#include "config.h"');
+print('#include "YarrCanonicalizeUCS2.h"');
+print();
+print("namespace JSC { namespace Yarr {");
+print();
+print("#include <stdint.h>");
+print();
+
+for (i in characterSetInfo) {
+ var characters = ""
+ var set = characterSetInfo[i];
+ for (var j in set)
+ characters += hex(set[j]) + ", ";
+ print("uint16_t ucs2CharacterSet" + i + "[] = { " + characters + "0 };");
+}
+print();
+print("static const size_t UCS2_CANONICALIZATION_SETS = " + characterSetInfo.length + ";");
+print("uint16_t* characterSetInfo[UCS2_CANONICALIZATION_SETS] = {");
+for (i in characterSetInfo)
+print(" ucs2CharacterSet" + i + ",");
+print("};");
+print();
+print("const size_t UCS2_CANONICALIZATION_RANGES = " + rangeInfo.length + ";");
+print("UCS2CanonicalizationRange rangeInfo[UCS2_CANONICALIZATION_RANGES] = {");
+for (i in rangeInfo) {
+ var info = rangeInfo[i];
+ var typeAndValue = info.type.split(':');
+ print(" { " + hex(info.begin) + ", " + hex(info.end) + ", " + hex(typeAndValue[1]) + ", " + typeAndValue[0] + " },");
+}
+print("};");
+print();
+print("const size_t LATIN_CANONICALIZATION_RANGES = " + latinRangeInfo.length + ";");
+print("LatinCanonicalizationRange latinRangeInfo[LATIN_CANONICALIZATION_RANGES] = {");
+for (i in latinRangeInfo) {
+ var info = latinRangeInfo[i];
+ var typeAndValue = info.type.split(':');
+ print(" { " + hex(info.begin) + ", " + hex(info.end) + ", " + hex(typeAndValue[1]) + ", " + typeAndValue[0] + " },");
+}
+print("};");
+print();
+print("} } // JSC::Yarr");
+print();
+
diff --git a/src/3rdparty/masm/yarr/YarrInterpreter.cpp b/src/3rdparty/masm/yarr/YarrInterpreter.cpp
new file mode 100644
index 0000000000..f0312ea251
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrInterpreter.cpp
@@ -0,0 +1,1959 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrInterpreter.h"
+
+#include "Yarr.h"
+#include "YarrCanonicalizeUCS2.h"
+#include <wtf/BumpPointerAllocator.h>
+#include <wtf/DataLog.h>
+#include <wtf/text/CString.h>
+#include <wtf/text/WTFString.h>
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace WTF;
+
+namespace JSC { namespace Yarr {
+
+template<typename CharType>
+class Interpreter {
+public:
+ struct ParenthesesDisjunctionContext;
+
+ struct BackTrackInfoPatternCharacter {
+ uintptr_t matchAmount;
+ };
+ struct BackTrackInfoCharacterClass {
+ uintptr_t matchAmount;
+ };
+ struct BackTrackInfoBackReference {
+ uintptr_t begin; // Not really needed for greedy quantifiers.
+ uintptr_t matchAmount; // Not really needed for fixed quantifiers.
+ };
+ struct BackTrackInfoAlternative {
+ uintptr_t offset;
+ };
+ struct BackTrackInfoParentheticalAssertion {
+ uintptr_t begin;
+ };
+ struct BackTrackInfoParenthesesOnce {
+ uintptr_t begin;
+ };
+ struct BackTrackInfoParenthesesTerminal {
+ uintptr_t begin;
+ };
+ struct BackTrackInfoParentheses {
+ uintptr_t matchAmount;
+ ParenthesesDisjunctionContext* lastContext;
+ };
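+
+ // Editor's note (not part of the original source): lastContext heads a singly
+ // linked stack of per-iteration ParenthesesDisjunctionContexts; the two
+ // helpers below push and pop that stack as iterations match and backtrack.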
+
+ static inline void appendParenthesesDisjunctionContext(BackTrackInfoParentheses* backTrack, ParenthesesDisjunctionContext* context)
+ {
+ context->next = backTrack->lastContext;
+ backTrack->lastContext = context;
+ ++backTrack->matchAmount;
+ }
+
+ static inline void popParenthesesDisjunctionContext(BackTrackInfoParentheses* backTrack)
+ {
+ RELEASE_ASSERT(backTrack->matchAmount);
+ RELEASE_ASSERT(backTrack->lastContext);
+ backTrack->lastContext = backTrack->lastContext->next;
+ --backTrack->matchAmount;
+ }
+
+ struct DisjunctionContext
+ {
+ DisjunctionContext()
+ : term(0)
+ {
+ }
+
+ void* operator new(size_t, void* where)
+ {
+ return where;
+ }
+
+ int term;
+ unsigned matchBegin;
+ unsigned matchEnd;
+ uintptr_t frame[1];
+ };
+
+ DisjunctionContext* allocDisjunctionContext(ByteDisjunction* disjunction)
+ {
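+ // Editor's note (not part of the original source): frame[1] is a one-element
+ // stand-in for a variable-length trailing array, so the size below subtracts
+ // that single uintptr_t and adds room for the disjunction's full m_frameSize
+ // slots, all carved out of the bump-allocated pool.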
+ size_t size = sizeof(DisjunctionContext) - sizeof(uintptr_t) + disjunction->m_frameSize * sizeof(uintptr_t);
+ allocatorPool = allocatorPool->ensureCapacity(size);
+ RELEASE_ASSERT(allocatorPool);
+ return new (allocatorPool->alloc(size)) DisjunctionContext();
+ }
+
+ void freeDisjunctionContext(DisjunctionContext* context)
+ {
+ allocatorPool = allocatorPool->dealloc(context);
+ }
+
+ struct ParenthesesDisjunctionContext
+ {
+ ParenthesesDisjunctionContext(unsigned* output, ByteTerm& term)
+ : next(0)
+ {
+ unsigned firstSubpatternId = term.atom.subpatternId;
+ unsigned numNestedSubpatterns = term.atom.parenthesesDisjunction->m_numSubpatterns;
+
+ for (unsigned i = 0; i < (numNestedSubpatterns << 1); ++i) {
+ subpatternBackup[i] = output[(firstSubpatternId << 1) + i];
+ output[(firstSubpatternId << 1) + i] = offsetNoMatch;
+ }
+
+ new (getDisjunctionContext(term)) DisjunctionContext();
+ }
+
+ void* operator new(size_t, void* where)
+ {
+ return where;
+ }
+
+ void restoreOutput(unsigned* output, unsigned firstSubpatternId, unsigned numNestedSubpatterns)
+ {
+ for (unsigned i = 0; i < (numNestedSubpatterns << 1); ++i)
+ output[(firstSubpatternId << 1) + i] = subpatternBackup[i];
+ }
+
+ DisjunctionContext* getDisjunctionContext(ByteTerm& term)
+ {
+ return reinterpret_cast<DisjunctionContext*>(&(subpatternBackup[term.atom.parenthesesDisjunction->m_numSubpatterns << 1]));
+ }
+
+ ParenthesesDisjunctionContext* next;
+ unsigned subpatternBackup[1];
+ };
+
+ ParenthesesDisjunctionContext* allocParenthesesDisjunctionContext(ByteDisjunction* disjunction, unsigned* output, ByteTerm& term)
+ {
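+ // Editor's note (not part of the original source): one block holds
+ // 2 * m_numSubpatterns saved output slots (subpatternBackup) immediately
+ // followed by an embedded DisjunctionContext and its frame, matching the
+ // pointer arithmetic in getDisjunctionContext() above.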
+ size_t size = sizeof(ParenthesesDisjunctionContext) - sizeof(unsigned) + (term.atom.parenthesesDisjunction->m_numSubpatterns << 1) * sizeof(unsigned) + sizeof(DisjunctionContext) - sizeof(uintptr_t) + disjunction->m_frameSize * sizeof(uintptr_t);
+ allocatorPool = allocatorPool->ensureCapacity(size);
+ RELEASE_ASSERT(allocatorPool);
+ return new (allocatorPool->alloc(size)) ParenthesesDisjunctionContext(output, term);
+ }
+
+ void freeParenthesesDisjunctionContext(ParenthesesDisjunctionContext* context)
+ {
+ allocatorPool = allocatorPool->dealloc(context);
+ }
+
+ class InputStream {
+ public:
+ InputStream(const CharType* input, unsigned start, unsigned length)
+ : input(input)
+ , pos(start)
+ , length(length)
+ {
+ }
+
+ void next()
+ {
+ ++pos;
+ }
+
+ void rewind(unsigned amount)
+ {
+ ASSERT(pos >= amount);
+ pos -= amount;
+ }
+
+ int read()
+ {
+ ASSERT(pos < length);
+ if (pos < length)
+ return input[pos];
+ return -1;
+ }
+
+ int readPair()
+ {
+ ASSERT(pos + 1 < length);
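+ // Editor's note (not in the original source): packs two adjacent code units
+ // into one int -- input[pos] in bits 0-15, input[pos + 1] in bits 16-31.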
+ return input[pos] | input[pos + 1] << 16;
+ }
+
+ int readChecked(unsigned negativePositionOffset)
+ {
+ RELEASE_ASSERT(pos >= negativePositionOffset);
+ unsigned p = pos - negativePositionOffset;
+ ASSERT(p < length);
+ return input[p];
+ }
+
+ int reread(unsigned from)
+ {
+ ASSERT(from < length);
+ return input[from];
+ }
+
+ int prev()
+ {
+ ASSERT(!(pos > length));
+ if (pos && length)
+ return input[pos - 1];
+ return -1;
+ }
+
+ unsigned getPos()
+ {
+ return pos;
+ }
+
+ void setPos(unsigned p)
+ {
+ pos = p;
+ }
+
+ bool atStart()
+ {
+ return pos == 0;
+ }
+
+ bool atEnd()
+ {
+ return pos == length;
+ }
+
+ unsigned end()
+ {
+ return length;
+ }
+
+ bool checkInput(unsigned count)
+ {
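+ // Editor's note (not in the original source): the (pos + count) >= pos test
+ // guards against unsigned wrap-around for very large counts; the same
+ // pattern appears in isAvailableInput() below.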
+ if (((pos + count) <= length) && ((pos + count) >= pos)) {
+ pos += count;
+ return true;
+ }
+ return false;
+ }
+
+ void uncheckInput(unsigned count)
+ {
+ RELEASE_ASSERT(pos >= count);
+ pos -= count;
+ }
+
+ bool atStart(unsigned negativePositionOffset)
+ {
+ return pos == negativePositionOffset;
+ }
+
+ bool atEnd(unsigned negativePositionOffset)
+ {
+ RELEASE_ASSERT(pos >= negativePositionOffset);
+ return (pos - negativePositionOffset) == length;
+ }
+
+ bool isAvailableInput(unsigned offset)
+ {
+ return (((pos + offset) <= length) && ((pos + offset) >= pos));
+ }
+
+ private:
+ const CharType* input;
+ unsigned pos;
+ unsigned length;
+ };
+
+ bool testCharacterClass(CharacterClass* characterClass, int ch)
+ {
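+ // Editor's note (not in the original source): (ch & 0xFF80) is nonzero
+ // exactly when ch >= 0x80, so such code units are tested against the
+ // Unicode match/range vectors and the rest against the ASCII ones.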
+ if (ch & 0xFF80) {
+ for (unsigned i = 0; i < characterClass->m_matchesUnicode.size(); ++i)
+ if (ch == characterClass->m_matchesUnicode[i])
+ return true;
+ for (unsigned i = 0; i < characterClass->m_rangesUnicode.size(); ++i)
+ if ((ch >= characterClass->m_rangesUnicode[i].begin) && (ch <= characterClass->m_rangesUnicode[i].end))
+ return true;
+ } else {
+ for (unsigned i = 0; i < characterClass->m_matches.size(); ++i)
+ if (ch == characterClass->m_matches[i])
+ return true;
+ for (unsigned i = 0; i < characterClass->m_ranges.size(); ++i)
+ if ((ch >= characterClass->m_ranges[i].begin) && (ch <= characterClass->m_ranges[i].end))
+ return true;
+ }
+
+ return false;
+ }
+
+ bool checkCharacter(int testChar, unsigned negativeInputOffset)
+ {
+ return testChar == input.readChecked(negativeInputOffset);
+ }
+
+ bool checkCasedCharacter(int loChar, int hiChar, unsigned negativeInputOffset)
+ {
+ int ch = input.readChecked(negativeInputOffset);
+ return (loChar == ch) || (hiChar == ch);
+ }
+
+ bool checkCharacterClass(CharacterClass* characterClass, bool invert, unsigned negativeInputOffset)
+ {
+ bool match = testCharacterClass(characterClass, input.readChecked(negativeInputOffset));
+ return invert ? !match : match;
+ }
+
+ bool tryConsumeBackReference(int matchBegin, int matchEnd, unsigned negativeInputOffset)
+ {
+ unsigned matchSize = (unsigned)(matchEnd - matchBegin);
+
+ if (!input.checkInput(matchSize))
+ return false;
+
+ if (pattern->m_ignoreCase) {
+ for (unsigned i = 0; i < matchSize; ++i) {
+ int oldCh = input.reread(matchBegin + i);
+ int ch = input.readChecked(negativeInputOffset + matchSize - i);
+
+ if (oldCh == ch)
+ continue;
+
+ // The definition of canonicalize (see ES 5.1, 15.10.2.8) means that
+ // Unicode values are never allowed to match against ASCII ones.
+ if (isASCII(oldCh) || isASCII(ch)) {
+ if (toASCIIUpper(oldCh) == toASCIIUpper(ch))
+ continue;
+ } else if (areCanonicallyEquivalent(oldCh, ch))
+ continue;
+
+ input.uncheckInput(matchSize);
+ return false;
+ }
+ } else {
+ for (unsigned i = 0; i < matchSize; ++i) {
+ if (!checkCharacter(input.reread(matchBegin + i), negativeInputOffset + matchSize - i)) {
+ input.uncheckInput(matchSize);
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ bool matchAssertionBOL(ByteTerm& term)
+ {
+ return (input.atStart(term.inputPosition)) || (pattern->m_multiline && testCharacterClass(pattern->newlineCharacterClass, input.readChecked(term.inputPosition + 1)));
+ }
+
+ bool matchAssertionEOL(ByteTerm& term)
+ {
+ if (term.inputPosition)
+ return (input.atEnd(term.inputPosition)) || (pattern->m_multiline && testCharacterClass(pattern->newlineCharacterClass, input.readChecked(term.inputPosition)));
+
+ return (input.atEnd()) || (pattern->m_multiline && testCharacterClass(pattern->newlineCharacterClass, input.read()));
+ }
+
+ bool matchAssertionWordBoundary(ByteTerm& term)
+ {
+ bool prevIsWordchar = !input.atStart(term.inputPosition) && testCharacterClass(pattern->wordcharCharacterClass, input.readChecked(term.inputPosition + 1));
+ bool readIsWordchar;
+ if (term.inputPosition)
+ readIsWordchar = !input.atEnd(term.inputPosition) && testCharacterClass(pattern->wordcharCharacterClass, input.readChecked(term.inputPosition));
+ else
+ readIsWordchar = !input.atEnd() && testCharacterClass(pattern->wordcharCharacterClass, input.read());
+
+ bool wordBoundary = prevIsWordchar != readIsWordchar;
+ return term.invert() ? !wordBoundary : wordBoundary;
+ }
+
+ bool backtrackPatternCharacter(ByteTerm& term, DisjunctionContext* context)
+ {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.uncheckInput(1);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ ++backTrack->matchAmount;
+ if (checkCharacter(term.atom.patternCharacter, term.inputPosition + 1))
+ return true;
+ }
+ input.uncheckInput(backTrack->matchAmount);
+ break;
+ }
+
+ return false;
+ }
+
+ bool backtrackPatternCasedCharacter(ByteTerm& term, DisjunctionContext* context)
+ {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.uncheckInput(1);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ ++backTrack->matchAmount;
+ if (checkCasedCharacter(term.atom.casedCharacter.lo, term.atom.casedCharacter.hi, term.inputPosition + 1))
+ return true;
+ }
+ input.uncheckInput(backTrack->matchAmount);
+ break;
+ }
+
+ return false;
+ }
+
+ bool matchCharacterClass(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeCharacterClass);
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ for (unsigned matchAmount = 0; matchAmount < term.atom.quantityCount; ++matchAmount) {
+ if (!checkCharacterClass(term.atom.characterClass, term.invert(), term.inputPosition - matchAmount))
+ return false;
+ }
+ return true;
+ }
+
+ case QuantifierGreedy: {
+ unsigned matchAmount = 0;
+ while ((matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ if (!checkCharacterClass(term.atom.characterClass, term.invert(), term.inputPosition + 1)) {
+ input.uncheckInput(1);
+ break;
+ }
+ ++matchAmount;
+ }
+ backTrack->matchAmount = matchAmount;
+
+ return true;
+ }
+
+ case QuantifierNonGreedy:
+ backTrack->matchAmount = 0;
+ return true;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool backtrackCharacterClass(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeCharacterClass);
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.uncheckInput(1);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ ++backTrack->matchAmount;
+ if (checkCharacterClass(term.atom.characterClass, term.invert(), term.inputPosition + 1))
+ return true;
+ }
+ input.uncheckInput(backTrack->matchAmount);
+ break;
+ }
+
+ return false;
+ }
+
+ bool matchBackReference(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeBackReference);
+ BackTrackInfoBackReference* backTrack = reinterpret_cast<BackTrackInfoBackReference*>(context->frame + term.frameLocation);
+
+ unsigned matchBegin = output[(term.atom.subpatternId << 1)];
+ unsigned matchEnd = output[(term.atom.subpatternId << 1) + 1];
+
+ // If the end position of the referenced match hasn't been set yet, then the
+ // back reference lies inside the very parentheses it refers to. In that case
+ // the result of the match is the empty string, just as when it refers to
+ // parentheses with a zero-width match.
+ // E.g.: /(a\1)/
+ if (matchEnd == offsetNoMatch)
+ return true;
+
+ if (matchBegin == offsetNoMatch)
+ return true;
+
+ ASSERT(matchBegin <= matchEnd);
+
+ if (matchBegin == matchEnd)
+ return true;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ backTrack->begin = input.getPos();
+ for (unsigned matchAmount = 0; matchAmount < term.atom.quantityCount; ++matchAmount) {
+ if (!tryConsumeBackReference(matchBegin, matchEnd, term.inputPosition)) {
+ input.setPos(backTrack->begin);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ case QuantifierGreedy: {
+ unsigned matchAmount = 0;
+ while ((matchAmount < term.atom.quantityCount) && tryConsumeBackReference(matchBegin, matchEnd, term.inputPosition))
+ ++matchAmount;
+ backTrack->matchAmount = matchAmount;
+ return true;
+ }
+
+ case QuantifierNonGreedy:
+ backTrack->begin = input.getPos();
+ backTrack->matchAmount = 0;
+ return true;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool backtrackBackReference(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeBackReference);
+ BackTrackInfoBackReference* backTrack = reinterpret_cast<BackTrackInfoBackReference*>(context->frame + term.frameLocation);
+
+ unsigned matchBegin = output[(term.atom.subpatternId << 1)];
+ unsigned matchEnd = output[(term.atom.subpatternId << 1) + 1];
+
+ if (matchBegin == offsetNoMatch)
+ return false;
+
+ ASSERT(matchBegin <= matchEnd);
+
+ if (matchBegin == matchEnd)
+ return false;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ // for quantityCount == 1, could rewind.
+ input.setPos(backTrack->begin);
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.rewind(matchEnd - matchBegin);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && tryConsumeBackReference(matchBegin, matchEnd, term.inputPosition)) {
+ ++backTrack->matchAmount;
+ return true;
+ }
+ input.setPos(backTrack->begin);
+ break;
+ }
+
+ return false;
+ }
+
+ void recordParenthesesMatch(ByteTerm& term, ParenthesesDisjunctionContext* context)
+ {
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1)] = context->getDisjunctionContext(term)->matchBegin + term.inputPosition;
+ output[(subpatternId << 1) + 1] = context->getDisjunctionContext(term)->matchEnd + term.inputPosition;
+ }
+ }
+ void resetMatches(ByteTerm& term, ParenthesesDisjunctionContext* context)
+ {
+ unsigned firstSubpatternId = term.atom.subpatternId;
+ unsigned count = term.atom.parenthesesDisjunction->m_numSubpatterns;
+ context->restoreOutput(output, firstSubpatternId, count);
+ }
+ JSRegExpResult parenthesesDoBacktrack(ByteTerm& term, BackTrackInfoParentheses* backTrack)
+ {
+ while (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+
+ JSRegExpResult result = matchDisjunction(term.atom.parenthesesDisjunction, context->getDisjunctionContext(term), true);
+ if (result == JSRegExpMatch)
+ return JSRegExpMatch;
+
+ resetMatches(term, context);
+ popParenthesesDisjunctionContext(backTrack);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ return JSRegExpNoMatch;
+ }
+
+ bool matchParenthesesOnceBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierGreedy: {
+ // set this speculatively; if we get to the parens end this will be true.
+ backTrack->begin = input.getPos();
+ break;
+ }
+ case QuantifierNonGreedy: {
+ backTrack->begin = notFound;
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ }
+ case QuantifierFixedCount:
+ break;
+ }
+
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1)] = input.getPos() - term.inputPosition;
+ }
+
+ return true;
+ }
+
+ bool matchParenthesesOnceEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1) + 1] = input.getPos() + term.inputPosition;
+ }
+
+ if (term.atom.quantityType == QuantifierFixedCount)
+ return true;
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+ return backTrack->begin != input.getPos();
+ }
+
+ bool backtrackParenthesesOnceBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1)] = offsetNoMatch;
+ output[(subpatternId << 1) + 1] = offsetNoMatch;
+ }
+
+ switch (term.atom.quantityType) {
+ case QuantifierGreedy:
+ // if we backtrack to this point, there is another chance - try matching nothing.
+ ASSERT(backTrack->begin != notFound);
+ backTrack->begin = notFound;
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ case QuantifierNonGreedy:
+ ASSERT(backTrack->begin != notFound);
+ case QuantifierFixedCount:
+ break;
+ }
+
+ return false;
+ }
+
+ bool backtrackParenthesesOnceEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierGreedy:
+ if (backTrack->begin == notFound) {
+ context->term -= term.atom.parenthesesWidth;
+ return false;
+ }
+ case QuantifierNonGreedy:
+ if (backTrack->begin == notFound) {
+ backTrack->begin = input.getPos();
+ if (term.capture()) {
+ // Technically this access to inputPosition should be accessing the begin term's
+ // inputPosition, but for repeats other than fixed these values should be
+ // the same anyway! (We don't pre-check for greedy or non-greedy matches.)
+ ASSERT((&term - term.atom.parenthesesWidth)->type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+ ASSERT((&term - term.atom.parenthesesWidth)->inputPosition == term.inputPosition);
+ unsigned subpatternId = term.atom.subpatternId;
+ output[subpatternId << 1] = input.getPos() + term.inputPosition;
+ }
+ context->term -= term.atom.parenthesesWidth;
+ return true;
+ }
+ case QuantifierFixedCount:
+ break;
+ }
+
+ return false;
+ }
+
+ bool matchParenthesesTerminalBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternTerminalBegin);
+ ASSERT(term.atom.quantityType == QuantifierGreedy);
+ ASSERT(term.atom.quantityCount == quantifyInfinite);
+ ASSERT(!term.capture());
+
+ BackTrackInfoParenthesesTerminal* backTrack = reinterpret_cast<BackTrackInfoParenthesesTerminal*>(context->frame + term.frameLocation);
+ backTrack->begin = input.getPos();
+ return true;
+ }
+
+ bool matchParenthesesTerminalEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternTerminalEnd);
+
+ BackTrackInfoParenthesesTerminal* backTrack = reinterpret_cast<BackTrackInfoParenthesesTerminal*>(context->frame + term.frameLocation);
+ // Empty match is a failed match.
+ if (backTrack->begin == input.getPos())
+ return false;
+
+ // Successful match! Okay, what's next? - loop around and try to match more!
+ context->term -= (term.atom.parenthesesWidth + 1);
+ return true;
+ }
+
+ bool backtrackParenthesesTerminalBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternTerminalBegin);
+ ASSERT(term.atom.quantityType == QuantifierGreedy);
+ ASSERT(term.atom.quantityCount == quantifyInfinite);
+ ASSERT(!term.capture());
+
+ // If we backtrack to this point, we have failed to match this iteration of the parens.
+ // Since this is greedy with a zero minimum, a failed attempt is also accepted as a match!
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ }
+
+ bool backtrackParenthesesTerminalEnd(ByteTerm&, DisjunctionContext*)
+ {
+ // 'Terminal' parentheses are at the end of the regex, and as such a match past end
+ // should always be returned as a successful match - we should never backtrack to here.
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool matchParentheticalAssertionBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParentheticalAssertion* backTrack = reinterpret_cast<BackTrackInfoParentheticalAssertion*>(context->frame + term.frameLocation);
+
+ backTrack->begin = input.getPos();
+ return true;
+ }
+
+ bool matchParentheticalAssertionEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParentheticalAssertion* backTrack = reinterpret_cast<BackTrackInfoParentheticalAssertion*>(context->frame + term.frameLocation);
+
+ input.setPos(backTrack->begin);
+
+ // We've reached the end of the parens; if they are inverted, this is failure.
+ if (term.invert()) {
+ context->term -= term.atom.parenthesesWidth;
+ return false;
+ }
+
+ return true;
+ }
+
+ bool backtrackParentheticalAssertionBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+        // We've failed to match the parens; if they are inverted, this is a win!
+ if (term.invert()) {
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ }
+
+ return false;
+ }
+
+ bool backtrackParentheticalAssertionEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParentheticalAssertion* backTrack = reinterpret_cast<BackTrackInfoParentheticalAssertion*>(context->frame + term.frameLocation);
+
+ input.setPos(backTrack->begin);
+
+ context->term -= term.atom.parenthesesWidth;
+ return false;
+ }
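+
+    // Whether the assertion succeeded or is being unwound, the handlers above
+    // restore the input position to where the assertion started: lookaheads
+    // such as /foo(?=bar)/ consume no input.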
+
+ JSRegExpResult matchParentheses(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpattern);
+
+ BackTrackInfoParentheses* backTrack = reinterpret_cast<BackTrackInfoParentheses*>(context->frame + term.frameLocation);
+ ByteDisjunction* disjunctionBody = term.atom.parenthesesDisjunction;
+
+ backTrack->matchAmount = 0;
+ backTrack->lastContext = 0;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ // While we haven't yet reached our fixed limit,
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+                // Try to do a match, and if it succeeds, add it to the list.
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult result = matchDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (result == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ // The match failed; try to find an alternate point to carry on from.
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ JSRegExpResult backtrackResult = parenthesesDoBacktrack(term, backTrack);
+ if (backtrackResult != JSRegExpMatch)
+ return backtrackResult;
+ }
+ }
+
+ ASSERT(backTrack->matchAmount == term.atom.quantityCount);
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ return JSRegExpMatch;
+ }
+
+ case QuantifierGreedy: {
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (result == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+
+ break;
+ }
+ }
+
+ if (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ }
+ return JSRegExpMatch;
+ }
+
+ case QuantifierNonGreedy:
+ return JSRegExpMatch;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSRegExpErrorNoMatch;
+ }
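+
+    // A sketch of the three quantifier flavors handled above, for a body (?:ab):
+    //   /(?:ab){2}/  - fixed count: both iterations must match here and now.
+    //   /(?:ab)*/    - greedy: consume as many iterations as possible up front.
+    //   /(?:ab)*?/   - non-greedy: match zero iterations now; further iterations
+    //                  are only added when backtrackParentheses() asks for them.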
+
+ // Rules for backtracking differ depending on whether this is greedy or non-greedy.
+ //
+    // Greedy matches should never try just adding more - you should already have done
+    // the 'more' cases. Always backtrack, at least a little bit. However, cases where
+    // you backtrack an item off the list need checking, since we'll never have matched
+    // the one-less case. Tracking forwards, still add as much as possible.
+    //
+    // Non-greedy, we've already done the one-less case, so don't match on popping.
+    // We haven't done the one-more case, so always try to add that.
+ //
+ JSRegExpResult backtrackParentheses(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpattern);
+
+ BackTrackInfoParentheses* backTrack = reinterpret_cast<BackTrackInfoParentheses*>(context->frame + term.frameLocation);
+ ByteDisjunction* disjunctionBody = term.atom.parenthesesDisjunction;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ ASSERT(backTrack->matchAmount == term.atom.quantityCount);
+
+ ParenthesesDisjunctionContext* context = 0;
+ JSRegExpResult result = parenthesesDoBacktrack(term, backTrack);
+
+ if (result != JSRegExpMatch)
+ return result;
+
+ // While we haven't yet reached our fixed limit,
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+                // Try to do a match, and if it succeeds, add it to the list.
+ context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ result = matchDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+
+ if (result == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ // The match failed; try to find an alternate point to carry on from.
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ JSRegExpResult backtrackResult = parenthesesDoBacktrack(term, backTrack);
+ if (backtrackResult != JSRegExpMatch)
+ return backtrackResult;
+ }
+ }
+
+ ASSERT(backTrack->matchAmount == term.atom.quantityCount);
+ context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ return JSRegExpMatch;
+ }
+
+ case QuantifierGreedy: {
+ if (!backTrack->matchAmount)
+ return JSRegExpNoMatch;
+
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term), true);
+ if (result == JSRegExpMatch) {
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult parenthesesResult = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (parenthesesResult == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (parenthesesResult != JSRegExpNoMatch)
+ return parenthesesResult;
+
+ break;
+ }
+ }
+ } else {
+ resetMatches(term, context);
+ popParenthesesDisjunctionContext(backTrack);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ if (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ }
+ return JSRegExpMatch;
+ }
+
+ case QuantifierNonGreedy: {
+ // If we've not reached the limit, try to add one more match.
+ if (backTrack->matchAmount < term.atom.quantityCount) {
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (result == JSRegExpMatch) {
+ appendParenthesesDisjunctionContext(backTrack, context);
+ recordParenthesesMatch(term, context);
+ return JSRegExpMatch;
+ }
+
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+            // Nope - okay, backtrack looking for an alternative.
+ while (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term), true);
+ if (result == JSRegExpMatch) {
+                    // Successful backtrack! We're back in the game!
+ if (backTrack->matchAmount) {
+ context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ }
+ return JSRegExpMatch;
+ }
+
+ // pop a match off the stack
+ resetMatches(term, context);
+ popParenthesesDisjunctionContext(backTrack);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ return JSRegExpNoMatch;
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSRegExpErrorNoMatch;
+ }
+
+ bool matchDotStarEnclosure(ByteTerm& term, DisjunctionContext* context)
+ {
+ UNUSED_PARAM(term);
+ unsigned matchBegin = context->matchBegin;
+
+ if (matchBegin) {
+ for (matchBegin--; true; matchBegin--) {
+ if (testCharacterClass(pattern->newlineCharacterClass, input.reread(matchBegin))) {
+ ++matchBegin;
+ break;
+ }
+
+ if (!matchBegin)
+ break;
+ }
+ }
+
+ unsigned matchEnd = input.getPos();
+
+ for (; (matchEnd != input.end())
+ && (!testCharacterClass(pattern->newlineCharacterClass, input.reread(matchEnd))); matchEnd++) { }
+
+ if (((matchBegin && term.anchors.m_bol)
+ || ((matchEnd != input.end()) && term.anchors.m_eol))
+ && !pattern->m_multiline)
+ return false;
+
+ context->matchBegin = matchBegin;
+ context->matchEnd = matchEnd;
+ return true;
+ }
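+
+    // matchDotStarEnclosure() implements the enclosure for patterns of the form
+    // /.*foo.*/: having matched the enclosed body, the match is widened backwards
+    // to just past the previous newline (or the start of input) and forwards to
+    // the next newline (or the end of input), doing the work of the surrounding
+    // .* terms without any backtracking.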
+
+#define MATCH_NEXT() { ++context->term; goto matchAgain; }
+#define BACKTRACK() { --context->term; goto backtrack; }
+#define currentTerm() (disjunction->terms[context->term])
+ JSRegExpResult matchDisjunction(ByteDisjunction* disjunction, DisjunctionContext* context, bool btrack = false)
+ {
+ if (!--remainingMatchCount)
+ return JSRegExpErrorHitLimit;
+
+ if (btrack)
+ BACKTRACK();
+
+ context->matchBegin = input.getPos();
+ context->term = 0;
+
+ matchAgain:
+ ASSERT(context->term < static_cast<int>(disjunction->terms.size()));
+
+ switch (currentTerm().type) {
+ case ByteTerm::TypeSubpatternBegin:
+ MATCH_NEXT();
+ case ByteTerm::TypeSubpatternEnd:
+ context->matchEnd = input.getPos();
+ return JSRegExpMatch;
+
+ case ByteTerm::TypeBodyAlternativeBegin:
+ MATCH_NEXT();
+ case ByteTerm::TypeBodyAlternativeDisjunction:
+ case ByteTerm::TypeBodyAlternativeEnd:
+ context->matchEnd = input.getPos();
+ return JSRegExpMatch;
+
+ case ByteTerm::TypeAlternativeBegin:
+ MATCH_NEXT();
+ case ByteTerm::TypeAlternativeDisjunction:
+ case ByteTerm::TypeAlternativeEnd: {
+ int offset = currentTerm().alternative.end;
+ BackTrackInfoAlternative* backTrack = reinterpret_cast<BackTrackInfoAlternative*>(context->frame + currentTerm().frameLocation);
+ backTrack->offset = offset;
+ context->term += offset;
+ MATCH_NEXT();
+ }
+
+ case ByteTerm::TypeAssertionBOL:
+ if (matchAssertionBOL(currentTerm()))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeAssertionEOL:
+ if (matchAssertionEOL(currentTerm()))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeAssertionWordBoundary:
+ if (matchAssertionWordBoundary(currentTerm()))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypePatternCharacterOnce:
+ case ByteTerm::TypePatternCharacterFixed: {
+ for (unsigned matchAmount = 0; matchAmount < currentTerm().atom.quantityCount; ++matchAmount) {
+ if (!checkCharacter(currentTerm().atom.patternCharacter, currentTerm().inputPosition - matchAmount))
+ BACKTRACK();
+ }
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCharacterGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ unsigned matchAmount = 0;
+ while ((matchAmount < currentTerm().atom.quantityCount) && input.checkInput(1)) {
+ if (!checkCharacter(currentTerm().atom.patternCharacter, currentTerm().inputPosition + 1)) {
+ input.uncheckInput(1);
+ break;
+ }
+ ++matchAmount;
+ }
+ backTrack->matchAmount = matchAmount;
+
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCharacterNonGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ backTrack->matchAmount = 0;
+ MATCH_NEXT();
+ }
+
+ case ByteTerm::TypePatternCasedCharacterOnce:
+ case ByteTerm::TypePatternCasedCharacterFixed: {
+ for (unsigned matchAmount = 0; matchAmount < currentTerm().atom.quantityCount; ++matchAmount) {
+ if (!checkCasedCharacter(currentTerm().atom.casedCharacter.lo, currentTerm().atom.casedCharacter.hi, currentTerm().inputPosition - matchAmount))
+ BACKTRACK();
+ }
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCasedCharacterGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ unsigned matchAmount = 0;
+ while ((matchAmount < currentTerm().atom.quantityCount) && input.checkInput(1)) {
+ if (!checkCasedCharacter(currentTerm().atom.casedCharacter.lo, currentTerm().atom.casedCharacter.hi, currentTerm().inputPosition + 1)) {
+ input.uncheckInput(1);
+ break;
+ }
+ ++matchAmount;
+ }
+ backTrack->matchAmount = matchAmount;
+
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCasedCharacterNonGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ backTrack->matchAmount = 0;
+ MATCH_NEXT();
+ }
+
+ case ByteTerm::TypeCharacterClass:
+ if (matchCharacterClass(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeBackReference:
+ if (matchBackReference(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpattern: {
+ JSRegExpResult result = matchParentheses(currentTerm(), context);
+
+ if (result == JSRegExpMatch) {
+ MATCH_NEXT();
+ } else if (result != JSRegExpNoMatch)
+ return result;
+
+ BACKTRACK();
+ }
+ case ByteTerm::TypeParenthesesSubpatternOnceBegin:
+ if (matchParenthesesOnceBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternOnceEnd:
+ if (matchParenthesesOnceEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalBegin:
+ if (matchParenthesesTerminalBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalEnd:
+ if (matchParenthesesTerminalEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionBegin:
+ if (matchParentheticalAssertionBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionEnd:
+ if (matchParentheticalAssertionEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypeCheckInput:
+ if (input.checkInput(currentTerm().checkInputCount))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypeUncheckInput:
+ input.uncheckInput(currentTerm().checkInputCount);
+ MATCH_NEXT();
+
+ case ByteTerm::TypeDotStarEnclosure:
+ if (matchDotStarEnclosure(currentTerm(), context))
+ return JSRegExpMatch;
+ BACKTRACK();
+ }
+
+        // We should never fall through to here.
+ RELEASE_ASSERT_NOT_REACHED();
+
+ backtrack:
+ ASSERT(context->term < static_cast<int>(disjunction->terms.size()));
+
+ switch (currentTerm().type) {
+ case ByteTerm::TypeSubpatternBegin:
+ return JSRegExpNoMatch;
+ case ByteTerm::TypeSubpatternEnd:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case ByteTerm::TypeBodyAlternativeBegin:
+ case ByteTerm::TypeBodyAlternativeDisjunction: {
+ int offset = currentTerm().alternative.next;
+ context->term += offset;
+ if (offset > 0)
+ MATCH_NEXT();
+
+ if (input.atEnd())
+ return JSRegExpNoMatch;
+
+ input.next();
+
+ context->matchBegin = input.getPos();
+
+ if (currentTerm().alternative.onceThrough)
+ context->term += currentTerm().alternative.next;
+
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypeBodyAlternativeEnd:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case ByteTerm::TypeAlternativeBegin:
+ case ByteTerm::TypeAlternativeDisjunction: {
+ int offset = currentTerm().alternative.next;
+ context->term += offset;
+ if (offset > 0)
+ MATCH_NEXT();
+ BACKTRACK();
+ }
+ case ByteTerm::TypeAlternativeEnd: {
+ // We should never backtrack back into an alternative of the main body of the regex.
+ BackTrackInfoAlternative* backTrack = reinterpret_cast<BackTrackInfoAlternative*>(context->frame + currentTerm().frameLocation);
+ unsigned offset = backTrack->offset;
+ context->term -= offset;
+ BACKTRACK();
+ }
+
+ case ByteTerm::TypeAssertionBOL:
+ case ByteTerm::TypeAssertionEOL:
+ case ByteTerm::TypeAssertionWordBoundary:
+ BACKTRACK();
+
+ case ByteTerm::TypePatternCharacterOnce:
+ case ByteTerm::TypePatternCharacterFixed:
+ case ByteTerm::TypePatternCharacterGreedy:
+ case ByteTerm::TypePatternCharacterNonGreedy:
+ if (backtrackPatternCharacter(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypePatternCasedCharacterOnce:
+ case ByteTerm::TypePatternCasedCharacterFixed:
+ case ByteTerm::TypePatternCasedCharacterGreedy:
+ case ByteTerm::TypePatternCasedCharacterNonGreedy:
+ if (backtrackPatternCasedCharacter(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeCharacterClass:
+ if (backtrackCharacterClass(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeBackReference:
+ if (backtrackBackReference(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpattern: {
+ JSRegExpResult result = backtrackParentheses(currentTerm(), context);
+
+ if (result == JSRegExpMatch) {
+ MATCH_NEXT();
+ } else if (result != JSRegExpNoMatch)
+ return result;
+
+ BACKTRACK();
+ }
+ case ByteTerm::TypeParenthesesSubpatternOnceBegin:
+ if (backtrackParenthesesOnceBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternOnceEnd:
+ if (backtrackParenthesesOnceEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalBegin:
+ if (backtrackParenthesesTerminalBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalEnd:
+ if (backtrackParenthesesTerminalEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionBegin:
+ if (backtrackParentheticalAssertionBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionEnd:
+ if (backtrackParentheticalAssertionEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypeCheckInput:
+ input.uncheckInput(currentTerm().checkInputCount);
+ BACKTRACK();
+
+ case ByteTerm::TypeUncheckInput:
+ input.checkInput(currentTerm().checkInputCount);
+ BACKTRACK();
+
+ case ByteTerm::TypeDotStarEnclosure:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSRegExpErrorNoMatch;
+ }
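+
+    // matchDisjunction() is the interpreter core: context->term acts as a program
+    // counter over the flat term array, MATCH_NEXT() advances it, and BACKTRACK()
+    // walks it backwards, running each term's backtracking handler. When an
+    // alternative fails, for example, control reverses to the alternative term
+    // and hops to the next branch via its relative alternative.next offset.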
+
+ JSRegExpResult matchNonZeroDisjunction(ByteDisjunction* disjunction, DisjunctionContext* context, bool btrack = false)
+ {
+ JSRegExpResult result = matchDisjunction(disjunction, context, btrack);
+
+ if (result == JSRegExpMatch) {
+ while (context->matchBegin == context->matchEnd) {
+ result = matchDisjunction(disjunction, context, true);
+ if (result != JSRegExpMatch)
+ return result;
+ }
+ return JSRegExpMatch;
+ }
+
+ return result;
+ }
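+
+    // Wrapper used for quantified parentheses: an iteration that matched but
+    // consumed no input (matchBegin == matchEnd) is rejected and re-backtracked,
+    // which is what keeps patterns like /(?:a*)*/ from spinning forever on
+    // zero-width matches.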
+
+ unsigned interpret()
+ {
+ if (!input.isAvailableInput(0))
+ return offsetNoMatch;
+
+ for (unsigned i = 0; i < pattern->m_body->m_numSubpatterns + 1; ++i)
+ output[i << 1] = offsetNoMatch;
+
+ allocatorPool = pattern->m_allocator->startAllocator();
+ RELEASE_ASSERT(allocatorPool);
+
+ DisjunctionContext* context = allocDisjunctionContext(pattern->m_body.get());
+
+ JSRegExpResult result = matchDisjunction(pattern->m_body.get(), context, false);
+ if (result == JSRegExpMatch) {
+ output[0] = context->matchBegin;
+ output[1] = context->matchEnd;
+ }
+
+ freeDisjunctionContext(context);
+
+ pattern->m_allocator->stopAllocator();
+
+ ASSERT((result == JSRegExpMatch) == (output[0] != offsetNoMatch));
+ return output[0];
+ }
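+
+    // On success the return value doubles as output[0], the overall match start.
+    // The output array holds (begin, end) index pairs: pair i (output[i * 2],
+    // output[i * 2 + 1]) covers capturing subpattern i, with pair 0 the match itself.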
+
+ Interpreter(BytecodePattern* pattern, unsigned* output, const CharType* input, unsigned length, unsigned start)
+ : pattern(pattern)
+ , output(output)
+ , input(input, start, length)
+ , allocatorPool(0)
+ , remainingMatchCount(matchLimit)
+ {
+ }
+
+private:
+ BytecodePattern* pattern;
+ unsigned* output;
+ InputStream input;
+ BumpPointerPool* allocatorPool;
+ unsigned remainingMatchCount;
+};
+
+class ByteCompiler {
+ struct ParenthesesStackEntry {
+ unsigned beginTerm;
+ unsigned savedAlternativeIndex;
+ ParenthesesStackEntry(unsigned beginTerm, unsigned savedAlternativeIndex/*, unsigned subpatternId, bool capture = false*/)
+ : beginTerm(beginTerm)
+ , savedAlternativeIndex(savedAlternativeIndex)
+ {
+ }
+ };
+
+public:
+ ByteCompiler(YarrPattern& pattern)
+ : m_pattern(pattern)
+ {
+ m_currentAlternativeIndex = 0;
+ }
+
+ PassOwnPtr<BytecodePattern> compile(BumpPointerAllocator* allocator)
+ {
+ regexBegin(m_pattern.m_numSubpatterns, m_pattern.m_body->m_callFrameSize, m_pattern.m_body->m_alternatives[0]->onceThrough());
+ emitDisjunction(m_pattern.m_body);
+ regexEnd();
+
+ return adoptPtr(new BytecodePattern(m_bodyDisjunction.release(), m_allParenthesesInfo, m_pattern, allocator));
+ }
+
+ void checkInput(unsigned count)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::CheckInput(count));
+ }
+
+ void uncheckInput(unsigned count)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::UncheckInput(count));
+ }
+
+ void assertionBOL(unsigned inputPosition)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::BOL(inputPosition));
+ }
+
+ void assertionEOL(unsigned inputPosition)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::EOL(inputPosition));
+ }
+
+ void assertionWordBoundary(bool invert, unsigned inputPosition)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::WordBoundary(invert, inputPosition));
+ }
+
+ void atomPatternCharacter(UChar ch, unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ if (m_pattern.m_ignoreCase) {
+ UChar lo = Unicode::toLower(ch);
+ UChar hi = Unicode::toUpper(ch);
+
+ if (lo != hi) {
+ m_bodyDisjunction->terms.append(ByteTerm(lo, hi, inputPosition, frameLocation, quantityCount, quantityType));
+ return;
+ }
+ }
+
+ m_bodyDisjunction->terms.append(ByteTerm(ch, inputPosition, frameLocation, quantityCount, quantityType));
+ }
+
+ void atomCharacterClass(CharacterClass* characterClass, bool invert, unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm(characterClass, invert, inputPosition));
+
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ }
+
+ void atomBackReference(unsigned subpatternId, unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ ASSERT(subpatternId);
+
+ m_bodyDisjunction->terms.append(ByteTerm::BackReference(subpatternId, inputPosition));
+
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ }
+
+ void atomParenthesesOnceBegin(unsigned subpatternId, bool capture, unsigned inputPosition, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternOnceBegin, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
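+
+    // This and the sibling atomParentheses*Begin() methods below all follow the
+    // same shape: emit the begin term plus an AlternativeBegin, then push the
+    // begin term's index so the matching atomParentheses*End() can pop it, close
+    // the alternative chain, and patch parenthesesWidth/quantity on both ends.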
+
+ void atomParenthesesTerminalBegin(unsigned subpatternId, bool capture, unsigned inputPosition, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternTerminalBegin, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ void atomParenthesesSubpatternBegin(unsigned subpatternId, bool capture, unsigned inputPosition, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+        // Errrk! - this is a little crazy: we initially generate this as a TypeParenthesesSubpatternOnceBegin,
+        // then fix it up at the end! Simplifying this should make it much clearer.
+ // https://bugs.webkit.org/show_bug.cgi?id=50136
+
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternOnceBegin, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ void atomParentheticalAssertionBegin(unsigned subpatternId, bool invert, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParentheticalAssertionBegin, subpatternId, false, invert, 0));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ void atomParentheticalAssertionEnd(unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParentheticalAssertionBegin);
+
+ bool invert = m_bodyDisjunction->terms[beginTerm].invert();
+ unsigned subpatternId = m_bodyDisjunction->terms[beginTerm].atom.subpatternId;
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParentheticalAssertionEnd, subpatternId, false, invert, inputPosition));
+ m_bodyDisjunction->terms[beginTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].frameLocation = frameLocation;
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[endTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[endTerm].atom.quantityType = quantityType;
+ }
+
+ void assertionDotStarEnclosure(bool bolAnchored, bool eolAnchored)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::DotStarEnclosure(bolAnchored, eolAnchored));
+ }
+
+ unsigned popParenthesesStack()
+ {
+ ASSERT(m_parenthesesStack.size());
+ int stackEnd = m_parenthesesStack.size() - 1;
+ unsigned beginTerm = m_parenthesesStack[stackEnd].beginTerm;
+ m_currentAlternativeIndex = m_parenthesesStack[stackEnd].savedAlternativeIndex;
+ m_parenthesesStack.shrink(stackEnd);
+
+ ASSERT(beginTerm < m_bodyDisjunction->terms.size());
+ ASSERT(m_currentAlternativeIndex < m_bodyDisjunction->terms.size());
+
+ return beginTerm;
+ }
+
+#ifndef NDEBUG
+ void dumpDisjunction(ByteDisjunction* disjunction)
+ {
+ dataLogF("ByteDisjunction(%p):\n\t", disjunction);
+ for (unsigned i = 0; i < disjunction->terms.size(); ++i)
+ dataLogF("{ %d } ", disjunction->terms[i].type);
+ dataLogF("\n");
+ }
+#endif
+
+ void closeAlternative(int beginTerm)
+ {
+ int origBeginTerm = beginTerm;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeAlternativeBegin);
+ int endIndex = m_bodyDisjunction->terms.size();
+
+ unsigned frameLocation = m_bodyDisjunction->terms[beginTerm].frameLocation;
+
+ if (!m_bodyDisjunction->terms[beginTerm].alternative.next)
+ m_bodyDisjunction->terms.remove(beginTerm);
+ else {
+ while (m_bodyDisjunction->terms[beginTerm].alternative.next) {
+ beginTerm += m_bodyDisjunction->terms[beginTerm].alternative.next;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeAlternativeDisjunction);
+ m_bodyDisjunction->terms[beginTerm].alternative.end = endIndex - beginTerm;
+ m_bodyDisjunction->terms[beginTerm].frameLocation = frameLocation;
+ }
+
+ m_bodyDisjunction->terms[beginTerm].alternative.next = origBeginTerm - beginTerm;
+
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeEnd());
+ m_bodyDisjunction->terms[endIndex].frameLocation = frameLocation;
+ }
+ }
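+
+    // closeAlternative() finalizes a group's alternatives: a single-alternative
+    // group simply has its AlternativeBegin removed, while for multiple
+    // alternatives the chain of alternative.next offsets is walked, each
+    // disjunction's alternative.end is pointed (as a relative offset) at the
+    // AlternativeEnd appended here, and the last branch's next is wired back
+    // to the begin term.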
+
+ void closeBodyAlternative()
+ {
+ int beginTerm = 0;
+ int origBeginTerm = 0;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeBodyAlternativeBegin);
+ int endIndex = m_bodyDisjunction->terms.size();
+
+ unsigned frameLocation = m_bodyDisjunction->terms[beginTerm].frameLocation;
+
+ while (m_bodyDisjunction->terms[beginTerm].alternative.next) {
+ beginTerm += m_bodyDisjunction->terms[beginTerm].alternative.next;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeBodyAlternativeDisjunction);
+ m_bodyDisjunction->terms[beginTerm].alternative.end = endIndex - beginTerm;
+ m_bodyDisjunction->terms[beginTerm].frameLocation = frameLocation;
+ }
+
+ m_bodyDisjunction->terms[beginTerm].alternative.next = origBeginTerm - beginTerm;
+
+ m_bodyDisjunction->terms.append(ByteTerm::BodyAlternativeEnd());
+ m_bodyDisjunction->terms[endIndex].frameLocation = frameLocation;
+ }
+
+ void atomParenthesesSubpatternEnd(unsigned lastSubpatternId, int inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType, unsigned callFrameSize = 0)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+
+ ByteTerm& parenthesesBegin = m_bodyDisjunction->terms[beginTerm];
+
+ bool capture = parenthesesBegin.capture();
+ unsigned subpatternId = parenthesesBegin.atom.subpatternId;
+
+ unsigned numSubpatterns = lastSubpatternId - subpatternId + 1;
+ OwnPtr<ByteDisjunction> parenthesesDisjunction = adoptPtr(new ByteDisjunction(numSubpatterns, callFrameSize));
+
+ unsigned firstTermInParentheses = beginTerm + 1;
+ parenthesesDisjunction->terms.reserveInitialCapacity(endTerm - firstTermInParentheses + 2);
+
+ parenthesesDisjunction->terms.append(ByteTerm::SubpatternBegin());
+ for (unsigned termInParentheses = firstTermInParentheses; termInParentheses < endTerm; ++termInParentheses)
+ parenthesesDisjunction->terms.append(m_bodyDisjunction->terms[termInParentheses]);
+ parenthesesDisjunction->terms.append(ByteTerm::SubpatternEnd());
+
+ m_bodyDisjunction->terms.shrink(beginTerm);
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpattern, subpatternId, parenthesesDisjunction.get(), capture, inputPosition));
+ m_allParenthesesInfo.append(parenthesesDisjunction.release());
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[beginTerm].frameLocation = frameLocation;
+ }
+
+ void atomParenthesesOnceEnd(int inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+
+ bool capture = m_bodyDisjunction->terms[beginTerm].capture();
+ unsigned subpatternId = m_bodyDisjunction->terms[beginTerm].atom.subpatternId;
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternOnceEnd, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[beginTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].frameLocation = frameLocation;
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[endTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[endTerm].atom.quantityType = quantityType;
+ }
+
+ void atomParenthesesTerminalEnd(int inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParenthesesSubpatternTerminalBegin);
+
+ bool capture = m_bodyDisjunction->terms[beginTerm].capture();
+ unsigned subpatternId = m_bodyDisjunction->terms[beginTerm].atom.subpatternId;
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternTerminalEnd, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[beginTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].frameLocation = frameLocation;
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[endTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[endTerm].atom.quantityType = quantityType;
+ }
+
+ void regexBegin(unsigned numSubpatterns, unsigned callFrameSize, bool onceThrough)
+ {
+ m_bodyDisjunction = adoptPtr(new ByteDisjunction(numSubpatterns, callFrameSize));
+ m_bodyDisjunction->terms.append(ByteTerm::BodyAlternativeBegin(onceThrough));
+ m_bodyDisjunction->terms[0].frameLocation = 0;
+ m_currentAlternativeIndex = 0;
+ }
+
+ void regexEnd()
+ {
+ closeBodyAlternative();
+ }
+
+ void alternativeBodyDisjunction(bool onceThrough)
+ {
+ int newAlternativeIndex = m_bodyDisjunction->terms.size();
+ m_bodyDisjunction->terms[m_currentAlternativeIndex].alternative.next = newAlternativeIndex - m_currentAlternativeIndex;
+ m_bodyDisjunction->terms.append(ByteTerm::BodyAlternativeDisjunction(onceThrough));
+
+ m_currentAlternativeIndex = newAlternativeIndex;
+ }
+
+ void alternativeDisjunction()
+ {
+ int newAlternativeIndex = m_bodyDisjunction->terms.size();
+ m_bodyDisjunction->terms[m_currentAlternativeIndex].alternative.next = newAlternativeIndex - m_currentAlternativeIndex;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeDisjunction());
+
+ m_currentAlternativeIndex = newAlternativeIndex;
+ }
+
+ void emitDisjunction(PatternDisjunction* disjunction, unsigned inputCountAlreadyChecked = 0, unsigned parenthesesInputCountAlreadyChecked = 0)
+ {
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt) {
+ unsigned currentCountAlreadyChecked = inputCountAlreadyChecked;
+
+ PatternAlternative* alternative = disjunction->m_alternatives[alt].get();
+
+ if (alt) {
+ if (disjunction == m_pattern.m_body)
+ alternativeBodyDisjunction(alternative->onceThrough());
+ else
+ alternativeDisjunction();
+ }
+
+ unsigned minimumSize = alternative->m_minimumSize;
+ ASSERT(minimumSize >= parenthesesInputCountAlreadyChecked);
+ unsigned countToCheck = minimumSize - parenthesesInputCountAlreadyChecked;
+
+ if (countToCheck) {
+ checkInput(countToCheck);
+ currentCountAlreadyChecked += countToCheck;
+ }
+
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i) {
+ PatternTerm& term = alternative->m_terms[i];
+
+ switch (term.type) {
+ case PatternTerm::TypeAssertionBOL:
+ assertionBOL(currentCountAlreadyChecked - term.inputPosition);
+ break;
+
+ case PatternTerm::TypeAssertionEOL:
+ assertionEOL(currentCountAlreadyChecked - term.inputPosition);
+ break;
+
+ case PatternTerm::TypeAssertionWordBoundary:
+ assertionWordBoundary(term.invert(), currentCountAlreadyChecked - term.inputPosition);
+ break;
+
+ case PatternTerm::TypePatternCharacter:
+ atomPatternCharacter(term.patternCharacter, currentCountAlreadyChecked - term.inputPosition, term.frameLocation, term.quantityCount, term.quantityType);
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+                atomCharacterClass(term.characterClass, term.invert(), currentCountAlreadyChecked - term.inputPosition, term.frameLocation, term.quantityCount, term.quantityType);
+ break;
+
+ case PatternTerm::TypeBackReference:
+ atomBackReference(term.backReferenceSubpatternId, currentCountAlreadyChecked - term.inputPosition, term.frameLocation, term.quantityCount, term.quantityType);
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ case PatternTerm::TypeParenthesesSubpattern: {
+ unsigned disjunctionAlreadyCheckedCount = 0;
+ if (term.quantityCount == 1 && !term.parentheses.isCopy) {
+ unsigned alternativeFrameLocation = term.frameLocation;
+ // For QuantifierFixedCount we pre-check the minimum size; for greedy/non-greedy we reserve a slot in the frame.
+ if (term.quantityType == QuantifierFixedCount)
+ disjunctionAlreadyCheckedCount = term.parentheses.disjunction->m_minimumSize;
+ else
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ unsigned delegateEndInputOffset = term.inputPosition - currentCountAlreadyChecked;
+ atomParenthesesOnceBegin(term.parentheses.subpatternId, term.capture(), disjunctionAlreadyCheckedCount - delegateEndInputOffset, term.frameLocation, alternativeFrameLocation);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, disjunctionAlreadyCheckedCount);
+ atomParenthesesOnceEnd(delegateEndInputOffset, term.frameLocation, term.quantityCount, term.quantityType);
+ } else if (term.parentheses.isTerminal) {
+ unsigned delegateEndInputOffset = term.inputPosition - currentCountAlreadyChecked;
+ atomParenthesesTerminalBegin(term.parentheses.subpatternId, term.capture(), disjunctionAlreadyCheckedCount - delegateEndInputOffset, term.frameLocation, term.frameLocation + YarrStackSpaceForBackTrackInfoParenthesesOnce);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, disjunctionAlreadyCheckedCount);
+ atomParenthesesTerminalEnd(delegateEndInputOffset, term.frameLocation, term.quantityCount, term.quantityType);
+ } else {
+ unsigned delegateEndInputOffset = term.inputPosition - currentCountAlreadyChecked;
+ atomParenthesesSubpatternBegin(term.parentheses.subpatternId, term.capture(), disjunctionAlreadyCheckedCount - delegateEndInputOffset, term.frameLocation, 0);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, 0);
+ atomParenthesesSubpatternEnd(term.parentheses.lastSubpatternId, delegateEndInputOffset, term.frameLocation, term.quantityCount, term.quantityType, term.parentheses.disjunction->m_callFrameSize);
+ }
+ break;
+ }
+
+ case PatternTerm::TypeParentheticalAssertion: {
+ unsigned alternativeFrameLocation = term.frameLocation + YarrStackSpaceForBackTrackInfoParentheticalAssertion;
+
+ ASSERT(currentCountAlreadyChecked >= static_cast<unsigned>(term.inputPosition));
+ unsigned positiveInputOffset = currentCountAlreadyChecked - static_cast<unsigned>(term.inputPosition);
+ unsigned uncheckAmount = 0;
+ if (positiveInputOffset > term.parentheses.disjunction->m_minimumSize) {
+ uncheckAmount = positiveInputOffset - term.parentheses.disjunction->m_minimumSize;
+ uncheckInput(uncheckAmount);
+ currentCountAlreadyChecked -= uncheckAmount;
+ }
+
+ atomParentheticalAssertionBegin(term.parentheses.subpatternId, term.invert(), term.frameLocation, alternativeFrameLocation);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, positiveInputOffset - uncheckAmount);
+ atomParentheticalAssertionEnd(0, term.frameLocation, term.quantityCount, term.quantityType);
+ if (uncheckAmount) {
+ checkInput(uncheckAmount);
+ currentCountAlreadyChecked += uncheckAmount;
+ }
+ break;
+ }
+
+ case PatternTerm::TypeDotStarEnclosure:
+ assertionDotStarEnclosure(term.anchors.bolAnchor, term.anchors.eolAnchor);
+ break;
+ }
+ }
+ }
+ }
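+
+    // emitDisjunction() recursively lowers the parsed pattern tree into the flat
+    // term list; a minimal sketch for /a(b|c)/ would be roughly: CheckInput,
+    // PatternCharacterOnce 'a', ParenthesesSubpatternOnceBegin, AlternativeBegin,
+    // 'b', AlternativeDisjunction, 'c', AlternativeEnd,
+    // ParenthesesSubpatternOnceEnd. (The exact terms depend on quantifiers,
+    // captures, and input-position bookkeeping.)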
+
+private:
+ YarrPattern& m_pattern;
+ OwnPtr<ByteDisjunction> m_bodyDisjunction;
+ unsigned m_currentAlternativeIndex;
+ Vector<ParenthesesStackEntry> m_parenthesesStack;
+ Vector<OwnPtr<ByteDisjunction> > m_allParenthesesInfo;
+};
+
+PassOwnPtr<BytecodePattern> byteCompile(YarrPattern& pattern, BumpPointerAllocator* allocator)
+{
+ return ByteCompiler(pattern).compile(allocator);
+}
+
+unsigned interpret(BytecodePattern* bytecode, const String& input, unsigned start, unsigned* output)
+{
+ if (input.is8Bit())
+ return Interpreter<LChar>(bytecode, output, input.characters8(), input.length(), start).interpret();
+ return Interpreter<UChar>(bytecode, output, input.characters16(), input.length(), start).interpret();
+}
+
+unsigned interpret(BytecodePattern* bytecode, const LChar* input, unsigned length, unsigned start, unsigned* output)
+{
+ return Interpreter<LChar>(bytecode, output, input, length, start).interpret();
+}
+
+unsigned interpret(BytecodePattern* bytecode, const UChar* input, unsigned length, unsigned start, unsigned* output)
+{
+ return Interpreter<UChar>(bytecode, output, input, length, start).interpret();
+}
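+
+// The interpreter is instantiated once per character width; the String overload
+// above selects the 8-bit (LChar) or 16-bit (UChar) specialization to match the
+// string's internal representation.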
+
+// These should be the same for both UChar & LChar.
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoPatternCharacter) == (YarrStackSpaceForBackTrackInfoPatternCharacter * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoPatternCharacter);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoCharacterClass) == (YarrStackSpaceForBackTrackInfoCharacterClass * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoCharacterClass);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoBackReference) == (YarrStackSpaceForBackTrackInfoBackReference * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoBackReference);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoAlternative) == (YarrStackSpaceForBackTrackInfoAlternative * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoAlternative);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoParentheticalAssertion) == (YarrStackSpaceForBackTrackInfoParentheticalAssertion * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoParentheticalAssertion);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoParenthesesOnce) == (YarrStackSpaceForBackTrackInfoParenthesesOnce * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoParenthesesOnce);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoParentheses) == (YarrStackSpaceForBackTrackInfoParentheses * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoParentheses);
+
+
+} }
diff --git a/src/3rdparty/masm/yarr/YarrInterpreter.h b/src/3rdparty/masm/yarr/YarrInterpreter.h
new file mode 100644
index 0000000000..3b44acbd2b
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrInterpreter.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrInterpreter_h
+#define YarrInterpreter_h
+
+#include "YarrPattern.h"
+#include <wtf/PassOwnPtr.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace WTF {
+class BumpPointerAllocator;
+}
+using WTF::BumpPointerAllocator;
+
+namespace JSC { namespace Yarr {
+
+class ByteDisjunction;
+
+struct ByteTerm {
+ enum Type {
+ TypeBodyAlternativeBegin,
+ TypeBodyAlternativeDisjunction,
+ TypeBodyAlternativeEnd,
+ TypeAlternativeBegin,
+ TypeAlternativeDisjunction,
+ TypeAlternativeEnd,
+ TypeSubpatternBegin,
+ TypeSubpatternEnd,
+ TypeAssertionBOL,
+ TypeAssertionEOL,
+ TypeAssertionWordBoundary,
+ TypePatternCharacterOnce,
+ TypePatternCharacterFixed,
+ TypePatternCharacterGreedy,
+ TypePatternCharacterNonGreedy,
+ TypePatternCasedCharacterOnce,
+ TypePatternCasedCharacterFixed,
+ TypePatternCasedCharacterGreedy,
+ TypePatternCasedCharacterNonGreedy,
+ TypeCharacterClass,
+ TypeBackReference,
+ TypeParenthesesSubpattern,
+ TypeParenthesesSubpatternOnceBegin,
+ TypeParenthesesSubpatternOnceEnd,
+ TypeParenthesesSubpatternTerminalBegin,
+ TypeParenthesesSubpatternTerminalEnd,
+ TypeParentheticalAssertionBegin,
+ TypeParentheticalAssertionEnd,
+ TypeCheckInput,
+ TypeUncheckInput,
+ TypeDotStarEnclosure,
+ } type;
+ union {
+ struct {
+ union {
+ UChar patternCharacter;
+ struct {
+ UChar lo;
+ UChar hi;
+ } casedCharacter;
+ CharacterClass* characterClass;
+ unsigned subpatternId;
+ };
+ union {
+ ByteDisjunction* parenthesesDisjunction;
+ unsigned parenthesesWidth;
+ };
+ QuantifierType quantityType;
+ unsigned quantityCount;
+ } atom;
+ struct {
+ int next;
+ int end;
+ bool onceThrough;
+ } alternative;
+ struct {
+ bool m_bol : 1;
+ bool m_eol : 1;
+ } anchors;
+ unsigned checkInputCount;
+ };
+ unsigned frameLocation;
+ bool m_capture : 1;
+ bool m_invert : 1;
+ unsigned inputPosition;
+
+ ByteTerm(UChar ch, int inputPos, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ : frameLocation(frameLocation)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ switch (quantityType) {
+ case QuantifierFixedCount:
+ type = (quantityCount == 1) ? ByteTerm::TypePatternCharacterOnce : ByteTerm::TypePatternCharacterFixed;
+ break;
+ case QuantifierGreedy:
+ type = ByteTerm::TypePatternCharacterGreedy;
+ break;
+ case QuantifierNonGreedy:
+ type = ByteTerm::TypePatternCharacterNonGreedy;
+ break;
+ }
+
+ atom.patternCharacter = ch;
+ atom.quantityType = quantityType;
+ atom.quantityCount = quantityCount.unsafeGet();
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(UChar lo, UChar hi, int inputPos, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ : frameLocation(frameLocation)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ switch (quantityType) {
+ case QuantifierFixedCount:
+ type = (quantityCount == 1) ? ByteTerm::TypePatternCasedCharacterOnce : ByteTerm::TypePatternCasedCharacterFixed;
+ break;
+ case QuantifierGreedy:
+ type = ByteTerm::TypePatternCasedCharacterGreedy;
+ break;
+ case QuantifierNonGreedy:
+ type = ByteTerm::TypePatternCasedCharacterNonGreedy;
+ break;
+ }
+
+ atom.casedCharacter.lo = lo;
+ atom.casedCharacter.hi = hi;
+ atom.quantityType = quantityType;
+ atom.quantityCount = quantityCount.unsafeGet();
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(CharacterClass* characterClass, bool invert, int inputPos)
+ : type(ByteTerm::TypeCharacterClass)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ atom.characterClass = characterClass;
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(Type type, unsigned subpatternId, ByteDisjunction* parenthesesInfo, bool capture, int inputPos)
+ : type(type)
+ , m_capture(capture)
+ , m_invert(false)
+ {
+ atom.subpatternId = subpatternId;
+ atom.parenthesesDisjunction = parenthesesInfo;
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(Type type, bool invert = false)
+ : type(type)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ }
+
+ ByteTerm(Type type, unsigned subpatternId, bool capture, bool invert, int inputPos)
+ : type(type)
+ , m_capture(capture)
+ , m_invert(invert)
+ {
+ atom.subpatternId = subpatternId;
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ inputPosition = inputPos;
+ }
+
+ static ByteTerm BOL(int inputPos)
+ {
+ ByteTerm term(TypeAssertionBOL);
+ term.inputPosition = inputPos;
+ return term;
+ }
+
+ static ByteTerm CheckInput(Checked<unsigned> count)
+ {
+ ByteTerm term(TypeCheckInput);
+ term.checkInputCount = count.unsafeGet();
+ return term;
+ }
+
+ static ByteTerm UncheckInput(Checked<unsigned> count)
+ {
+ ByteTerm term(TypeUncheckInput);
+ term.checkInputCount = count.unsafeGet();
+ return term;
+ }
+
+ static ByteTerm EOL(int inputPos)
+ {
+ ByteTerm term(TypeAssertionEOL);
+ term.inputPosition = inputPos;
+ return term;
+ }
+
+ static ByteTerm WordBoundary(bool invert, int inputPos)
+ {
+ ByteTerm term(TypeAssertionWordBoundary, invert);
+ term.inputPosition = inputPos;
+ return term;
+ }
+
+ static ByteTerm BackReference(unsigned subpatternId, int inputPos)
+ {
+ return ByteTerm(TypeBackReference, subpatternId, false, false, inputPos);
+ }
+
+ static ByteTerm BodyAlternativeBegin(bool onceThrough)
+ {
+ ByteTerm term(TypeBodyAlternativeBegin);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = onceThrough;
+ return term;
+ }
+
+ static ByteTerm BodyAlternativeDisjunction(bool onceThrough)
+ {
+ ByteTerm term(TypeBodyAlternativeDisjunction);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = onceThrough;
+ return term;
+ }
+
+ static ByteTerm BodyAlternativeEnd()
+ {
+ ByteTerm term(TypeBodyAlternativeEnd);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm AlternativeBegin()
+ {
+ ByteTerm term(TypeAlternativeBegin);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm AlternativeDisjunction()
+ {
+ ByteTerm term(TypeAlternativeDisjunction);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm AlternativeEnd()
+ {
+ ByteTerm term(TypeAlternativeEnd);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm SubpatternBegin()
+ {
+ return ByteTerm(TypeSubpatternBegin);
+ }
+
+ static ByteTerm SubpatternEnd()
+ {
+ return ByteTerm(TypeSubpatternEnd);
+ }
+
+ static ByteTerm DotStarEnclosure(bool bolAnchor, bool eolAnchor)
+ {
+ ByteTerm term(TypeDotStarEnclosure);
+ term.anchors.m_bol = bolAnchor;
+ term.anchors.m_eol = eolAnchor;
+ return term;
+ }
+
+ bool invert()
+ {
+ return m_invert;
+ }
+
+ bool capture()
+ {
+ return m_capture;
+ }
+};
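+
+// ByteTerm is a tagged union: the 'type' field discriminates the overlapping
+// payloads above, keeping the interpreter's term array dense and cheap to
+// index by context->term.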
+
+class ByteDisjunction {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ByteDisjunction(unsigned numSubpatterns, unsigned frameSize)
+ : m_numSubpatterns(numSubpatterns)
+ , m_frameSize(frameSize)
+ {
+ }
+
+ Vector<ByteTerm> terms;
+ unsigned m_numSubpatterns;
+ unsigned m_frameSize;
+};
+
+struct BytecodePattern {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ BytecodePattern(PassOwnPtr<ByteDisjunction> body, Vector<OwnPtr<ByteDisjunction> >& parenthesesInfoToAdopt, YarrPattern& pattern, BumpPointerAllocator* allocator)
+ : m_body(body)
+ , m_ignoreCase(pattern.m_ignoreCase)
+ , m_multiline(pattern.m_multiline)
+ , m_allocator(allocator)
+ {
+ m_body->terms.shrinkToFit();
+
+ newlineCharacterClass = pattern.newlineCharacterClass();
+ wordcharCharacterClass = pattern.wordcharCharacterClass();
+
+ m_allParenthesesInfo.swap(parenthesesInfoToAdopt);
+ m_allParenthesesInfo.shrinkToFit();
+
+ m_userCharacterClasses.swap(pattern.m_userCharacterClasses);
+ m_userCharacterClasses.shrinkToFit();
+ }
+
+ OwnPtr<ByteDisjunction> m_body;
+ bool m_ignoreCase;
+ bool m_multiline;
+    // Each BytecodePattern is associated with a RegExp, and each RegExp is associated
+    // with a JSGlobalData. Cache a pointer to our JSGlobalData's m_regExpAllocator.
+ BumpPointerAllocator* m_allocator;
+
+ CharacterClass* newlineCharacterClass;
+ CharacterClass* wordcharCharacterClass;
+
+private:
+ Vector<OwnPtr<ByteDisjunction> > m_allParenthesesInfo;
+ Vector<OwnPtr<CharacterClass> > m_userCharacterClasses;
+};
+
+JS_EXPORT_PRIVATE PassOwnPtr<BytecodePattern> byteCompile(YarrPattern&, BumpPointerAllocator*);
+JS_EXPORT_PRIVATE unsigned interpret(BytecodePattern*, const String& input, unsigned start, unsigned* output);
+unsigned interpret(BytecodePattern*, const LChar* input, unsigned length, unsigned start, unsigned* output);
+unsigned interpret(BytecodePattern*, const UChar* input, unsigned length, unsigned start, unsigned* output);
+
+} } // namespace JSC::Yarr
+
+#endif // YarrInterpreter_h
diff --git a/src/3rdparty/masm/yarr/YarrJIT.cpp b/src/3rdparty/masm/yarr/YarrJIT.cpp
new file mode 100644
index 0000000000..20b26c1eb9
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrJIT.cpp
@@ -0,0 +1,2702 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrJIT.h"
+
+#include <wtf/ASCIICType.h>
+#include "LinkBuffer.h"
+#include "Options.h"
+#include "Yarr.h"
+#include "YarrCanonicalizeUCS2.h"
+
+#if ENABLE(YARR_JIT)
+
+using namespace WTF;
+
+namespace JSC { namespace Yarr {
+
+template<YarrJITCompileMode compileMode>
+class YarrGenerator : private MacroAssembler {
+ friend void jitCompile(JSGlobalData*, YarrCodeBlock& jitObject, const String& pattern, unsigned& numSubpatterns, const char*& error, bool ignoreCase, bool multiline);
+
+#if CPU(ARM)
+ static const RegisterID input = ARMRegisters::r0;
+ static const RegisterID index = ARMRegisters::r1;
+ static const RegisterID length = ARMRegisters::r2;
+ static const RegisterID output = ARMRegisters::r4;
+
+ static const RegisterID regT0 = ARMRegisters::r5;
+ static const RegisterID regT1 = ARMRegisters::r6;
+
+ static const RegisterID returnRegister = ARMRegisters::r0;
+ static const RegisterID returnRegister2 = ARMRegisters::r1;
+#elif CPU(MIPS)
+ static const RegisterID input = MIPSRegisters::a0;
+ static const RegisterID index = MIPSRegisters::a1;
+ static const RegisterID length = MIPSRegisters::a2;
+ static const RegisterID output = MIPSRegisters::a3;
+
+ static const RegisterID regT0 = MIPSRegisters::t4;
+ static const RegisterID regT1 = MIPSRegisters::t5;
+
+ static const RegisterID returnRegister = MIPSRegisters::v0;
+ static const RegisterID returnRegister2 = MIPSRegisters::v1;
+#elif CPU(SH4)
+ static const RegisterID input = SH4Registers::r4;
+ static const RegisterID index = SH4Registers::r5;
+ static const RegisterID length = SH4Registers::r6;
+ static const RegisterID output = SH4Registers::r7;
+
+ static const RegisterID regT0 = SH4Registers::r0;
+ static const RegisterID regT1 = SH4Registers::r1;
+
+ static const RegisterID returnRegister = SH4Registers::r0;
+ static const RegisterID returnRegister2 = SH4Registers::r1;
+#elif CPU(X86)
+ static const RegisterID input = X86Registers::eax;
+ static const RegisterID index = X86Registers::edx;
+ static const RegisterID length = X86Registers::ecx;
+ static const RegisterID output = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::ebx;
+ static const RegisterID regT1 = X86Registers::esi;
+
+ static const RegisterID returnRegister = X86Registers::eax;
+ static const RegisterID returnRegister2 = X86Registers::edx;
+#elif CPU(X86_64)
+#if !OS(WINDOWS)
+ static const RegisterID input = X86Registers::edi;
+ static const RegisterID index = X86Registers::esi;
+ static const RegisterID length = X86Registers::edx;
+ static const RegisterID output = X86Registers::ecx;
+#else
+    // If the return value doesn't fit in 64 bits, its destination is pointed to by rcx and the parameters are shifted.
+ // http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
+ COMPILE_ASSERT(sizeof(MatchResult) > sizeof(void*), MatchResult_does_not_fit_in_64bits);
+ static const RegisterID input = X86Registers::edx;
+ static const RegisterID index = X86Registers::r8;
+ static const RegisterID length = X86Registers::r9;
+ static const RegisterID output = X86Registers::r10;
+#endif
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::ebx;
+
+ static const RegisterID returnRegister = X86Registers::eax;
+ static const RegisterID returnRegister2 = X86Registers::edx;
+#endif
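+
+    // Whichever target we compile for, the registers above play the same roles:
+    // 'input' holds the subject string, 'index' the current position, 'length'
+    // the string length, and 'output' the results vector (or the match start in
+    // MatchOnly mode); regT0/regT1 are scratch, and returnRegister/returnRegister2
+    // carry the match bounds back to the caller as a MatchResult.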
+
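+    // Peephole optimization: where a fixed-count character class is immediately
+    // followed by a fixed-count single pattern character (e.g. /[a-z]x/), swap
+    // the two terms so that the cheaper single-character comparison is generated
+    // first, failing fast before the more expensive character class check.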
+ void optimizeAlternative(PatternAlternative* alternative)
+ {
+ if (!alternative->m_terms.size())
+ return;
+
+ for (unsigned i = 0; i < alternative->m_terms.size() - 1; ++i) {
+ PatternTerm& term = alternative->m_terms[i];
+ PatternTerm& nextTerm = alternative->m_terms[i + 1];
+
+ if ((term.type == PatternTerm::TypeCharacterClass)
+ && (term.quantityType == QuantifierFixedCount)
+ && (nextTerm.type == PatternTerm::TypePatternCharacter)
+ && (nextTerm.quantityType == QuantifierFixedCount)) {
+ PatternTerm termCopy = term;
+ alternative->m_terms[i] = nextTerm;
+ alternative->m_terms[i + 1] = termCopy;
+ }
+ }
+ }
+
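+    // Emits a binary search over a sorted list of character ranges, testing any
+    // interleaved single-character matches along the way. Each step picks the
+    // middle range [lo, hi]: values below lo are handled by recursing over the
+    // lower ranges (and testing the matches below lo), the range itself is then
+    // tested, and the loop continues on whatever remains above hi. E.g. for
+    // [a-dqx-z] the generated code first branches on 'x', recursing over [a-d]
+    // and the lone 'q' for characters below it.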
+ void matchCharacterClassRange(RegisterID character, JumpList& failures, JumpList& matchDest, const CharacterRange* ranges, unsigned count, unsigned* matchIndex, const UChar* matches, unsigned matchCount)
+ {
+ do {
+ // pick which range we're going to generate
+ int which = count >> 1;
+ char lo = ranges[which].begin;
+ char hi = ranges[which].end;
+
+            // Check if there are any ranges or matches below lo. If not, just jl to failure;
+            // if there is anything else to check, check that first, and if it falls through jmp to failure.
+ if ((*matchIndex < matchCount) && (matches[*matchIndex] < lo)) {
+ Jump loOrAbove = branch32(GreaterThanOrEqual, character, Imm32((unsigned short)lo));
+
+ // generate code for all ranges before this one
+ if (which)
+ matchCharacterClassRange(character, failures, matchDest, ranges, which, matchIndex, matches, matchCount);
+
+ while ((*matchIndex < matchCount) && (matches[*matchIndex] < lo)) {
+ matchDest.append(branch32(Equal, character, Imm32((unsigned short)matches[*matchIndex])));
+ ++*matchIndex;
+ }
+ failures.append(jump());
+
+ loOrAbove.link(this);
+ } else if (which) {
+ Jump loOrAbove = branch32(GreaterThanOrEqual, character, Imm32((unsigned short)lo));
+
+ matchCharacterClassRange(character, failures, matchDest, ranges, which, matchIndex, matches, matchCount);
+ failures.append(jump());
+
+ loOrAbove.link(this);
+ } else
+ failures.append(branch32(LessThan, character, Imm32((unsigned short)lo)));
+
+ while ((*matchIndex < matchCount) && (matches[*matchIndex] <= hi))
+ ++*matchIndex;
+
+ matchDest.append(branch32(LessThanOrEqual, character, Imm32((unsigned short)hi)));
+ // fall through to here, the value is above hi.
+
+ // shuffle along & loop around if there are any more matches to handle.
+ unsigned next = which + 1;
+ ranges += next;
+ count -= next;
+ } while (count);
+ }
+
+ void matchCharacterClass(RegisterID character, JumpList& matchDest, const CharacterClass* charClass)
+ {
+ if (charClass->m_table) {
+ ExtendedAddress tableEntry(character, reinterpret_cast<intptr_t>(charClass->m_table));
+ matchDest.append(branchTest8(charClass->m_tableInverted ? Zero : NonZero, tableEntry));
+ return;
+ }
+ Jump unicodeFail;
+ if (charClass->m_matchesUnicode.size() || charClass->m_rangesUnicode.size()) {
+ Jump isAscii = branch32(LessThanOrEqual, character, TrustedImm32(0x7f));
+
+ if (charClass->m_matchesUnicode.size()) {
+ for (unsigned i = 0; i < charClass->m_matchesUnicode.size(); ++i) {
+ UChar ch = charClass->m_matchesUnicode[i];
+ matchDest.append(branch32(Equal, character, Imm32(ch)));
+ }
+ }
+
+ if (charClass->m_rangesUnicode.size()) {
+ for (unsigned i = 0; i < charClass->m_rangesUnicode.size(); ++i) {
+ UChar lo = charClass->m_rangesUnicode[i].begin;
+ UChar hi = charClass->m_rangesUnicode[i].end;
+
+ Jump below = branch32(LessThan, character, Imm32(lo));
+ matchDest.append(branch32(LessThanOrEqual, character, Imm32(hi)));
+ below.link(this);
+ }
+ }
+
+ unicodeFail = jump();
+ isAscii.link(this);
+ }
+
+ if (charClass->m_ranges.size()) {
+ unsigned matchIndex = 0;
+ JumpList failures;
+ matchCharacterClassRange(character, failures, matchDest, charClass->m_ranges.begin(), charClass->m_ranges.size(), &matchIndex, charClass->m_matches.begin(), charClass->m_matches.size());
+ while (matchIndex < charClass->m_matches.size())
+ matchDest.append(branch32(Equal, character, Imm32((unsigned short)charClass->m_matches[matchIndex++])));
+
+ failures.link(this);
+ } else if (charClass->m_matches.size()) {
+ // optimization: gather 'a','A' etc back together, can mask & test once.
+ Vector<char> matchesAZaz;
+
+ for (unsigned i = 0; i < charClass->m_matches.size(); ++i) {
+ char ch = charClass->m_matches[i];
+ if (m_pattern.m_ignoreCase) {
+ if (isASCIILower(ch)) {
+ matchesAZaz.append(ch);
+ continue;
+ }
+ if (isASCIIUpper(ch))
+ continue;
+ }
+ matchDest.append(branch32(Equal, character, Imm32((unsigned short)ch)));
+ }
+
+ if (unsigned countAZaz = matchesAZaz.size()) {
+ or32(TrustedImm32(32), character);
+ for (unsigned i = 0; i < countAZaz; ++i)
+ matchDest.append(branch32(Equal, character, TrustedImm32(matchesAZaz[i])));
+ }
+ }
+
+ if (charClass->m_matchesUnicode.size() || charClass->m_rangesUnicode.size())
+ unicodeFail.link(this);
+ }
+
+ // Jumps if input not available; will have (incorrectly) incremented already!
+ Jump jumpIfNoAvailableInput(unsigned countToCheck = 0)
+ {
+ if (countToCheck)
+ add32(Imm32(countToCheck), index);
+ return branch32(Above, index, length);
+ }
+
+ Jump jumpIfAvailableInput(unsigned countToCheck)
+ {
+ add32(Imm32(countToCheck), index);
+ return branch32(BelowOrEqual, index, length);
+ }
+
+ Jump checkInput()
+ {
+ return branch32(BelowOrEqual, index, length);
+ }
+
+ Jump atEndOfInput()
+ {
+ return branch32(Equal, index, length);
+ }
+
+ Jump notAtEndOfInput()
+ {
+ return branch32(NotEqual, index, length);
+ }
+
+ Jump jumpIfCharNotEquals(UChar ch, int inputPosition, RegisterID character)
+ {
+ readCharacter(inputPosition, character);
+
+        // For case-insensitive compares, non-ASCII characters that have different
+        // upper & lower case representations are converted to a character class.
+ ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(ch) || isCanonicallyUnique(ch));
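+        // OR-ing in 0x20 folds an ASCII upper case letter onto its lower case
+        // equivalent ('A' -> 'a'), so a single comparison covers both cases.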
+ if (m_pattern.m_ignoreCase && isASCIIAlpha(ch)) {
+ or32(TrustedImm32(0x20), character);
+ ch |= 0x20;
+ }
+
+ return branch32(NotEqual, character, Imm32(ch));
+ }
+
+ void readCharacter(int inputPosition, RegisterID reg)
+ {
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, index, TimesOne, inputPosition * sizeof(char)), reg);
+ else
+ load16(BaseIndex(input, index, TimesTwo, inputPosition * sizeof(UChar)), reg);
+ }
+
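+    // The matcher keeps its backtracking state in a small 'call frame' carved
+    // out of the machine stack (see initCallFrame() below); these helpers peek
+    // and poke word-sized slots of that frame relative to the stack pointer.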
+ void storeToFrame(RegisterID reg, unsigned frameLocation)
+ {
+ poke(reg, frameLocation);
+ }
+
+ void storeToFrame(TrustedImm32 imm, unsigned frameLocation)
+ {
+ poke(imm, frameLocation);
+ }
+
+ DataLabelPtr storeToFrameWithPatch(unsigned frameLocation)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), Address(stackPointerRegister, frameLocation * sizeof(void*)));
+ }
+
+ void loadFromFrame(unsigned frameLocation, RegisterID reg)
+ {
+ peek(reg, frameLocation);
+ }
+
+ void loadFromFrameAndJump(unsigned frameLocation)
+ {
+ jump(Address(stackPointerRegister, frameLocation * sizeof(void*)));
+ }
+
+ void initCallFrame()
+ {
+ unsigned callFrameSize = m_pattern.m_body->m_callFrameSize;
+ if (callFrameSize)
+ subPtr(Imm32(callFrameSize * sizeof(void*)), stackPointerRegister);
+ }
+ void removeCallFrame()
+ {
+ unsigned callFrameSize = m_pattern.m_body->m_callFrameSize;
+ if (callFrameSize)
+ addPtr(Imm32(callFrameSize * sizeof(void*)), stackPointerRegister);
+ }
+
+    // Used to record subpatterns; should only be called if compileMode is IncludeSubpatterns.
+ void setSubpatternStart(RegisterID reg, unsigned subpattern)
+ {
+ ASSERT(subpattern);
+ // FIXME: should be able to ASSERT(compileMode == IncludeSubpatterns), but then this function is conditionally NORETURN. :-(
+ store32(reg, Address(output, (subpattern << 1) * sizeof(int)));
+ }
+ void setSubpatternEnd(RegisterID reg, unsigned subpattern)
+ {
+ ASSERT(subpattern);
+ // FIXME: should be able to ASSERT(compileMode == IncludeSubpatterns), but then this function is conditionally NORETURN. :-(
+ store32(reg, Address(output, ((subpattern << 1) + 1) * sizeof(int)));
+ }
+ void clearSubpatternStart(unsigned subpattern)
+ {
+ ASSERT(subpattern);
+ // FIXME: should be able to ASSERT(compileMode == IncludeSubpatterns), but then this function is conditionally NORETURN. :-(
+ store32(TrustedImm32(-1), Address(output, (subpattern << 1) * sizeof(int)));
+ }
+
+    // We use one of three different strategies to track the start of the current
+    // match while matching.
+ // 1) If the pattern has a fixed size, do nothing! - we calculate the value lazily
+ // at the end of matching. This is irrespective of compileMode, and in this case
+ // these methods should never be called.
+ // 2) If we're compiling IncludeSubpatterns, 'output' contains a pointer to an output
+ // vector, store the match start in the output vector.
+ // 3) If we're compiling MatchOnly, 'output' is unused, store the match start directly
+ // in this register.
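+    //
+    // For example, /abc/ has a fixed size of 3, so on success the match start
+    // is simply the end index minus 3. A variable sized pattern such as /a+b/
+    // stores its start either to the first output slot (IncludeSubpatterns) or
+    // directly in the otherwise unused 'output' register (MatchOnly).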
+ void setMatchStart(RegisterID reg)
+ {
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ if (compileMode == IncludeSubpatterns)
+ store32(reg, output);
+ else
+ move(reg, output);
+ }
+ void getMatchStart(RegisterID reg)
+ {
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ if (compileMode == IncludeSubpatterns)
+ load32(output, reg);
+ else
+ move(output, reg);
+ }
+
+ enum YarrOpCode {
+ // These nodes wrap body alternatives - those in the main disjunction,
+ // rather than subpatterns or assertions. These are chained together in
+ // a doubly linked list, with a 'begin' node for the first alternative,
+ // a 'next' node for each subsequent alternative, and an 'end' node at
+ // the end. In the case of repeating alternatives, the 'end' node also
+ // has a reference back to 'begin'.
+ OpBodyAlternativeBegin,
+ OpBodyAlternativeNext,
+ OpBodyAlternativeEnd,
+ // Similar to the body alternatives, but used for subpatterns with two
+ // or more alternatives.
+ OpNestedAlternativeBegin,
+ OpNestedAlternativeNext,
+ OpNestedAlternativeEnd,
+    // Used for alternatives in subpatterns where there is only a single
+    // alternative (backtracking is easier in these cases), or for alternatives
+ // which never need to be backtracked (those in parenthetical assertions,
+ // terminal subpatterns).
+ OpSimpleNestedAlternativeBegin,
+ OpSimpleNestedAlternativeNext,
+ OpSimpleNestedAlternativeEnd,
+ // Used to wrap 'Once' subpattern matches (quantityCount == 1).
+ OpParenthesesSubpatternOnceBegin,
+ OpParenthesesSubpatternOnceEnd,
+ // Used to wrap 'Terminal' subpattern matches (at the end of the regexp).
+ OpParenthesesSubpatternTerminalBegin,
+ OpParenthesesSubpatternTerminalEnd,
+ // Used to wrap parenthetical assertions.
+ OpParentheticalAssertionBegin,
+ OpParentheticalAssertionEnd,
+ // Wraps all simple terms (pattern characters, character classes).
+ OpTerm,
+ // Where an expression contains only 'once through' body alternatives
+ // and no repeating ones, this op is used to return match failure.
+ OpMatchFailed
+ };
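+
+    // As an illustration, a body disjunction of two alternatives such as
+    // /ab|cd/ compiles to a chain along the lines of:
+    //   OpBodyAlternativeBegin, OpTerm('a'), OpTerm('b'),
+    //   OpBodyAlternativeNext, OpTerm('c'), OpTerm('d'),
+    //   OpBodyAlternativeEnd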
+
+ // This structure is used to hold the compiled opcode information,
+ // including reference back to the original PatternTerm/PatternAlternatives,
+ // and JIT compilation data structures.
+ struct YarrOp {
+ explicit YarrOp(PatternTerm* term)
+ : m_op(OpTerm)
+ , m_term(term)
+ , m_isDeadCode(false)
+ {
+ }
+
+ explicit YarrOp(YarrOpCode op)
+ : m_op(op)
+ , m_isDeadCode(false)
+ {
+ }
+
+ // The operation, as a YarrOpCode, and also a reference to the PatternTerm.
+ YarrOpCode m_op;
+ PatternTerm* m_term;
+
+ // For alternatives, this holds the PatternAlternative and doubly linked
+ // references to this alternative's siblings. In the case of the
+ // OpBodyAlternativeEnd node at the end of a section of repeating nodes,
+ // m_nextOp will reference the OpBodyAlternativeBegin node of the first
+ // repeating alternative.
+ PatternAlternative* m_alternative;
+ size_t m_previousOp;
+ size_t m_nextOp;
+
+ // Used to record a set of Jumps out of the generated code, typically
+ // used for jumps out to backtracking code, and a single reentry back
+ // into the code for a node (likely where a backtrack will trigger
+ // rematching).
+ Label m_reentry;
+ JumpList m_jumps;
+
+ // Used for backtracking when the prior alternative did not consume any
+ // characters but matched.
+ Jump m_zeroLengthMatch;
+
+ // This flag is used to null out the second pattern character, when
+ // two are fused to match a pair together.
+ bool m_isDeadCode;
+
+ // Currently used in the case of some of the more complex management of
+ // 'm_checked', to cache the offset used in this alternative, to avoid
+ // recalculating it.
+ int m_checkAdjust;
+
+ // Used by OpNestedAlternativeNext/End to hold the pointer to the
+ // value that will be pushed into the pattern's frame to return to,
+ // upon backtracking back into the disjunction.
+ DataLabelPtr m_returnAddress;
+ };
+
+ // BacktrackingState
+ // This class encapsulates information about the state of code generation
+ // whilst generating the code for backtracking, when a term fails to match.
+ // Upon entry to code generation of the backtracking code for a given node,
+ // the Backtracking state will hold references to all control flow sources
+ // that are outputs in need of further backtracking from the prior node
+ // generated (which is the subsequent operation in the regular expression,
+ // and in the m_ops Vector, since we generated backtracking backwards).
+ // These references to control flow take the form of:
+ // - A jump list of jumps, to be linked to code that will backtrack them
+ // further.
+ // - A set of DataLabelPtr values, to be populated with values to be
+ // treated effectively as return addresses backtracking into complex
+ // subpatterns.
+ // - A flag indicating that the current sequence of generated code up to
+ // this point requires backtracking.
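+    //
+    // In practice the forwards pass appends each term's failure jumps here and
+    // the backwards pass consumes them: backtrackPatternCharacterGreedy(), for
+    // example, first calls link(this) to bind all outstanding failures to its
+    // entry point, then append()s the jump taken once its counter reaches zero.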
+ class BacktrackingState {
+ public:
+ BacktrackingState()
+ : m_pendingFallthrough(false)
+ {
+ }
+
+ // Add a jump or jumps, a return address, or set the flag indicating
+ // that the current 'fallthrough' control flow requires backtracking.
+ void append(const Jump& jump)
+ {
+ m_laterFailures.append(jump);
+ }
+ void append(JumpList& jumpList)
+ {
+ m_laterFailures.append(jumpList);
+ }
+ void append(const DataLabelPtr& returnAddress)
+ {
+ m_pendingReturns.append(returnAddress);
+ }
+ void fallthrough()
+ {
+ ASSERT(!m_pendingFallthrough);
+ m_pendingFallthrough = true;
+ }
+
+ // These methods clear the backtracking state, either linking to the
+ // current location, a provided label, or copying the backtracking out
+ // to a JumpList. All actions may require code generation to take place,
+ // and as such are passed a pointer to the assembler.
+ void link(MacroAssembler* assembler)
+ {
+ if (m_pendingReturns.size()) {
+ Label here(assembler);
+ for (unsigned i = 0; i < m_pendingReturns.size(); ++i)
+ m_backtrackRecords.append(ReturnAddressRecord(m_pendingReturns[i], here));
+ m_pendingReturns.clear();
+ }
+ m_laterFailures.link(assembler);
+ m_laterFailures.clear();
+ m_pendingFallthrough = false;
+ }
+ void linkTo(Label label, MacroAssembler* assembler)
+ {
+ if (m_pendingReturns.size()) {
+ for (unsigned i = 0; i < m_pendingReturns.size(); ++i)
+ m_backtrackRecords.append(ReturnAddressRecord(m_pendingReturns[i], label));
+ m_pendingReturns.clear();
+ }
+ if (m_pendingFallthrough)
+ assembler->jump(label);
+ m_laterFailures.linkTo(label, assembler);
+ m_laterFailures.clear();
+ m_pendingFallthrough = false;
+ }
+ void takeBacktracksToJumpList(JumpList& jumpList, MacroAssembler* assembler)
+ {
+ if (m_pendingReturns.size()) {
+ Label here(assembler);
+ for (unsigned i = 0; i < m_pendingReturns.size(); ++i)
+ m_backtrackRecords.append(ReturnAddressRecord(m_pendingReturns[i], here));
+ m_pendingReturns.clear();
+ m_pendingFallthrough = true;
+ }
+ if (m_pendingFallthrough)
+ jumpList.append(assembler->jump());
+ jumpList.append(m_laterFailures);
+ m_laterFailures.clear();
+ m_pendingFallthrough = false;
+ }
+
+ bool isEmpty()
+ {
+ return m_laterFailures.empty() && m_pendingReturns.isEmpty() && !m_pendingFallthrough;
+ }
+
+ // Called at the end of code generation to link all return addresses.
+ void linkDataLabels(LinkBuffer& linkBuffer)
+ {
+ ASSERT(isEmpty());
+ for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
+ linkBuffer.patch(m_backtrackRecords[i].m_dataLabel, linkBuffer.locationOf(m_backtrackRecords[i].m_backtrackLocation));
+ }
+
+ private:
+ struct ReturnAddressRecord {
+ ReturnAddressRecord(DataLabelPtr dataLabel, Label backtrackLocation)
+ : m_dataLabel(dataLabel)
+ , m_backtrackLocation(backtrackLocation)
+ {
+ }
+
+ DataLabelPtr m_dataLabel;
+ Label m_backtrackLocation;
+ };
+
+ JumpList m_laterFailures;
+ bool m_pendingFallthrough;
+ Vector<DataLabelPtr, 4> m_pendingReturns;
+ Vector<ReturnAddressRecord, 4> m_backtrackRecords;
+ };
+
+ // Generation methods:
+ // ===================
+
+ // This method provides a default implementation of backtracking common
+ // to many terms; terms commonly jump out of the forwards matching path
+ // on any failed conditions, and add these jumps to the m_jumps list. If
+ // no special handling is required we can often just backtrack to m_jumps.
+ void backtrackTermDefault(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ m_backtrackingState.append(op.m_jumps);
+ }
+
+ void generateAssertionBOL(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ if (m_pattern.m_multiline) {
+ const RegisterID character = regT0;
+
+ JumpList matchDest;
+ if (!term->inputPosition)
+ matchDest.append(branch32(Equal, index, Imm32(m_checked)));
+
+ readCharacter((term->inputPosition - m_checked) - 1, character);
+ matchCharacterClass(character, matchDest, m_pattern.newlineCharacterClass());
+ op.m_jumps.append(jump());
+
+ matchDest.link(this);
+ } else {
+ // Erk, really should poison out these alternatives early. :-/
+ if (term->inputPosition)
+ op.m_jumps.append(jump());
+ else
+ op.m_jumps.append(branch32(NotEqual, index, Imm32(m_checked)));
+ }
+ }
+ void backtrackAssertionBOL(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generateAssertionEOL(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ if (m_pattern.m_multiline) {
+ const RegisterID character = regT0;
+
+ JumpList matchDest;
+ if (term->inputPosition == m_checked)
+ matchDest.append(atEndOfInput());
+
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, m_pattern.newlineCharacterClass());
+ op.m_jumps.append(jump());
+
+ matchDest.link(this);
+ } else {
+ if (term->inputPosition == m_checked)
+ op.m_jumps.append(notAtEndOfInput());
+ // Erk, really should poison out these alternatives early. :-/
+ else
+ op.m_jumps.append(jump());
+ }
+ }
+ void backtrackAssertionEOL(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+    // Also falls through on nextIsNotWordChar.
+ void matchAssertionWordchar(size_t opIndex, JumpList& nextIsWordChar, JumpList& nextIsNotWordChar)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+
+ if (term->inputPosition == m_checked)
+ nextIsNotWordChar.append(atEndOfInput());
+
+ readCharacter((term->inputPosition - m_checked), character);
+ matchCharacterClass(character, nextIsWordChar, m_pattern.wordcharCharacterClass());
+ }
+
+ void generateAssertionWordBoundary(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+
+ Jump atBegin;
+ JumpList matchDest;
+ if (!term->inputPosition)
+ atBegin = branch32(Equal, index, Imm32(m_checked));
+ readCharacter((term->inputPosition - m_checked) - 1, character);
+ matchCharacterClass(character, matchDest, m_pattern.wordcharCharacterClass());
+ if (!term->inputPosition)
+ atBegin.link(this);
+
+ // We fall through to here if the last character was not a wordchar.
+ JumpList nonWordCharThenWordChar;
+ JumpList nonWordCharThenNonWordChar;
+ if (term->invert()) {
+ matchAssertionWordchar(opIndex, nonWordCharThenNonWordChar, nonWordCharThenWordChar);
+ nonWordCharThenWordChar.append(jump());
+ } else {
+ matchAssertionWordchar(opIndex, nonWordCharThenWordChar, nonWordCharThenNonWordChar);
+ nonWordCharThenNonWordChar.append(jump());
+ }
+ op.m_jumps.append(nonWordCharThenNonWordChar);
+
+ // We jump here if the last character was a wordchar.
+ matchDest.link(this);
+ JumpList wordCharThenWordChar;
+ JumpList wordCharThenNonWordChar;
+ if (term->invert()) {
+ matchAssertionWordchar(opIndex, wordCharThenNonWordChar, wordCharThenWordChar);
+ wordCharThenWordChar.append(jump());
+ } else {
+ matchAssertionWordchar(opIndex, wordCharThenWordChar, wordCharThenNonWordChar);
+            // This can fall through!
+ }
+
+ op.m_jumps.append(wordCharThenWordChar);
+
+ nonWordCharThenWordChar.link(this);
+ wordCharThenNonWordChar.link(this);
+ }
+ void backtrackAssertionWordBoundary(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
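+    // Matches a single fixed-count pattern character, fusing up to four (8-bit)
+    // or two (16-bit) adjacent single-character terms into one wide load and
+    // compare. For instance /case/ over 8-bit input becomes, on little-endian,
+    // a single 32-bit compare against 'c'|'a'<<8|'s'<<16|'e'<<24, with mask
+    // 0x20202020 OR-ed in first when matching case-insensitively (all four
+    // characters being ASCII alpha).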
+ void generatePatternCharacterOnce(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+
+ if (op.m_isDeadCode)
+ return;
+
+ // m_ops always ends with a OpBodyAlternativeEnd or OpMatchFailed
+ // node, so there must always be at least one more node.
+ ASSERT(opIndex + 1 < m_ops.size());
+ YarrOp* nextOp = &m_ops[opIndex + 1];
+
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ if ((ch > 0xff) && (m_charSize == Char8)) {
+ // Have a 16 bit pattern character and an 8 bit string - short circuit
+ op.m_jumps.append(jump());
+ return;
+ }
+
+ const RegisterID character = regT0;
+        int maxCharactersAtOnce = m_charSize == Char8 ? 4 : 2;
+        unsigned ignoreCaseMask = 0;
+#if CPU(BIG_ENDIAN)
+        int allCharacters = ch << (m_charSize == Char8 ? 24 : 16);
+#else
+        int allCharacters = ch;
+#endif
+        int numberCharacters;
+        int startTermPosition = term->inputPosition;
+
+        // For case-insensitive compares, non-ASCII characters that have different
+        // upper & lower case representations are converted to a character class.
+        ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(ch) || isCanonicallyUnique(ch));
+
+        if (m_pattern.m_ignoreCase && isASCIIAlpha(ch))
+#if CPU(BIG_ENDIAN)
+            ignoreCaseMask |= 32 << (m_charSize == Char8 ? 24 : 16);
+#else
+            ignoreCaseMask |= 32;
+#endif
+
+        for (numberCharacters = 1; numberCharacters < maxCharactersAtOnce && nextOp->m_op == OpTerm; ++numberCharacters, nextOp = &m_ops[opIndex + numberCharacters]) {
+            PatternTerm* nextTerm = nextOp->m_term;
+
+            if (nextTerm->type != PatternTerm::TypePatternCharacter
+                || nextTerm->quantityType != QuantifierFixedCount
+                || nextTerm->quantityCount != 1
+                || nextTerm->inputPosition != (startTermPosition + numberCharacters))
+                break;
+
+            nextOp->m_isDeadCode = true;
+
+#if CPU(BIG_ENDIAN)
+            int shiftAmount = (m_charSize == Char8 ? 24 : 16) - ((m_charSize == Char8 ? 8 : 16) * numberCharacters);
+#else
+            int shiftAmount = (m_charSize == Char8 ? 8 : 16) * numberCharacters;
+#endif
+
+            UChar currentCharacter = nextTerm->patternCharacter;
+
+            if ((currentCharacter > 0xff) && (m_charSize == Char8)) {
+                // Have a 16 bit pattern character and an 8 bit string - short circuit
+                op.m_jumps.append(jump());
+                return;
+            }
+
+            // For case-insensitive compares, non-ASCII characters that have different
+            // upper & lower case representations are converted to a character class.
+            ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(currentCharacter) || isCanonicallyUnique(currentCharacter));
+
+            allCharacters |= (currentCharacter << shiftAmount);
+
+            if ((m_pattern.m_ignoreCase) && (isASCIIAlpha(currentCharacter)))
+                ignoreCaseMask |= 32 << shiftAmount;
+        }
+
+        if (m_charSize == Char8) {
+            switch (numberCharacters) {
+            case 1:
+                op.m_jumps.append(jumpIfCharNotEquals(ch, startTermPosition - m_checked, character));
+                return;
+            case 2: {
+                BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+                load16Unaligned(address, character);
+                break;
+            }
+            case 3: {
+                BaseIndex highAddress(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+                load16Unaligned(highAddress, character);
+                if (ignoreCaseMask)
+                    or32(Imm32(ignoreCaseMask), character);
+                op.m_jumps.append(branch32(NotEqual, character, Imm32((allCharacters & 0xffff) | ignoreCaseMask)));
+                op.m_jumps.append(jumpIfCharNotEquals(allCharacters >> 16, startTermPosition + 2 - m_checked, character));
+                return;
+            }
+            case 4: {
+                BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+                load32WithUnalignedHalfWords(address, character);
+                break;
+            }
+            }
+        } else {
+            switch (numberCharacters) {
+            case 1:
+                op.m_jumps.append(jumpIfCharNotEquals(ch, term->inputPosition - m_checked, character));
+                return;
+            case 2: {
+                BaseIndex address(input, index, TimesTwo, (term->inputPosition - m_checked) * sizeof(UChar));
+                load32WithUnalignedHalfWords(address, character);
+                break;
+            }
+            }
+        }
+
+        if (ignoreCaseMask)
+            or32(Imm32(ignoreCaseMask), character);
+        op.m_jumps.append(branch32(NotEqual, character, Imm32(allCharacters | ignoreCaseMask)));
+        return;
+ }
+ void backtrackPatternCharacterOnce(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generatePatternCharacterFixed(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(index, countRegister);
+ sub32(Imm32(term->quantityCount.unsafeGet()), countRegister);
+
+ Label loop(this);
+ BaseIndex address(input, countRegister, m_charScale, (Checked<int>(term->inputPosition - m_checked + Checked<int64_t>(term->quantityCount)) * static_cast<int>(m_charSize == Char8 ? sizeof(char) : sizeof(UChar))).unsafeGet());
+
+ if (m_charSize == Char8)
+ load8(address, character);
+ else
+ load16(address, character);
+
+        // For case-insensitive compares, non-ASCII characters that have different
+        // upper & lower case representations are converted to a character class.
+ ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(ch) || isCanonicallyUnique(ch));
+ if (m_pattern.m_ignoreCase && isASCIIAlpha(ch)) {
+ or32(TrustedImm32(0x20), character);
+ ch |= 0x20;
+ }
+
+ op.m_jumps.append(branch32(NotEqual, character, Imm32(ch)));
+ add32(TrustedImm32(1), countRegister);
+ branch32(NotEqual, countRegister, index).linkTo(loop, this);
+ }
+ void backtrackPatternCharacterFixed(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generatePatternCharacterGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+
+        // Unless we have a 16 bit pattern character and an 8 bit string - short circuit
+ if (!((ch > 0xff) && (m_charSize == Char8))) {
+ JumpList failures;
+ Label loop(this);
+ failures.append(atEndOfInput());
+ failures.append(jumpIfCharNotEquals(ch, term->inputPosition - m_checked, character));
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+ if (term->quantityCount == quantifyInfinite)
+ jump(loop);
+ else
+ branch32(NotEqual, countRegister, Imm32(term->quantityCount.unsafeGet())).linkTo(loop, this);
+
+ failures.link(this);
+ }
+ op.m_reentry = label();
+
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ void backtrackPatternCharacterGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+ m_backtrackingState.append(branchTest32(Zero, countRegister));
+ sub32(TrustedImm32(1), countRegister);
+ sub32(TrustedImm32(1), index);
+ jump(op.m_reentry);
+ }
+
+ void generatePatternCharacterNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+ op.m_reentry = label();
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ void backtrackPatternCharacterNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+
+        // Unless we have a 16 bit pattern character and an 8 bit string - short circuit
+ if (!((ch > 0xff) && (m_charSize == Char8))) {
+ JumpList nonGreedyFailures;
+ nonGreedyFailures.append(atEndOfInput());
+ if (term->quantityCount != quantifyInfinite)
+ nonGreedyFailures.append(branch32(Equal, countRegister, Imm32(term->quantityCount.unsafeGet())));
+ nonGreedyFailures.append(jumpIfCharNotEquals(ch, term->inputPosition - m_checked, character));
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+
+ jump(op.m_reentry);
+ nonGreedyFailures.link(this);
+ }
+
+ sub32(countRegister, index);
+ m_backtrackingState.fallthrough();
+ }
+
+ void generateCharacterClassOnce(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+
+ JumpList matchDest;
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+
+ if (term->invert())
+ op.m_jumps.append(matchDest);
+ else {
+ op.m_jumps.append(jump());
+ matchDest.link(this);
+ }
+ }
+ void backtrackCharacterClassOnce(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generateCharacterClassFixed(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(index, countRegister);
+ sub32(Imm32(term->quantityCount.unsafeGet()), countRegister);
+
+ Label loop(this);
+ JumpList matchDest;
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, countRegister, TimesOne, (Checked<int>(term->inputPosition - m_checked + Checked<int64_t>(term->quantityCount)) * static_cast<int>(sizeof(char))).unsafeGet()), character);
+ else
+ load16(BaseIndex(input, countRegister, TimesTwo, (Checked<int>(term->inputPosition - m_checked + Checked<int64_t>(term->quantityCount)) * static_cast<int>(sizeof(UChar))).unsafeGet()), character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+
+ if (term->invert())
+ op.m_jumps.append(matchDest);
+ else {
+ op.m_jumps.append(jump());
+ matchDest.link(this);
+ }
+
+ add32(TrustedImm32(1), countRegister);
+ branch32(NotEqual, countRegister, index).linkTo(loop, this);
+ }
+ void backtrackCharacterClassFixed(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generateCharacterClassGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+
+ JumpList failures;
+ Label loop(this);
+ failures.append(atEndOfInput());
+
+ if (term->invert()) {
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, failures, term->characterClass);
+ } else {
+ JumpList matchDest;
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+ failures.append(jump());
+ matchDest.link(this);
+ }
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+ if (term->quantityCount != quantifyInfinite) {
+ branch32(NotEqual, countRegister, Imm32(term->quantityCount.unsafeGet())).linkTo(loop, this);
+ failures.append(jump());
+ } else
+ jump(loop);
+
+ failures.link(this);
+ op.m_reentry = label();
+
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ void backtrackCharacterClassGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+ m_backtrackingState.append(branchTest32(Zero, countRegister));
+ sub32(TrustedImm32(1), countRegister);
+ sub32(TrustedImm32(1), index);
+ jump(op.m_reentry);
+ }
+
+ void generateCharacterClassNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+ op.m_reentry = label();
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ void backtrackCharacterClassNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ JumpList nonGreedyFailures;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+
+ nonGreedyFailures.append(atEndOfInput());
+ nonGreedyFailures.append(branch32(Equal, countRegister, Imm32(term->quantityCount.unsafeGet())));
+
+ JumpList matchDest;
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+
+ if (term->invert())
+ nonGreedyFailures.append(matchDest);
+ else {
+ nonGreedyFailures.append(jump());
+ matchDest.link(this);
+ }
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+
+ jump(op.m_reentry);
+
+ nonGreedyFailures.link(this);
+ sub32(countRegister, index);
+ m_backtrackingState.fallthrough();
+ }
+
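+    // Matches patterns of the form /.*x.*/ (a body enclosed by .* on both
+    // sides): once the enclosed term has matched, scan backwards from the match
+    // start to the previous newline (or the start of input) and forwards from
+    // the current index to the next newline (or the end of input), recording
+    // those positions as the real match boundaries (with ^/$ anchoring enforced
+    // when not in multiline mode).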
+ void generateDotStarEnclosure(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID matchPos = regT1;
+
+ JumpList foundBeginningNewLine;
+ JumpList saveStartIndex;
+ JumpList foundEndingNewLine;
+
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ getMatchStart(matchPos);
+
+ saveStartIndex.append(branchTest32(Zero, matchPos));
+ Label findBOLLoop(this);
+ sub32(TrustedImm32(1), matchPos);
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, matchPos, TimesOne, 0), character);
+ else
+ load16(BaseIndex(input, matchPos, TimesTwo, 0), character);
+ matchCharacterClass(character, foundBeginningNewLine, m_pattern.newlineCharacterClass());
+ branchTest32(NonZero, matchPos).linkTo(findBOLLoop, this);
+ saveStartIndex.append(jump());
+
+ foundBeginningNewLine.link(this);
+ add32(TrustedImm32(1), matchPos); // Advance past newline
+ saveStartIndex.link(this);
+
+ if (!m_pattern.m_multiline && term->anchors.bolAnchor)
+ op.m_jumps.append(branchTest32(NonZero, matchPos));
+
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ setMatchStart(matchPos);
+
+ move(index, matchPos);
+
+ Label findEOLLoop(this);
+ foundEndingNewLine.append(branch32(Equal, matchPos, length));
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, matchPos, TimesOne, 0), character);
+ else
+ load16(BaseIndex(input, matchPos, TimesTwo, 0), character);
+ matchCharacterClass(character, foundEndingNewLine, m_pattern.newlineCharacterClass());
+ add32(TrustedImm32(1), matchPos);
+ jump(findEOLLoop);
+
+ foundEndingNewLine.link(this);
+
+ if (!m_pattern.m_multiline && term->anchors.eolAnchor)
+ op.m_jumps.append(branch32(NotEqual, matchPos, length));
+
+ move(matchPos, index);
+ }
+
+ void backtrackDotStarEnclosure(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ // Code generation/backtracking for simple terms
+ // (pattern characters, character classes, and assertions).
+ // These methods farm out work to the set of functions above.
+ void generateTerm(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ switch (term->type) {
+ case PatternTerm::TypePatternCharacter:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ generatePatternCharacterOnce(opIndex);
+ else
+ generatePatternCharacterFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ generatePatternCharacterGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ generatePatternCharacterNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ generateCharacterClassOnce(opIndex);
+ else
+ generateCharacterClassFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ generateCharacterClassGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ generateCharacterClassNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeAssertionBOL:
+ generateAssertionBOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionEOL:
+ generateAssertionEOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionWordBoundary:
+ generateAssertionWordBoundary(opIndex);
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ case PatternTerm::TypeParenthesesSubpattern:
+ case PatternTerm::TypeParentheticalAssertion:
+ RELEASE_ASSERT_NOT_REACHED();
+ case PatternTerm::TypeBackReference:
+ m_shouldFallBack = true;
+ break;
+ case PatternTerm::TypeDotStarEnclosure:
+ generateDotStarEnclosure(opIndex);
+ break;
+ }
+ }
+ void backtrackTerm(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ switch (term->type) {
+ case PatternTerm::TypePatternCharacter:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ backtrackPatternCharacterOnce(opIndex);
+ else
+ backtrackPatternCharacterFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ backtrackPatternCharacterGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ backtrackPatternCharacterNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ backtrackCharacterClassOnce(opIndex);
+ else
+ backtrackCharacterClassFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ backtrackCharacterClassGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ backtrackCharacterClassNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeAssertionBOL:
+ backtrackAssertionBOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionEOL:
+ backtrackAssertionEOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionWordBoundary:
+ backtrackAssertionWordBoundary(opIndex);
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ case PatternTerm::TypeParenthesesSubpattern:
+ case PatternTerm::TypeParentheticalAssertion:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case PatternTerm::TypeDotStarEnclosure:
+ backtrackDotStarEnclosure(opIndex);
+ break;
+
+ case PatternTerm::TypeBackReference:
+ m_shouldFallBack = true;
+ break;
+ }
+ }
+
+ void generate()
+ {
+ // Forwards generate the matching code.
+ ASSERT(m_ops.size());
+ size_t opIndex = 0;
+
+ do {
+ YarrOp& op = m_ops[opIndex];
+ switch (op.m_op) {
+
+ case OpTerm:
+ generateTerm(opIndex);
+ break;
+
+ // OpBodyAlternativeBegin/Next/End
+ //
+ // These nodes wrap the set of alternatives in the body of the regular expression.
+ // There may be either one or two chains of OpBodyAlternative nodes, one representing
+ // the 'once through' sequence of alternatives (if any exist), and one representing
+ // the repeating alternatives (again, if any exist).
+ //
+ // Upon normal entry to the Begin alternative, we will check that input is available.
+            // Reentry to the Begin alternative will occur after the check has taken place,
+ // and will assume that the input position has already been progressed as appropriate.
+ //
+ // Entry to subsequent Next/End alternatives occurs when the prior alternative has
+ // successfully completed a match - return a success state from JIT code.
+ //
+            // A Next alternative allows for reentry optimized to suit backtracking from its
+ // preceding alternative. It expects the input position to still be set to a position
+ // appropriate to its predecessor, and it will only perform an input check if the
+ // predecessor had a minimum size less than its own.
+ //
+            // In the case of 'once through' expressions, the End node will also have a reentry
+ // point to jump to when the last alternative fails. Again, this expects the input
+ // position to still reflect that expected by the prior alternative.
+ case OpBodyAlternativeBegin: {
+ PatternAlternative* alternative = op.m_alternative;
+
+ // Upon entry at the head of the set of alternatives, check if input is available
+ // to run the first alternative. (This progresses the input position).
+ op.m_jumps.append(jumpIfNoAvailableInput(alternative->m_minimumSize));
+ // We will reenter after the check, and assume the input position to have been
+ // set as appropriate to this alternative.
+ op.m_reentry = label();
+
+ m_checked += alternative->m_minimumSize;
+ break;
+ }
+ case OpBodyAlternativeNext:
+ case OpBodyAlternativeEnd: {
+ PatternAlternative* priorAlternative = m_ops[op.m_previousOp].m_alternative;
+ PatternAlternative* alternative = op.m_alternative;
+
+ // If we get here, the prior alternative matched - return success.
+
+ // Adjust the stack pointer to remove the pattern's frame.
+ removeCallFrame();
+
+                // Load appropriate values into the return register and the first output
+                // slot, and return. In the case of a pattern with a fixed size, we will
+                // not yet have set the value in the first output slot.
+ ASSERT(index != returnRegister);
+ if (m_pattern.m_body->m_hasFixedSize) {
+ move(index, returnRegister);
+ if (priorAlternative->m_minimumSize)
+ sub32(Imm32(priorAlternative->m_minimumSize), returnRegister);
+ if (compileMode == IncludeSubpatterns)
+ store32(returnRegister, output);
+ } else
+ getMatchStart(returnRegister);
+ if (compileMode == IncludeSubpatterns)
+ store32(index, Address(output, 4));
+ move(index, returnRegister2);
+
+ generateReturn();
+
+ // This is the divide between the tail of the prior alternative, above, and
+ // the head of the subsequent alternative, below.
+
+ if (op.m_op == OpBodyAlternativeNext) {
+ // This is the reentry point for the Next alternative. We expect any code
+ // that jumps here to do so with the input position matching that of the
+                    // PRIOR alternative, and we will only check input availability if we
+ // need to progress it forwards.
+ op.m_reentry = label();
+ if (alternative->m_minimumSize > priorAlternative->m_minimumSize) {
+ add32(Imm32(alternative->m_minimumSize - priorAlternative->m_minimumSize), index);
+ op.m_jumps.append(jumpIfNoAvailableInput());
+ } else if (priorAlternative->m_minimumSize > alternative->m_minimumSize)
+ sub32(Imm32(priorAlternative->m_minimumSize - alternative->m_minimumSize), index);
+ } else if (op.m_nextOp == notFound) {
+ // This is the reentry point for the End of 'once through' alternatives,
+ // jumped to when the last alternative fails to match.
+ op.m_reentry = label();
+ sub32(Imm32(priorAlternative->m_minimumSize), index);
+ }
+
+ if (op.m_op == OpBodyAlternativeNext)
+ m_checked += alternative->m_minimumSize;
+ m_checked -= priorAlternative->m_minimumSize;
+ break;
+ }
+
+ // OpSimpleNestedAlternativeBegin/Next/End
+ // OpNestedAlternativeBegin/Next/End
+ //
+ // These nodes are used to handle sets of alternatives that are nested within
+ // subpatterns and parenthetical assertions. The 'simple' forms are used where
+ // we do not need to be able to backtrack back into any alternative other than
+ // the last, the normal forms allow backtracking into any alternative.
+ //
+ // Each Begin/Next node is responsible for planting an input check to ensure
+ // sufficient input is available on entry. Next nodes additionally need to
+ // jump to the end - Next nodes use the End node's m_jumps list to hold this
+ // set of jumps.
+ //
+ // In the non-simple forms, successful alternative matches must store a
+ // 'return address' using a DataLabelPtr, used to store the address to jump
+ // to when backtracking, to get to the code for the appropriate alternative.
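+            //
+            // That stored 'return address' is what backtracking jumps through
+            // when it reenters the disjunction, so a failure later in the
+            // pattern resumes at exactly the alternative that last matched.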
+ case OpSimpleNestedAlternativeBegin:
+ case OpNestedAlternativeBegin: {
+ PatternTerm* term = op.m_term;
+ PatternAlternative* alternative = op.m_alternative;
+ PatternDisjunction* disjunction = term->parentheses.disjunction;
+
+ // Calculate how much input we need to check for, and if non-zero check.
+ op.m_checkAdjust = alternative->m_minimumSize;
+ if ((term->quantityType == QuantifierFixedCount) && (term->type != PatternTerm::TypeParentheticalAssertion))
+ op.m_checkAdjust -= disjunction->m_minimumSize;
+ if (op.m_checkAdjust)
+ op.m_jumps.append(jumpIfNoAvailableInput(op.m_checkAdjust));
+
+ m_checked += op.m_checkAdjust;
+ break;
+ }
+ case OpSimpleNestedAlternativeNext:
+ case OpNestedAlternativeNext: {
+ PatternTerm* term = op.m_term;
+ PatternAlternative* alternative = op.m_alternative;
+ PatternDisjunction* disjunction = term->parentheses.disjunction;
+
+ // In the non-simple case, store a 'return address' so we can backtrack correctly.
+ if (op.m_op == OpNestedAlternativeNext) {
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ unsigned alternativeFrameLocation = parenthesesFrameLocation;
+ if (term->quantityType != QuantifierFixedCount)
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ op.m_returnAddress = storeToFrameWithPatch(alternativeFrameLocation);
+ }
+
+ if (term->quantityType != QuantifierFixedCount && !m_ops[op.m_previousOp].m_alternative->m_minimumSize) {
+ // If the previous alternative matched without consuming characters then
+                    // backtrack to try to match while consuming some input.
+ op.m_zeroLengthMatch = branch32(Equal, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ }
+
+ // If we reach here then the last alternative has matched - jump to the
+ // End node, to skip over any further alternatives.
+ //
+ // FIXME: this is logically O(N^2) (though N can be expected to be very
+ // small). We could avoid this either by adding an extra jump to the JIT
+                // data structures, or by making the backtracking code that jumps to Next
+                // alternatives responsible for checking that input is available (if
+ // we didn't need to plant the input checks, then m_jumps would be free).
+ YarrOp* endOp = &m_ops[op.m_nextOp];
+ while (endOp->m_nextOp != notFound) {
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeNext || endOp->m_op == OpNestedAlternativeNext);
+ endOp = &m_ops[endOp->m_nextOp];
+ }
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeEnd || endOp->m_op == OpNestedAlternativeEnd);
+ endOp->m_jumps.append(jump());
+
+ // This is the entry point for the next alternative.
+ op.m_reentry = label();
+
+ // Calculate how much input we need to check for, and if non-zero check.
+ op.m_checkAdjust = alternative->m_minimumSize;
+ if ((term->quantityType == QuantifierFixedCount) && (term->type != PatternTerm::TypeParentheticalAssertion))
+ op.m_checkAdjust -= disjunction->m_minimumSize;
+ if (op.m_checkAdjust)
+ op.m_jumps.append(jumpIfNoAvailableInput(op.m_checkAdjust));
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked -= lastOp.m_checkAdjust;
+ m_checked += op.m_checkAdjust;
+ break;
+ }
+ case OpSimpleNestedAlternativeEnd:
+ case OpNestedAlternativeEnd: {
+ PatternTerm* term = op.m_term;
+
+ // In the non-simple case, store a 'return address' so we can backtrack correctly.
+ if (op.m_op == OpNestedAlternativeEnd) {
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ unsigned alternativeFrameLocation = parenthesesFrameLocation;
+ if (term->quantityType != QuantifierFixedCount)
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ op.m_returnAddress = storeToFrameWithPatch(alternativeFrameLocation);
+ }
+
+ if (term->quantityType != QuantifierFixedCount && !m_ops[op.m_previousOp].m_alternative->m_minimumSize) {
+ // If the previous alternative matched without consuming characters then
+                    // backtrack to try to match while consuming some input.
+ op.m_zeroLengthMatch = branch32(Equal, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ }
+
+ // If this set of alternatives contains more than one alternative,
+ // then the Next nodes will have planted jumps to the End, and added
+ // them to this node's m_jumps list.
+ op.m_jumps.link(this);
+ op.m_jumps.clear();
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked -= lastOp.m_checkAdjust;
+ break;
+ }
+
+ // OpParenthesesSubpatternOnceBegin/End
+ //
+ // These nodes support (optionally) capturing subpatterns, that have a
+ // quantity count of 1 (this covers fixed once, and ?/?? quantifiers).
+ case OpParenthesesSubpatternOnceBegin: {
+ PatternTerm* term = op.m_term;
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ const RegisterID indexTemporary = regT0;
+ ASSERT(term->quantityCount == 1);
+
+                // Upon entry to a Greedy quantified set of parentheses, store the index.
+                // We'll use this for two purposes:
+                //  - To indicate which iteration we are on of matching the remainder of
+ // the expression after the parentheses - the first, including the
+ // match within the parentheses, or the second having skipped over them.
+ // - To check for empty matches, which must be rejected.
+ //
+ // At the head of a NonGreedy set of parentheses we'll immediately set the
+ // value on the stack to -1 (indicating a match skipping the subpattern),
+ // and plant a jump to the end. We'll also plant a label to backtrack to
+ // to reenter the subpattern later, with a store to set up index on the
+ // second iteration.
+ //
+ // FIXME: for capturing parens, could use the index in the capture array?
+ if (term->quantityType == QuantifierGreedy)
+ storeToFrame(index, parenthesesFrameLocation);
+ else if (term->quantityType == QuantifierNonGreedy) {
+ storeToFrame(TrustedImm32(-1), parenthesesFrameLocation);
+ op.m_jumps.append(jump());
+ op.m_reentry = label();
+ storeToFrame(index, parenthesesFrameLocation);
+ }
+
+                // If the parentheses are capturing, store the starting index value to the
+ // captures array, offsetting as necessary.
+ //
+ // FIXME: could avoid offsetting this value in JIT code, apply
+ // offsets only afterwards, at the point the results array is
+ // being accessed.
+ if (term->capture() && compileMode == IncludeSubpatterns) {
+ int inputOffset = term->inputPosition - m_checked;
+ if (term->quantityType == QuantifierFixedCount)
+ inputOffset -= term->parentheses.disjunction->m_minimumSize;
+ if (inputOffset) {
+ move(index, indexTemporary);
+ add32(Imm32(inputOffset), indexTemporary);
+ setSubpatternStart(indexTemporary, term->parentheses.subpatternId);
+ } else
+ setSubpatternStart(index, term->parentheses.subpatternId);
+ }
+ break;
+ }
+ case OpParenthesesSubpatternOnceEnd: {
+ PatternTerm* term = op.m_term;
+ const RegisterID indexTemporary = regT0;
+ ASSERT(term->quantityCount == 1);
+
+#ifndef NDEBUG
+ // Runtime ASSERT to make sure that the nested alternative handled the
+ // "no input consumed" check.
+ if (term->quantityType != QuantifierFixedCount && !term->parentheses.disjunction->m_minimumSize) {
+ Jump pastBreakpoint;
+ pastBreakpoint = branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ breakpoint();
+ pastBreakpoint.link(this);
+ }
+#endif
+
+                // If the parentheses are capturing, store the ending index value to the
+ // captures array, offsetting as necessary.
+ //
+ // FIXME: could avoid offsetting this value in JIT code, apply
+ // offsets only afterwards, at the point the results array is
+ // being accessed.
+ if (term->capture() && compileMode == IncludeSubpatterns) {
+ int inputOffset = term->inputPosition - m_checked;
+ if (inputOffset) {
+ move(index, indexTemporary);
+ add32(Imm32(inputOffset), indexTemporary);
+ setSubpatternEnd(indexTemporary, term->parentheses.subpatternId);
+ } else
+ setSubpatternEnd(index, term->parentheses.subpatternId);
+ }
+
+ // If the parentheses are quantified Greedy then add a label to jump back
+                // to if we get a failed match from after the parentheses. For NonGreedy
+ // parentheses, link the jump from before the subpattern to here.
+ if (term->quantityType == QuantifierGreedy)
+ op.m_reentry = label();
+ else if (term->quantityType == QuantifierNonGreedy) {
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+ beginOp.m_jumps.link(this);
+ }
+ break;
+ }
+
+ // OpParenthesesSubpatternTerminalBegin/End
+ case OpParenthesesSubpatternTerminalBegin: {
+ PatternTerm* term = op.m_term;
+ ASSERT(term->quantityType == QuantifierGreedy);
+ ASSERT(term->quantityCount == quantifyInfinite);
+ ASSERT(!term->capture());
+
+ // Upon entry set a label to loop back to.
+ op.m_reentry = label();
+
+ // Store the start index of the current match; we need to reject zero
+ // length matches.
+ storeToFrame(index, term->frameLocation);
+ break;
+ }
+ case OpParenthesesSubpatternTerminalEnd: {
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+#ifndef NDEBUG
+ PatternTerm* term = op.m_term;
+
+ // Runtime ASSERT to make sure that the nested alternative handled the
+ // "no input consumed" check.
+ Jump pastBreakpoint;
+ pastBreakpoint = branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ breakpoint();
+ pastBreakpoint.link(this);
+#endif
+
+ // We know that the match is non-zero, we can accept it and
+ // loop back up to the head of the subpattern.
+ jump(beginOp.m_reentry);
+
+ // This is the entry point to jump to when we stop matching - we will
+ // do so once the subpattern cannot match any more.
+ op.m_reentry = label();
+ break;
+ }
+
+ // OpParentheticalAssertionBegin/End
+ case OpParentheticalAssertionBegin: {
+ PatternTerm* term = op.m_term;
+
+ // Store the current index - assertions should not update index, so
+ // we will need to restore it upon a successful match.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ storeToFrame(index, parenthesesFrameLocation);
+
+                // The assertion matches at its own input position; rewind the
+                // input index (and m_checked, below) by any input checked ahead
+                // of that position.
+ op.m_checkAdjust = m_checked - term->inputPosition;
+ if (op.m_checkAdjust)
+ sub32(Imm32(op.m_checkAdjust), index);
+
+ m_checked -= op.m_checkAdjust;
+ break;
+ }
+ case OpParentheticalAssertionEnd: {
+ PatternTerm* term = op.m_term;
+
+ // Restore the input index value.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ loadFromFrame(parenthesesFrameLocation, index);
+
+ // If inverted, a successful match of the assertion must be treated
+ // as a failure, so jump to backtracking.
+ if (term->invert()) {
+ op.m_jumps.append(jump());
+ op.m_reentry = label();
+ }
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked += lastOp.m_checkAdjust;
+ break;
+ }
+
+ case OpMatchFailed:
+ removeCallFrame();
+ move(TrustedImmPtr((void*)WTF::notFound), returnRegister);
+ move(TrustedImm32(0), returnRegister2);
+ generateReturn();
+ break;
+ }
+
+ ++opIndex;
+ } while (opIndex < m_ops.size());
+ }
+
+ void backtrack()
+ {
+ // Generate the backtracking code by walking the ops in reverse.
+ size_t opIndex = m_ops.size();
+ ASSERT(opIndex);
+
+ do {
+ --opIndex;
+ YarrOp& op = m_ops[opIndex];
+ switch (op.m_op) {
+
+ case OpTerm:
+ backtrackTerm(opIndex);
+ break;
+
+ // OpBodyAlternativeBegin/Next/End
+ //
+ // For each Begin/Next node representing an alternative, we need to decide what to do
+ // in two circumstances:
+ // - If we backtrack back into this node, from within the alternative.
+ // - If the input check at the head of the alternative fails (if this exists).
+ //
+ // We treat these two cases differently since in the former case we have slightly
+ // more information - since we are backtracking out of a prior alternative we know
+ // that at least enough input was available to run it. For example, given the regular
+ // expression /a|b/, if we backtrack out of the first alternative (a failed pattern
+ // character match of 'a'), then we need not perform an additional input availability
+ // check before running the second alternative.
+ //
+ // Backtracking required differs for the last alternative, which in the case of the
+ // repeating set of alternatives must loop. The code generated for the last alternative
+ // will also be used to handle all input check failures from any prior alternatives -
+ // these require similar functionality, in seeking the next available alternative for
+ // which there is sufficient input.
+ //
+ // Since backtracking of all other alternatives simply requires us to link backtracks
+ // to the reentry point for the subsequent alternative, we will only be generating any
+ // code when backtracking the last alternative.
+ case OpBodyAlternativeBegin:
+ case OpBodyAlternativeNext: {
+ PatternAlternative* alternative = op.m_alternative;
+
+ if (op.m_op == OpBodyAlternativeNext) {
+ PatternAlternative* priorAlternative = m_ops[op.m_previousOp].m_alternative;
+ m_checked += priorAlternative->m_minimumSize;
+ }
+ m_checked -= alternative->m_minimumSize;
+
+ // Is this the last alternative? If not, then if we backtrack to this point we just
+ // need to jump to try to match the next alternative.
+ if (m_ops[op.m_nextOp].m_op != OpBodyAlternativeEnd) {
+ m_backtrackingState.linkTo(m_ops[op.m_nextOp].m_reentry, this);
+ break;
+ }
+ YarrOp& endOp = m_ops[op.m_nextOp];
+
+ YarrOp* beginOp = &op;
+ while (beginOp->m_op != OpBodyAlternativeBegin) {
+ ASSERT(beginOp->m_op == OpBodyAlternativeNext);
+ beginOp = &m_ops[beginOp->m_previousOp];
+ }
+
+ bool onceThrough = endOp.m_nextOp == notFound;
+
+ // First, generate code to handle cases where we backtrack out of an attempted match
+ // of the last alternative. If this is a 'once through' set of alternatives then we
+ // have nothing to do - link this straight through to the End.
+ if (onceThrough)
+ m_backtrackingState.linkTo(endOp.m_reentry, this);
+ else {
+ // If we don't need to move the input position, and the pattern has a fixed size
+ // (in which case we omit the store of the start index until the pattern has matched)
+ // then we can just link the backtrack out of the last alternative straight to the
+ // head of the first alternative.
+ if (m_pattern.m_body->m_hasFixedSize
+ && (alternative->m_minimumSize > beginOp->m_alternative->m_minimumSize)
+ && (alternative->m_minimumSize - beginOp->m_alternative->m_minimumSize == 1))
+ m_backtrackingState.linkTo(beginOp->m_reentry, this);
+ else {
+ // We need to generate a trampoline of code to execute before looping back
+ // around to the first alternative.
+ m_backtrackingState.link(this);
+
+ // If the pattern size is not fixed, then store the start index, for use if we match.
+ if (!m_pattern.m_body->m_hasFixedSize) {
+ if (alternative->m_minimumSize == 1)
+ setMatchStart(index);
+ else {
+ move(index, regT0);
+ if (alternative->m_minimumSize)
+ sub32(Imm32(alternative->m_minimumSize - 1), regT0);
+ else
+ add32(TrustedImm32(1), regT0);
+ setMatchStart(regT0);
+ }
+ }
+
+ // Generate code to loop. Check whether the last alternative is longer than the
+ // first (e.g. /a|xy/ or /a|xyz/).
+ if (alternative->m_minimumSize > beginOp->m_alternative->m_minimumSize) {
+ // We want to loop, and increment the input position. If the delta is 1, it is
+ // already correctly incremented; if more than one then decrement as appropriate.
+ unsigned delta = alternative->m_minimumSize - beginOp->m_alternative->m_minimumSize;
+ ASSERT(delta);
+ if (delta != 1)
+ sub32(Imm32(delta - 1), index);
+ jump(beginOp->m_reentry);
+ } else {
+ // If the first alternative has minimum size 0xFFFFFFFFu, then there cannot
+ // be sufficient input available to handle this, so just fall through.
+ unsigned delta = beginOp->m_alternative->m_minimumSize - alternative->m_minimumSize;
+ if (delta != 0xFFFFFFFFu) {
+ // We need to check input because we are incrementing the input.
+ add32(Imm32(delta + 1), index);
+ checkInput().linkTo(beginOp->m_reentry, this);
+ }
+ }
+ }
+ }
+
+ // We can reach this point in the code in two ways:
+ // - Fallthrough from the code above (a repeating alternative backtracked out of its
+ // last alternative, and did not have sufficient input to run the first).
+ // - We will loop back up to the following label when a repeating alternative loops,
+ // following a failed input check.
+ //
+ // Either way, we have just failed the input check for the first alternative.
+ Label firstInputCheckFailed(this);
+
+ // Generate code to handle input check failures from alternatives except the last.
+ // prevOp is the alternative we're handling a bail out from (initially Begin), and
+ // nextOp is the alternative we will be attempting to reenter into.
+ //
+ // We will link input check failures from the forwards matching path back to the code
+ // that can handle them.
+ YarrOp* prevOp = beginOp;
+ YarrOp* nextOp = &m_ops[beginOp->m_nextOp];
+ while (nextOp->m_op != OpBodyAlternativeEnd) {
+ prevOp->m_jumps.link(this);
+
+ // We only get here if an input check fails; it is only worth checking again
+ // if the next alternative has a minimum size less than the last.
+ if (prevOp->m_alternative->m_minimumSize > nextOp->m_alternative->m_minimumSize) {
+ // FIXME: if we added an extra label to YarrOp, we could avoid needing to
+ // subtract delta back out, and reduce this code. Should performance test
+ // the benefit of this.
+ unsigned delta = prevOp->m_alternative->m_minimumSize - nextOp->m_alternative->m_minimumSize;
+ sub32(Imm32(delta), index);
+ Jump fail = jumpIfNoAvailableInput();
+ add32(Imm32(delta), index);
+ jump(nextOp->m_reentry);
+ fail.link(this);
+ } else if (prevOp->m_alternative->m_minimumSize < nextOp->m_alternative->m_minimumSize)
+ add32(Imm32(nextOp->m_alternative->m_minimumSize - prevOp->m_alternative->m_minimumSize), index);
+ prevOp = nextOp;
+ nextOp = &m_ops[nextOp->m_nextOp];
+ }
+
+ // We fall through to here if there is insufficient input to run the last alternative.
+
+ // If there is insufficient input to run the last alternative, then for 'once through'
+ // alternatives we are done - just jump back up into the forwards matching path at the End.
+ if (onceThrough) {
+ op.m_jumps.linkTo(endOp.m_reentry, this);
+ jump(endOp.m_reentry);
+ break;
+ }
+
+ // For repeating alternatives, link any input check failure from the last alternative to
+ // this point.
+ op.m_jumps.link(this);
+
+ bool needsToUpdateMatchStart = !m_pattern.m_body->m_hasFixedSize;
+
+ // Check for cases where input position is already incremented by 1 for the last
+ // alternative (this is particularly useful where the minimum size of the body
+ // disjunction is 0, e.g. /a*|b/).
+ if (needsToUpdateMatchStart && alternative->m_minimumSize == 1) {
+ // index is already incremented by 1, so just store it now!
+ setMatchStart(index);
+ needsToUpdateMatchStart = false;
+ }
+
+ // Check whether there is sufficient input to loop. Increment the input position by
+ // one, and check. Also add in the minimum disjunction size before checking - there
+ // is no point in looping if we're just going to fail all the input checks around
+ // the next iteration.
+ ASSERT(alternative->m_minimumSize >= m_pattern.m_body->m_minimumSize);
+ if (alternative->m_minimumSize == m_pattern.m_body->m_minimumSize) {
+ // If the last alternative had the same minimum size as the disjunction,
+ // simply increment the input position by 1, with no adjustment based on minimum size.
+ add32(TrustedImm32(1), index);
+ } else {
+ // If the minimum for the last alternative was one greater than that
+ // for the disjunction, we're already progressed by 1, nothing to do!
+ unsigned delta = (alternative->m_minimumSize - m_pattern.m_body->m_minimumSize) - 1;
+ if (delta)
+ sub32(Imm32(delta), index);
+ }
+ Jump matchFailed = jumpIfNoAvailableInput();
+
+ if (needsToUpdateMatchStart) {
+ if (!m_pattern.m_body->m_minimumSize)
+ setMatchStart(index);
+ else {
+ move(index, regT0);
+ sub32(Imm32(m_pattern.m_body->m_minimumSize), regT0);
+ setMatchStart(regT0);
+ }
+ }
+
+ // Calculate how much more input the first alternative requires than the minimum
+ // for the body as a whole. If no more is needed then we don't need an additional
+ // input check here - jump straight back up to the start of the first alternative.
+ if (beginOp->m_alternative->m_minimumSize == m_pattern.m_body->m_minimumSize)
+ jump(beginOp->m_reentry);
+ else {
+ if (beginOp->m_alternative->m_minimumSize > m_pattern.m_body->m_minimumSize)
+ add32(Imm32(beginOp->m_alternative->m_minimumSize - m_pattern.m_body->m_minimumSize), index);
+ else
+ sub32(Imm32(m_pattern.m_body->m_minimumSize - beginOp->m_alternative->m_minimumSize), index);
+ checkInput().linkTo(beginOp->m_reentry, this);
+ jump(firstInputCheckFailed);
+ }
+
+ // We jump to here if we iterate to the point that there is insufficient input to
+ // run any matches, and need to return a failure state from JIT code.
+ matchFailed.link(this);
+
+ removeCallFrame();
+ move(TrustedImmPtr((void*)WTF::notFound), returnRegister);
+ move(TrustedImm32(0), returnRegister2);
+ generateReturn();
+ break;
+ }
+ case OpBodyAlternativeEnd: {
+ // We should never backtrack back into a body disjunction.
+ ASSERT(m_backtrackingState.isEmpty());
+
+ PatternAlternative* priorAlternative = m_ops[op.m_previousOp].m_alternative;
+ m_checked += priorAlternative->m_minimumSize;
+ break;
+ }
+
+ // OpSimpleNestedAlternativeBegin/Next/End
+ // OpNestedAlternativeBegin/Next/End
+ //
+ // Generate code for when we backtrack back out of an alternative into
+ // a Begin or Next node, or when the entry input count check fails. If
+ // there are more alternatives we need to jump to the next alternative,
+ // if not we backtrack back out of the current set of parentheses.
+ //
+ // In the case of non-simple nested alternatives we need to also link the
+ // 'return address' appropriately to backtrack back out into the correct
+ // alternative.
+ case OpSimpleNestedAlternativeBegin:
+ case OpSimpleNestedAlternativeNext:
+ case OpNestedAlternativeBegin:
+ case OpNestedAlternativeNext: {
+ YarrOp& nextOp = m_ops[op.m_nextOp];
+ bool isBegin = op.m_previousOp == notFound;
+ bool isLastAlternative = nextOp.m_nextOp == notFound;
+ ASSERT(isBegin == (op.m_op == OpSimpleNestedAlternativeBegin || op.m_op == OpNestedAlternativeBegin));
+ ASSERT(isLastAlternative == (nextOp.m_op == OpSimpleNestedAlternativeEnd || nextOp.m_op == OpNestedAlternativeEnd));
+
+ // Treat an input check failure the same as a failed match.
+ m_backtrackingState.append(op.m_jumps);
+
+ // Set the backtracks to jump to the appropriate place. We may need
+ // to link the backtracks in one of three different ways depending on
+ // the type of alternative we are dealing with:
+ // - A single alternative, with no siblings.
+ // - The last alternative of a set of two or more.
+ // - An alternative other than the last of a set of two or more.
+ //
+ // In the case of a single alternative on its own, we don't need to
+ // jump anywhere - if the alternative fails to match we can just
+ // continue to backtrack out of the parentheses without jumping.
+ //
+ // In the case of the last alternative in a set of more than one, we
+ // need to jump back out to the beginning. We'll do so by
+ // adding a jump to the End node's m_jumps list, and linking this
+ // when we come to generate the Begin node. For alternatives other
+ // than the last, we need to jump to the next alternative.
+ //
+ // If the alternative had adjusted the input position we must link
+ // backtracking to here, correct the input position, and then jump on. If not we can
+ // link the backtracks directly to their destination.
+ if (op.m_checkAdjust) {
+ // Handle the cases where we need to link the backtracks here.
+ m_backtrackingState.link(this);
+ sub32(Imm32(op.m_checkAdjust), index);
+ if (!isLastAlternative) {
+ // An alternative that is not the last should jump to its successor.
+ jump(nextOp.m_reentry);
+ } else if (!isBegin) {
+ // The last of more than one alternatives must jump back to the beginning.
+ nextOp.m_jumps.append(jump());
+ } else {
+ // A single alternative on its own can fall through.
+ m_backtrackingState.fallthrough();
+ }
+ } else {
+ // Handle the cases where we can link the backtracks directly to their destinations.
+ if (!isLastAlternative) {
+ // An alternative that is not the last should jump to its successor.
+ m_backtrackingState.linkTo(nextOp.m_reentry, this);
+ } else if (!isBegin) {
+ // The last of more than one alternatives must jump back to the beginning.
+ m_backtrackingState.takeBacktracksToJumpList(nextOp.m_jumps, this);
+ }
+ // In the case of a single alternative on its own do nothing - it can fall through.
+ }
+
+ // If there is a backtrack jump from a zero length match link it here.
+ if (op.m_zeroLengthMatch.isSet())
+ m_backtrackingState.append(op.m_zeroLengthMatch);
+
+ // At this point we've handled the backtracking back into this node.
+ // Now link any backtracks that need to jump to here.
+
+ // For non-simple alternatives, link the alternative's 'return address'
+ // so that we backtrack back out into the previous alternative.
+ if (op.m_op == OpNestedAlternativeNext)
+ m_backtrackingState.append(op.m_returnAddress);
+
+ // If there is more than one alternative, then the last alternative will
+ // have planted a jump to be linked to the end. This jump was added to the
+ // End node's m_jumps list. If we are back at the beginning, link it here.
+ if (isBegin) {
+ YarrOp* endOp = &m_ops[op.m_nextOp];
+ while (endOp->m_nextOp != notFound) {
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeNext || endOp->m_op == OpNestedAlternativeNext);
+ endOp = &m_ops[endOp->m_nextOp];
+ }
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeEnd || endOp->m_op == OpNestedAlternativeEnd);
+ m_backtrackingState.append(endOp->m_jumps);
+ }
+
+ if (!isBegin) {
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked += lastOp.m_checkAdjust;
+ }
+ m_checked -= op.m_checkAdjust;
+ break;
+ }
+ case OpSimpleNestedAlternativeEnd:
+ case OpNestedAlternativeEnd: {
+ PatternTerm* term = op.m_term;
+
+ // If there is a backtrack jump from a zero length match link it here.
+ if (op.m_zeroLengthMatch.isSet())
+ m_backtrackingState.append(op.m_zeroLengthMatch);
+
+ // If we backtrack into the end of a simple subpattern do nothing;
+ // just continue through into the last alternative. If we backtrack
+ // into the end of a non-simple set of alternatives we need to jump
+ // to the backtracking return address set up during generation.
+ if (op.m_op == OpNestedAlternativeEnd) {
+ m_backtrackingState.link(this);
+
+ // Plant a jump to the return address.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ unsigned alternativeFrameLocation = parenthesesFrameLocation;
+ if (term->quantityType != QuantifierFixedCount)
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ loadFromFrameAndJump(alternativeFrameLocation);
+
+ // Link the DataLabelPtr associated with the end of the last
+ // alternative to this point.
+ m_backtrackingState.append(op.m_returnAddress);
+ }
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked += lastOp.m_checkAdjust;
+ break;
+ }
+
+ // OpParenthesesSubpatternOnceBegin/End
+ //
+ // When we are backtracking back out of a capturing subpattern we need
+ // to clear the start index in the matches output array, to record that
+ // this subpattern has not been captured.
+ //
+ // When backtracking back out of a Greedy quantified subpattern we need
+ // to catch this, and try running the remainder of the alternative after
+ // the subpattern again, skipping the parentheses.
+ //
+ // Upon backtracking back into a quantified set of parentheses we need to
+ // check whether we were currently skipping the subpattern. If not, we
+ // can backtrack into them, if we were we need to either backtrack back
+ // out of the start of the parentheses, or jump back to the forwards
+ // matching start, depending on whether the match is Greedy or NonGreedy.
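+ // Illustration: backtracking in /(a)?b/ must both clear capture 1 and,
+ // since the '?' is Greedy, retry matching 'b' with the group skipped
+ // before failing outright.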
+ case OpParenthesesSubpatternOnceBegin: {
+ PatternTerm* term = op.m_term;
+ ASSERT(term->quantityCount == 1);
+
+ // We only need to backtrack to this point if capturing or greedy.
+ if ((term->capture() && compileMode == IncludeSubpatterns) || term->quantityType == QuantifierGreedy) {
+ m_backtrackingState.link(this);
+
+ // If capturing, clear the capture (we only need to reset start).
+ if (term->capture() && compileMode == IncludeSubpatterns)
+ clearSubpatternStart(term->parentheses.subpatternId);
+
+ // If Greedy, jump to the end.
+ if (term->quantityType == QuantifierGreedy) {
+ // Clear the flag in the stackframe indicating we ran through the subpattern.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ storeToFrame(TrustedImm32(-1), parenthesesFrameLocation);
+ // Jump to after the parentheses, skipping the subpattern.
+ jump(m_ops[op.m_nextOp].m_reentry);
+ // A backtrack from after the parentheses, when skipping the subpattern,
+ // will jump back to here.
+ op.m_jumps.link(this);
+ }
+
+ m_backtrackingState.fallthrough();
+ }
+ break;
+ }
+ case OpParenthesesSubpatternOnceEnd: {
+ PatternTerm* term = op.m_term;
+
+ if (term->quantityType != QuantifierFixedCount) {
+ m_backtrackingState.link(this);
+
+ // Check whether we should backtrack back into the parentheses, or if we
+ // are currently in a state where we had skipped over the subpattern
+ // (in which case the flag value on the stack will be -1).
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ Jump hadSkipped = branch32(Equal, Address(stackPointerRegister, parenthesesFrameLocation * sizeof(void*)), TrustedImm32(-1));
+
+ if (term->quantityType == QuantifierGreedy) {
+ // For Greedy parentheses, we skip after having already tried going
+ // through the subpattern, so if we get here we're done.
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+ beginOp.m_jumps.append(hadSkipped);
+ } else {
+ // For NonGreedy parentheses, we try skipping the subpattern first,
+ // so if we get here we need to try running through the subpattern
+ // next. Jump back to the start of the parentheses in the forwards
+ // matching path.
+ ASSERT(term->quantityType == QuantifierNonGreedy);
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+ hadSkipped.linkTo(beginOp.m_reentry, this);
+ }
+
+ m_backtrackingState.fallthrough();
+ }
+
+ m_backtrackingState.append(op.m_jumps);
+ break;
+ }
+
+ // OpParenthesesSubpatternTerminalBegin/End
+ //
+ // Terminal subpatterns will always match - there is nothing after them to
+ // force a backtrack, and they have a minimum count of 0, and as such will
+ // always produce an acceptable result.
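+ // e.g. for /b(?:a)*/ the trailing (?:a)* is terminal: once no further
+ // 'a' can match, the iterations already matched are simply accepted.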
+ case OpParenthesesSubpatternTerminalBegin: {
+ // We will backtrack to this point once the subpattern cannot match any
+ // more. Since matching nothing is accepted as a successful match (we are
+ // Greedy quantified with a minimum of zero) jump back to the forwards matching
+ // path at the end.
+ YarrOp& endOp = m_ops[op.m_nextOp];
+ m_backtrackingState.linkTo(endOp.m_reentry, this);
+ break;
+ }
+ case OpParenthesesSubpatternTerminalEnd:
+ // We should never be backtracking to here (hence the 'terminal' in the name).
+ ASSERT(m_backtrackingState.isEmpty());
+ m_backtrackingState.append(op.m_jumps);
+ break;
+
+ // OpParentheticalAssertionBegin/End
+ case OpParentheticalAssertionBegin: {
+ PatternTerm* term = op.m_term;
+ YarrOp& endOp = m_ops[op.m_nextOp];
+
+ // We need to handle the backtracks upon backtracking back out
+ // of a parenthetical assertion if either we need to correct
+ // the input index, or the assertion was inverted.
+ if (op.m_checkAdjust || term->invert()) {
+ m_backtrackingState.link(this);
+
+ if (op.m_checkAdjust)
+ add32(Imm32(op.m_checkAdjust), index);
+
+ // In an inverted assertion failure to match the subpattern
+ // is treated as a successful match - jump to the end of the
+ // subpattern. We already have adjusted the input position
+ // back to that before the assertion, which is correct.
+ if (term->invert())
+ jump(endOp.m_reentry);
+
+ m_backtrackingState.fallthrough();
+ }
+
+ // The End node's jump list will contain any backtracks into
+ // the end of the assertion. Also, if inverted, we will have
+ // added the failure caused by a successful match to this.
+ m_backtrackingState.append(endOp.m_jumps);
+
+ m_checked += op.m_checkAdjust;
+ break;
+ }
+ case OpParentheticalAssertionEnd: {
+ // FIXME: We should really be clearing any nested subpattern
+ // matches on bailing out from after the pattern. Firefox has
+ // this bug too (presumably because they use YARR!)
+
+ // Never backtrack into an assertion; later failures bail to before the begin.
+ m_backtrackingState.takeBacktracksToJumpList(op.m_jumps, this);
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked -= lastOp.m_checkAdjust;
+ break;
+ }
+
+ case OpMatchFailed:
+ break;
+ }
+
+ } while (opIndex);
+ }
+
+ // Compilation methods:
+ // ====================
+
+ // opCompileParenthesesSubpattern
+ // Emits ops for a subpattern (set of parentheses). These consist
+ // of a set of alternatives wrapped in an outer set of nodes for
+ // the parentheses.
+ // Supported types of parentheses are 'Once' (quantityCount == 1)
+ // and 'Terminal' (non-capturing parentheses quantified as greedy
+ // and infinite).
+ // Alternatives will use the 'Simple' set of ops if either the
+ // subpattern is terminal (in which case we will never need to
+ // backtrack), or if the subpattern only contains one alternative.
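+ // Illustration: /(?:ab|cd)/ emits OpParenthesesSubpatternOnceBegin,
+ // OpNestedAlternativeBegin, the terms for 'ab', OpNestedAlternativeNext,
+ // the terms for 'cd', OpNestedAlternativeEnd, OpParenthesesSubpatternOnceEnd.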
+ void opCompileParenthesesSubpattern(PatternTerm* term)
+ {
+ YarrOpCode parenthesesBeginOpCode;
+ YarrOpCode parenthesesEndOpCode;
+ YarrOpCode alternativeBeginOpCode = OpSimpleNestedAlternativeBegin;
+ YarrOpCode alternativeNextOpCode = OpSimpleNestedAlternativeNext;
+ YarrOpCode alternativeEndOpCode = OpSimpleNestedAlternativeEnd;
+
+ // We can currently only compile quantity 1 subpatterns that are
+ // not copies. We generate a copy in the case of a range quantifier,
+ // e.g. /(?:x){3,9}/, or /(?:x)+/ (These are effectively expanded to
+ // /(?:x){3,3}(?:x){0,6}/ and /(?:x)(?:x)*/ respectively). The problem
+ // comes where the subpattern is capturing, in which case we would
+ // need to restore the capture from the first subpattern upon a
+ // failure in the second.
+ if (term->quantityCount == 1 && !term->parentheses.isCopy) {
+ // Select the 'Once' nodes.
+ parenthesesBeginOpCode = OpParenthesesSubpatternOnceBegin;
+ parenthesesEndOpCode = OpParenthesesSubpatternOnceEnd;
+
+ // If there is more than one alternative we cannot use the 'simple' nodes.
+ if (term->parentheses.disjunction->m_alternatives.size() != 1) {
+ alternativeBeginOpCode = OpNestedAlternativeBegin;
+ alternativeNextOpCode = OpNestedAlternativeNext;
+ alternativeEndOpCode = OpNestedAlternativeEnd;
+ }
+ } else if (term->parentheses.isTerminal) {
+ // Select the 'Terminal' nodes.
+ parenthesesBeginOpCode = OpParenthesesSubpatternTerminalBegin;
+ parenthesesEndOpCode = OpParenthesesSubpatternTerminalEnd;
+ } else {
+ // This subpattern is not supported by the JIT.
+ m_shouldFallBack = true;
+ return;
+ }
+
+ size_t parenBegin = m_ops.size();
+ m_ops.append(parenthesesBeginOpCode);
+
+ m_ops.append(alternativeBeginOpCode);
+ m_ops.last().m_previousOp = notFound;
+ m_ops.last().m_term = term;
+ Vector<OwnPtr<PatternAlternative> >& alternatives = term->parentheses.disjunction->m_alternatives;
+ for (unsigned i = 0; i < alternatives.size(); ++i) {
+ size_t lastOpIndex = m_ops.size() - 1;
+
+ PatternAlternative* nestedAlternative = alternatives[i].get();
+ opCompileAlternative(nestedAlternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(alternativeNextOpCode));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = nestedAlternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+ thisOp.m_term = term;
+ }
+ YarrOp& lastOp = m_ops.last();
+ ASSERT(lastOp.m_op == alternativeNextOpCode);
+ lastOp.m_op = alternativeEndOpCode;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = notFound;
+
+ size_t parenEnd = m_ops.size();
+ m_ops.append(parenthesesEndOpCode);
+
+ m_ops[parenBegin].m_term = term;
+ m_ops[parenBegin].m_previousOp = notFound;
+ m_ops[parenBegin].m_nextOp = parenEnd;
+ m_ops[parenEnd].m_term = term;
+ m_ops[parenEnd].m_previousOp = parenBegin;
+ m_ops[parenEnd].m_nextOp = notFound;
+ }
+
+ // opCompileParentheticalAssertion
+ // Emits ops for a parenthetical assertion. These consist of an
+ // OpSimpleNestedAlternativeBegin/Next/End set of nodes wrapping
+ // the alternatives, with these wrapped by an outer pair of
+ // OpParentheticalAssertionBegin/End nodes.
+ // We can always use the OpSimpleNestedAlternative nodes in the
+ // case of parenthetical assertions since these only ever match
+ // once, and will never backtrack back into the assertion.
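+ // Illustration: /(?=a|b)c/ wraps the alternatives 'a' and 'b' in
+ // OpSimpleNestedAlternativeBegin/Next/End nodes inside an outer
+ // OpParentheticalAssertionBegin/End pair.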
+ void opCompileParentheticalAssertion(PatternTerm* term)
+ {
+ size_t parenBegin = m_ops.size();
+ m_ops.append(OpParentheticalAssertionBegin);
+
+ m_ops.append(OpSimpleNestedAlternativeBegin);
+ m_ops.last().m_previousOp = notFound;
+ m_ops.last().m_term = term;
+ Vector<OwnPtr<PatternAlternative> >& alternatives = term->parentheses.disjunction->m_alternatives;
+ for (unsigned i = 0; i < alternatives.size(); ++i) {
+ size_t lastOpIndex = m_ops.size() - 1;
+
+ PatternAlternative* nestedAlternative = alternatives[i].get();
+ opCompileAlternative(nestedAlternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(OpSimpleNestedAlternativeNext));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = nestedAlternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+ thisOp.m_term = term;
+ }
+ YarrOp& lastOp = m_ops.last();
+ ASSERT(lastOp.m_op == OpSimpleNestedAlternativeNext);
+ lastOp.m_op = OpSimpleNestedAlternativeEnd;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = notFound;
+
+ size_t parenEnd = m_ops.size();
+ m_ops.append(OpParentheticalAssertionEnd);
+
+ m_ops[parenBegin].m_term = term;
+ m_ops[parenBegin].m_previousOp = notFound;
+ m_ops[parenBegin].m_nextOp = parenEnd;
+ m_ops[parenEnd].m_term = term;
+ m_ops[parenEnd].m_previousOp = parenBegin;
+ m_ops[parenEnd].m_nextOp = notFound;
+ }
+
+ // opCompileAlternative
+ // Called to emit nodes for all terms in an alternative.
+ void opCompileAlternative(PatternAlternative* alternative)
+ {
+ optimizeAlternative(alternative);
+
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i) {
+ PatternTerm* term = &alternative->m_terms[i];
+
+ switch (term->type) {
+ case PatternTerm::TypeParenthesesSubpattern:
+ opCompileParenthesesSubpattern(term);
+ break;
+
+ case PatternTerm::TypeParentheticalAssertion:
+ opCompileParentheticalAssertion(term);
+ break;
+
+ default:
+ m_ops.append(term);
+ }
+ }
+ }
+
+ // opCompileBody
+ // This method compiles the body disjunction of the regular expression.
+ // The body consists of two sets of alternatives - zero or more 'once
+ // through' (BOL anchored) alternatives, followed by zero or more
+ // repeated alternatives.
+ // For each of these two sets of alternatives, if not empty they will be
+ // wrapped in a set of OpBodyAlternativeBegin/Next/End nodes (with the
+ // 'begin' node referencing the first alternative, and 'next' nodes
+ // referencing any further alternatives). The begin/next/end nodes are
+ // linked together in a doubly linked list. In the case of repeating
+ // alternatives, the end node is also linked back to the beginning.
+ // If no repeating alternatives exist, then a OpMatchFailed node exists
+ // to return the failing result.
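+ // Illustration: for /^a|^b|c/ the BOL-anchored alternatives ^a and ^b form
+ // the 'once through' set and c the repeating set, each wrapped in its own
+ // Begin/Next/End nodes.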
+ void opCompileBody(PatternDisjunction* disjunction)
+ {
+ Vector<OwnPtr<PatternAlternative> >& alternatives = disjunction->m_alternatives;
+ size_t currentAlternativeIndex = 0;
+
+ // Emit the 'once through' alternatives.
+ if (alternatives.size() && alternatives[0]->onceThrough()) {
+ m_ops.append(YarrOp(OpBodyAlternativeBegin));
+ m_ops.last().m_previousOp = notFound;
+
+ do {
+ size_t lastOpIndex = m_ops.size() - 1;
+ PatternAlternative* alternative = alternatives[currentAlternativeIndex].get();
+ opCompileAlternative(alternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(OpBodyAlternativeNext));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = alternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+
+ ++currentAlternativeIndex;
+ } while (currentAlternativeIndex < alternatives.size() && alternatives[currentAlternativeIndex]->onceThrough());
+
+ YarrOp& lastOp = m_ops.last();
+
+ ASSERT(lastOp.m_op == OpBodyAlternativeNext);
+ lastOp.m_op = OpBodyAlternativeEnd;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = notFound;
+ }
+
+ if (currentAlternativeIndex == alternatives.size()) {
+ m_ops.append(YarrOp(OpMatchFailed));
+ return;
+ }
+
+ // Emit the repeated alternatives.
+ size_t repeatLoop = m_ops.size();
+ m_ops.append(YarrOp(OpBodyAlternativeBegin));
+ m_ops.last().m_previousOp = notFound;
+ do {
+ size_t lastOpIndex = m_ops.size() - 1;
+ PatternAlternative* alternative = alternatives[currentAlternativeIndex].get();
+ ASSERT(!alternative->onceThrough());
+ opCompileAlternative(alternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(OpBodyAlternativeNext));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = alternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+
+ ++currentAlternativeIndex;
+ } while (currentAlternativeIndex < alternatives.size());
+ YarrOp& lastOp = m_ops.last();
+ ASSERT(lastOp.m_op == OpBodyAlternativeNext);
+ lastOp.m_op = OpBodyAlternativeEnd;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = repeatLoop;
+ }
+
+ void generateEnter()
+ {
+#if CPU(X86_64)
+ push(X86Registers::ebp);
+ move(stackPointerRegister, X86Registers::ebp);
+ push(X86Registers::ebx);
+ // The ABI doesn't guarantee the upper bits are zero on unsigned arguments, so clear them ourselves.
+ zeroExtend32ToPtr(index, index);
+ zeroExtend32ToPtr(length, length);
+#if OS(WINDOWS)
+ if (compileMode == IncludeSubpatterns)
+ loadPtr(Address(X86Registers::ebp, 6 * sizeof(void*)), output);
+#endif
+#elif CPU(X86)
+ push(X86Registers::ebp);
+ move(stackPointerRegister, X86Registers::ebp);
+ // TODO: do we need spill registers to fill the output pointer if there are no sub captures?
+ push(X86Registers::ebx);
+ push(X86Registers::edi);
+ push(X86Registers::esi);
+ // Load arguments passed on the stack (slot 2 = above the saved ebp and return address).
+ #if COMPILER(MSVC)
+ loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), input);
+ loadPtr(Address(X86Registers::ebp, 3 * sizeof(void*)), index);
+ loadPtr(Address(X86Registers::ebp, 4 * sizeof(void*)), length);
+ if (compileMode == IncludeSubpatterns)
+ loadPtr(Address(X86Registers::ebp, 5 * sizeof(void*)), output);
+ #else
+ if (compileMode == IncludeSubpatterns)
+ loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), output);
+ #endif
+#elif CPU(ARM)
+ push(ARMRegisters::r4);
+ push(ARMRegisters::r5);
+ push(ARMRegisters::r6);
+#if CPU(ARM_TRADITIONAL)
+ push(ARMRegisters::r8); // scratch register
+#endif
+ if (compileMode == IncludeSubpatterns)
+ move(ARMRegisters::r3, output);
+#elif CPU(SH4)
+ push(SH4Registers::r11);
+ push(SH4Registers::r13);
+#elif CPU(MIPS)
+ // Do nothing.
+#endif
+ }
+
+ void generateReturn()
+ {
+#if CPU(X86_64)
+#if OS(WINDOWS)
+ // Store the return value in the allocated space pointed to by rcx.
+ store64(returnRegister, Address(X86Registers::ecx));
+ store64(returnRegister2, Address(X86Registers::ecx, sizeof(void*)));
+ move(X86Registers::ecx, returnRegister);
+#endif
+ pop(X86Registers::ebx);
+ pop(X86Registers::ebp);
+#elif CPU(X86)
+ pop(X86Registers::esi);
+ pop(X86Registers::edi);
+ pop(X86Registers::ebx);
+ pop(X86Registers::ebp);
+#elif CPU(ARM)
+#if CPU(ARM_TRADITIONAL)
+ pop(ARMRegisters::r8); // scratch register
+#endif
+ pop(ARMRegisters::r6);
+ pop(ARMRegisters::r5);
+ pop(ARMRegisters::r4);
+#elif CPU(SH4)
+ pop(SH4Registers::r13);
+ pop(SH4Registers::r11);
+#elif CPU(MIPS)
+ // Do nothing
+#endif
+ ret();
+ }
+
+public:
+ YarrGenerator(YarrPattern& pattern, YarrCharSize charSize)
+ : m_pattern(pattern)
+ , m_charSize(charSize)
+ , m_charScale(m_charSize == Char8 ? TimesOne : TimesTwo)
+ , m_shouldFallBack(false)
+ , m_checked(0)
+ {
+ }
+
+ void compile(JSGlobalData* globalData, YarrCodeBlock& jitObject)
+ {
+ generateEnter();
+
+ Jump hasInput = checkInput();
+ move(TrustedImmPtr((void*)WTF::notFound), returnRegister);
+ move(TrustedImm32(0), returnRegister2);
+ generateReturn();
+ hasInput.link(this);
+
+ if (compileMode == IncludeSubpatterns) {
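+ // The output vector holds a (start, end) pair of ints per subpattern,
+ // so (i << 1) addresses subpattern i's start slot; -1 marks it unmatched.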
+ for (unsigned i = 0; i < m_pattern.m_numSubpatterns + 1; ++i)
+ store32(TrustedImm32(-1), Address(output, (i << 1) * sizeof(int)));
+ }
+
+ if (!m_pattern.m_body->m_hasFixedSize)
+ setMatchStart(index);
+
+ initCallFrame();
+
+ // Compile the pattern to the internal 'YarrOp' representation.
+ opCompileBody(m_pattern.m_body);
+
+ // If we encountered anything we can't handle in the JIT code
+ // (e.g. backreferences) then return early.
+ if (m_shouldFallBack) {
+ jitObject.setFallBack(true);
+ return;
+ }
+
+ generate();
+ backtrack();
+
+ // Link & finalize the code.
+ LinkBuffer linkBuffer(*globalData, this, REGEXP_CODE_ID);
+ m_backtrackingState.linkDataLabels(linkBuffer);
+
+ if (compileMode == MatchOnly) {
+ if (m_charSize == Char8)
+ jitObject.set8BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, ("Match-only 8-bit regular expression")));
+ else
+ jitObject.set16BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, ("Match-only 16-bit regular expression")));
+ } else {
+ if (m_charSize == Char8)
+ jitObject.set8BitCode(FINALIZE_CODE(linkBuffer, ("8-bit regular expression")));
+ else
+ jitObject.set16BitCode(FINALIZE_CODE(linkBuffer, ("16-bit regular expression")));
+ }
+ jitObject.setFallBack(m_shouldFallBack);
+ }
+
+private:
+ YarrPattern& m_pattern;
+
+ YarrCharSize m_charSize;
+
+ Scale m_charScale;
+
+ // Used to detect regular expression constructs that are not currently
+ // supported in the JIT; fall back to the interpreter when this is detected.
+ bool m_shouldFallBack;
+
+ // The regular expression expressed as a linear sequence of operations.
+ Vector<YarrOp, 128> m_ops;
+
+ // This records the current input offset being applied due to the current
+ // set of alternatives we are nested within. E.g. when matching the
+ // character 'b' within the regular expression /abc/, we will know that
+ // the minimum size for the alternative is 3, checked upon entry to the
+ // alternative, and that 'b' is at offset 1 from the start, and as such
+ // when matching 'b' we need to apply an offset of -2 to the load.
+ //
+ // FIXME: This should go away. Rather than tracking this value throughout
+ // code generation, we should gather this information up front & store it
+ // on the YarrOp structure.
+ int m_checked;
+
+ // This class records state whilst generating the backtracking path of code.
+ BacktrackingState m_backtrackingState;
+};
+
+void jitCompile(YarrPattern& pattern, YarrCharSize charSize, JSGlobalData* globalData, YarrCodeBlock& jitObject, YarrJITCompileMode mode)
+{
+ if (mode == MatchOnly)
+ YarrGenerator<MatchOnly>(pattern, charSize).compile(globalData, jitObject);
+ else
+ YarrGenerator<IncludeSubpatterns>(pattern, charSize).compile(globalData, jitObject);
+}
+
+}}
+
+#endif
diff --git a/src/3rdparty/masm/yarr/YarrJIT.h b/src/3rdparty/masm/yarr/YarrJIT.h
new file mode 100644
index 0000000000..bb7033fdea
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrJIT.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrJIT_h
+#define YarrJIT_h
+
+#if ENABLE(YARR_JIT)
+
+#include "JSGlobalData.h"
+#include "MacroAssemblerCodeRef.h"
+#include "MatchResult.h"
+#include "Yarr.h"
+#include "YarrPattern.h"
+
+#if CPU(X86) && !COMPILER(MSVC)
+#define YARR_CALL __attribute__ ((regparm (3)))
+#else
+#define YARR_CALL
+#endif
+
+namespace JSC {
+
+class JSGlobalData;
+class ExecutablePool;
+
+namespace Yarr {
+
+class YarrCodeBlock {
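+ // MatchResult is a two-word (start, end) pair. On X86_64 the JIT-generated
+ // code can return it directly in a register pair; other targets return it
+ // encoded in a single EncodedMatchResult value.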
+#if CPU(X86_64)
+ typedef MatchResult (*YarrJITCode8)(const LChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+ typedef MatchResult (*YarrJITCode16)(const UChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+ typedef MatchResult (*YarrJITCodeMatchOnly8)(const LChar* input, unsigned start, unsigned length) YARR_CALL;
+ typedef MatchResult (*YarrJITCodeMatchOnly16)(const UChar* input, unsigned start, unsigned length) YARR_CALL;
+#else
+ typedef EncodedMatchResult (*YarrJITCode8)(const LChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+ typedef EncodedMatchResult (*YarrJITCode16)(const UChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+ typedef EncodedMatchResult (*YarrJITCodeMatchOnly8)(const LChar* input, unsigned start, unsigned length) YARR_CALL;
+ typedef EncodedMatchResult (*YarrJITCodeMatchOnly16)(const UChar* input, unsigned start, unsigned length) YARR_CALL;
+#endif
+
+public:
+ YarrCodeBlock()
+ : m_needFallBack(false)
+ {
+ }
+
+ ~YarrCodeBlock()
+ {
+ }
+
+ void setFallBack(bool fallback) { m_needFallBack = fallback; }
+ bool isFallBack() { return m_needFallBack; }
+
+ bool has8BitCode() { return m_ref8.size(); }
+ bool has16BitCode() { return m_ref16.size(); }
+ void set8BitCode(MacroAssemblerCodeRef ref) { m_ref8 = ref; }
+ void set16BitCode(MacroAssemblerCodeRef ref) { m_ref16 = ref; }
+
+ bool has8BitCodeMatchOnly() { return m_matchOnly8.size(); }
+ bool has16BitCodeMatchOnly() { return m_matchOnly16.size(); }
+ void set8BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly8 = matchOnly; }
+ void set16BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly16 = matchOnly; }
+
+ MatchResult execute(const LChar* input, unsigned start, unsigned length, int* output)
+ {
+ ASSERT(has8BitCode());
+ return MatchResult(reinterpret_cast<YarrJITCode8>(m_ref8.code().executableAddress())(input, start, length, output));
+ }
+
+ MatchResult execute(const UChar* input, unsigned start, unsigned length, int* output)
+ {
+ ASSERT(has16BitCode());
+ return MatchResult(reinterpret_cast<YarrJITCode16>(m_ref16.code().executableAddress())(input, start, length, output));
+ }
+
+ MatchResult execute(const LChar* input, unsigned start, unsigned length)
+ {
+ ASSERT(has8BitCodeMatchOnly());
+ return MatchResult(reinterpret_cast<YarrJITCodeMatchOnly8>(m_matchOnly8.code().executableAddress())(input, start, length));
+ }
+
+ MatchResult execute(const UChar* input, unsigned start, unsigned length)
+ {
+ ASSERT(has16BitCodeMatchOnly());
+ return MatchResult(reinterpret_cast<YarrJITCodeMatchOnly16>(m_matchOnly16.code().executableAddress())(input, start, length));
+ }
+
+#if ENABLE(REGEXP_TRACING)
+ void *getAddr() { return m_ref8.code().executableAddress(); } // assumption: report the 8-bit code ref (there is no single m_ref member)
+#endif
+
+ void clear()
+ {
+ m_ref8 = MacroAssemblerCodeRef();
+ m_ref16 = MacroAssemblerCodeRef();
+ m_matchOnly8 = MacroAssemblerCodeRef();
+ m_matchOnly16 = MacroAssemblerCodeRef();
+ m_needFallBack = false;
+ }
+
+private:
+ MacroAssemblerCodeRef m_ref8;
+ MacroAssemblerCodeRef m_ref16;
+ MacroAssemblerCodeRef m_matchOnly8;
+ MacroAssemblerCodeRef m_matchOnly16;
+ bool m_needFallBack;
+};
+
+enum YarrJITCompileMode {
+ MatchOnly,
+ IncludeSubpatterns
+};
+void jitCompile(YarrPattern&, YarrCharSize, JSGlobalData*, YarrCodeBlock& jitObject, YarrJITCompileMode = IncludeSubpatterns);
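+// Typical usage (illustrative sketch - input8/outputVector are hypothetical
+// names): compile once, then dispatch on string width and compile mode, e.g.:
+//     YarrCodeBlock code;
+//     jitCompile(pattern, Char8, globalData, code);
+//     if (!code.isFallBack())
+//         MatchResult result = code.execute(input8, 0, length, outputVector);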
+
+} } // namespace JSC::Yarr
+
+#endif
+
+#endif // YarrJIT_h
diff --git a/src/3rdparty/masm/yarr/YarrParser.h b/src/3rdparty/masm/yarr/YarrParser.h
new file mode 100644
index 0000000000..8c5d71b5fe
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrParser.h
@@ -0,0 +1,880 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrParser_h
+#define YarrParser_h
+
+#include "Yarr.h"
+#include <wtf/ASCIICType.h>
+#include <wtf/text/WTFString.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace JSC { namespace Yarr {
+
+#define REGEXP_ERROR_PREFIX "Invalid regular expression: "
+
+enum BuiltInCharacterClassID {
+ DigitClassID,
+ SpaceClassID,
+ WordClassID,
+ NewlineClassID,
+};
+
+// The Parser class should not be used directly - only via the Yarr::parse() method.
+template<class Delegate, typename CharType>
+class Parser {
+private:
+ template<class FriendDelegate>
+ friend const char* parse(FriendDelegate&, const String& pattern, unsigned backReferenceLimit);
+
+ enum ErrorCode {
+ NoError,
+ PatternTooLarge,
+ QuantifierOutOfOrder,
+ QuantifierWithoutAtom,
+ QuantifierTooLarge,
+ MissingParentheses,
+ ParenthesesUnmatched,
+ ParenthesesTypeInvalid,
+ CharacterClassUnmatched,
+ CharacterClassOutOfOrder,
+ EscapeUnterminated,
+ NumberOfErrorCodes
+ };
+
+ /*
+ * CharacterClassParserDelegate:
+ *
+ * The class CharacterClassParserDelegate is used in the parsing of character
+ * classes. This class handles detection of character ranges. This class
+ * implements enough of the delegate interface such that it can be passed to
+ * parseEscape() as an EscapeDelegate. This allows parseEscape() to be reused
+ * to perform the parsing of escape characters in character sets.
+ */
+ class CharacterClassParserDelegate {
+ public:
+ CharacterClassParserDelegate(Delegate& delegate, ErrorCode& err)
+ : m_delegate(delegate)
+ , m_err(err)
+ , m_state(Empty)
+ , m_character(0)
+ {
+ }
+
+ /*
+ * begin():
+ *
+ * Called at beginning of construction.
+ */
+ void begin(bool invert)
+ {
+ m_delegate.atomCharacterClassBegin(invert);
+ }
+
+ /*
+ * atomPatternCharacter():
+ *
+ * This method is called either from parseCharacterClass() (for an unescaped
+ * character in a character class), or from parseEscape(). In the former case
+ * the value true will be passed for the argument 'hyphenIsRange', and in this
+ * mode we will allow a hyphen to be treated as indicating a range (i.e. /[a-z]/
+ * is different to /[a\-z]/).
+ */
+ void atomPatternCharacter(UChar ch, bool hyphenIsRange = false)
+ {
+ switch (m_state) {
+ case AfterCharacterClass:
+ // Following a builtin character class we need to look out for a hyphen.
+ // We're looking for invalid ranges, such as /[\d-x]/ or /[\d-\d]/.
+ // If we see a hyphen following a character class then unlike usual
+ // we'll report it to the delegate immediately, and put ourselves into
+ // a poisoned state. Any following calls to add another character or
+ // character class will result in an error. (A hyphen following a
+ // character-class is itself valid, but only at the end of a regex).
+ if (hyphenIsRange && ch == '-') {
+ m_delegate.atomCharacterClassAtom('-');
+ m_state = AfterCharacterClassHyphen;
+ return;
+ }
+ // Otherwise just fall through - no cached character, so treat this as Empty.
+
+ case Empty:
+ m_character = ch;
+ m_state = CachedCharacter;
+ return;
+
+ case CachedCharacter:
+ if (hyphenIsRange && ch == '-')
+ m_state = CachedCharacterHyphen;
+ else {
+ m_delegate.atomCharacterClassAtom(m_character);
+ m_character = ch;
+ }
+ return;
+
+ case CachedCharacterHyphen:
+ if (ch < m_character) {
+ m_err = CharacterClassOutOfOrder;
+ return;
+ }
+ m_delegate.atomCharacterClassRange(m_character, ch);
+ m_state = Empty;
+ return;
+
+ // See comment in atomBuiltInCharacterClass below.
+ // This too is technically an error, per ECMA-262, and again we
+ // chose to allow this. Note a subtlety here: while we
+ // diverge from the spec's definition of CharacterRange we do
+ // remain in compliance with the grammar. For example, consider
+ // the expression /[\d-a-z]/. We comply with the grammar in
+ // this case by not allowing a-z to be matched as a range.
+ case AfterCharacterClassHyphen:
+ m_delegate.atomCharacterClassAtom(ch);
+ m_state = Empty;
+ return;
+ }
+ }
+
+ /*
+ * atomBuiltInCharacterClass():
+ *
+ * Adds a built-in character class, called by parseEscape().
+ */
+ void atomBuiltInCharacterClass(BuiltInCharacterClassID classID, bool invert)
+ {
+ switch (m_state) {
+ case CachedCharacter:
+ // Flush the currently cached character, then fall through.
+ m_delegate.atomCharacterClassAtom(m_character);
+
+ case Empty:
+ case AfterCharacterClass:
+ m_state = AfterCharacterClass;
+ m_delegate.atomCharacterClassBuiltIn(classID, invert);
+ return;
+
+ // If we hit either of these cases, we have an invalid range that
+ // looks something like /[x-\d]/ or /[\d-\d]/.
+ // According to ECMA-262 this should be a syntax error, but
+ // empirical testing shows this to break the web. Instead we
+ // comply with the ECMA-262 grammar, and assume the grammar to
+ // have matched the range correctly, but tweak our interpretation
+ // of CharacterRange. Effectively we implicitly handle the hyphen
+ // as if it were escaped, e.g. /[\w-_]/ is treated as /[\w\-_]/.
+ case CachedCharacterHyphen:
+ m_delegate.atomCharacterClassAtom(m_character);
+ m_delegate.atomCharacterClassAtom('-');
+ // fall through
+ case AfterCharacterClassHyphen:
+ m_delegate.atomCharacterClassBuiltIn(classID, invert);
+ m_state = Empty;
+ return;
+ }
+ }
+
+ /*
+ * end():
+ *
+ * Called at end of construction.
+ */
+ void end()
+ {
+ if (m_state == CachedCharacter)
+ m_delegate.atomCharacterClassAtom(m_character);
+ else if (m_state == CachedCharacterHyphen) {
+ m_delegate.atomCharacterClassAtom(m_character);
+ m_delegate.atomCharacterClassAtom('-');
+ }
+ m_delegate.atomCharacterClassEnd();
+ }
+
+ // parseEscape() should never call these delegate methods when
+ // invoked with inCharacterClass set.
+ NO_RETURN_DUE_TO_ASSERT void assertionWordBoundary(bool) { RELEASE_ASSERT_NOT_REACHED(); }
+ NO_RETURN_DUE_TO_ASSERT void atomBackReference(unsigned) { RELEASE_ASSERT_NOT_REACHED(); }
+
+ private:
+ Delegate& m_delegate;
+ ErrorCode& m_err;
+ enum CharacterClassConstructionState {
+ Empty,
+ CachedCharacter,
+ CachedCharacterHyphen,
+ AfterCharacterClass,
+ AfterCharacterClassHyphen,
+ } m_state;
+ UChar m_character;
+ };
+
+ Parser(Delegate& delegate, const String& pattern, unsigned backReferenceLimit)
+ : m_delegate(delegate)
+ , m_backReferenceLimit(backReferenceLimit)
+ , m_err(NoError)
+ , m_data(pattern.getCharacters<CharType>())
+ , m_size(pattern.length())
+ , m_index(0)
+ , m_parenthesesNestingDepth(0)
+ {
+ }
+
+ /*
+ * parseEscape():
+ *
+ * Helper for parseTokens() AND parseCharacterClass().
+ * Unlike the other parser methods, this function does not report tokens
+ * directly to the member delegate (m_delegate), instead tokens are
+ * emitted to the delegate provided as an argument. In the case of atom
+ * escapes, parseTokens() will call parseEscape() passing m_delegate as
+ * an argument, and as such the escape will be reported to the delegate.
+ *
+ * However this method may also be used by parseCharacterClass(), in which
+ * case a CharacterClassParserDelegate will be passed as the delegate that
+ * tokens should be added to. A boolean flag is also provided to indicate
+ * whether an escape in a CharacterClass is being parsed (some parsing
+ * rules change in this context).
+ *
+ * The boolean value returned by this method indicates whether the token
+ * parsed was an atom (outside of a character class \b and \B will be
+ * interpreted as assertions).
+ */
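+ // For example, parsing the escape in /\d/ calls
+ // delegate.atomBuiltInCharacterClass(DigitClassID, false) and returns true
+ // (an atom), while parsing /\b/ outside a character class calls
+ // delegate.assertionWordBoundary(false) and returns false.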
+ template<bool inCharacterClass, class EscapeDelegate>
+ bool parseEscape(EscapeDelegate& delegate)
+ {
+ ASSERT(!m_err);
+ ASSERT(peek() == '\\');
+ consume();
+
+ if (atEndOfPattern()) {
+ m_err = EscapeUnterminated;
+ return false;
+ }
+
+ switch (peek()) {
+ // Assertions
+ case 'b':
+ consume();
+ if (inCharacterClass)
+ delegate.atomPatternCharacter('\b');
+ else {
+ delegate.assertionWordBoundary(false);
+ return false;
+ }
+ break;
+ case 'B':
+ consume();
+ if (inCharacterClass)
+ delegate.atomPatternCharacter('B');
+ else {
+ delegate.assertionWordBoundary(true);
+ return false;
+ }
+ break;
+
+ // CharacterClassEscape
+ case 'd':
+ consume();
+ delegate.atomBuiltInCharacterClass(DigitClassID, false);
+ break;
+ case 's':
+ consume();
+ delegate.atomBuiltInCharacterClass(SpaceClassID, false);
+ break;
+ case 'w':
+ consume();
+ delegate.atomBuiltInCharacterClass(WordClassID, false);
+ break;
+ case 'D':
+ consume();
+ delegate.atomBuiltInCharacterClass(DigitClassID, true);
+ break;
+ case 'S':
+ consume();
+ delegate.atomBuiltInCharacterClass(SpaceClassID, true);
+ break;
+ case 'W':
+ consume();
+ delegate.atomBuiltInCharacterClass(WordClassID, true);
+ break;
+
+ // DecimalEscape
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ // To match Firefox, we parse an invalid backreference in the range [1-7] as an octal escape.
+ // First, try to parse this as a backreference.
+ if (!inCharacterClass) {
+ ParseState state = saveState();
+
+ unsigned backReference = consumeNumber();
+ if (backReference <= m_backReferenceLimit) {
+ delegate.atomBackReference(backReference);
+ break;
+ }
+
+ restoreState(state);
+ }
+
+ // Not a backreference, and not octal.
+ if (peek() >= '8') {
+ delegate.atomPatternCharacter('\\');
+ break;
+ }
+
+ // Fall-through to handle this as an octal escape.
+ }
+
+ // Octal escape
+ case '0':
+ delegate.atomPatternCharacter(consumeOctal());
+ break;
+
+ // ControlEscape
+ case 'f':
+ consume();
+ delegate.atomPatternCharacter('\f');
+ break;
+ case 'n':
+ consume();
+ delegate.atomPatternCharacter('\n');
+ break;
+ case 'r':
+ consume();
+ delegate.atomPatternCharacter('\r');
+ break;
+ case 't':
+ consume();
+ delegate.atomPatternCharacter('\t');
+ break;
+ case 'v':
+ consume();
+ delegate.atomPatternCharacter('\v');
+ break;
+
+ // ControlLetter
+ case 'c': {
+ ParseState state = saveState();
+ consume();
+ if (!atEndOfPattern()) {
+ int control = consume();
+
+ // To match Firefox, inside a character class, we also accept numbers and '_' as control characters.
+ if (inCharacterClass ? WTF::isASCIIAlphanumeric(control) || (control == '_') : WTF::isASCIIAlpha(control)) {
+ delegate.atomPatternCharacter(control & 0x1f);
+ break;
+ }
+ }
+ restoreState(state);
+ delegate.atomPatternCharacter('\\');
+ break;
+ }
+
+ // HexEscape
+ case 'x': {
+ consume();
+ int x = tryConsumeHex(2);
+ if (x == -1)
+ delegate.atomPatternCharacter('x');
+ else
+ delegate.atomPatternCharacter(x);
+ break;
+ }
+
+ // UnicodeEscape
+ case 'u': {
+ consume();
+ int u = tryConsumeHex(4);
+ if (u == -1)
+ delegate.atomPatternCharacter('u');
+ else
+ delegate.atomPatternCharacter(u);
+ break;
+ }
+
+ // IdentityEscape
+ default:
+ delegate.atomPatternCharacter(consume());
+ }
+
+ return true;
+ }
+
+ /*
+ * parseAtomEscape(), parseCharacterClassEscape():
+ *
+ * These methods alias to parseEscape().
+ */
+ bool parseAtomEscape()
+ {
+ return parseEscape<false>(m_delegate);
+ }
+ void parseCharacterClassEscape(CharacterClassParserDelegate& delegate)
+ {
+ parseEscape<true>(delegate);
+ }
+
+ /*
+ * parseCharacterClass():
+ *
+ * Helper for parseTokens(); reports tokens directly, and indirectly (via
+ * parseCharacterClassEscape), to an instance of CharacterClassParserDelegate,
+ * which describes the character class to the delegate.
+ */
+ void parseCharacterClass()
+ {
+ ASSERT(!m_err);
+ ASSERT(peek() == '[');
+ consume();
+
+ CharacterClassParserDelegate characterClassConstructor(m_delegate, m_err);
+
+ characterClassConstructor.begin(tryConsume('^'));
+
+ while (!atEndOfPattern()) {
+ switch (peek()) {
+ case ']':
+ consume();
+ characterClassConstructor.end();
+ return;
+
+ case '\\':
+ parseCharacterClassEscape(characterClassConstructor);
+ break;
+
+ default:
+ characterClassConstructor.atomPatternCharacter(consume(), true);
+ }
+
+ if (m_err)
+ return;
+ }
+
+ m_err = CharacterClassUnmatched;
+ }
+
+ /*
+ * parseParenthesesBegin():
+ *
+ * Helper for parseTokens(); checks for parentheses types other than regular capturing subpatterns.
+ */
+ void parseParenthesesBegin()
+ {
+ ASSERT(!m_err);
+ ASSERT(peek() == '(');
+ consume();
+
+ if (tryConsume('?')) {
+ if (atEndOfPattern()) {
+ m_err = ParenthesesTypeInvalid;
+ return;
+ }
+
+ switch (consume()) {
+ case ':':
+ m_delegate.atomParenthesesSubpatternBegin(false);
+ break;
+
+ case '=':
+ m_delegate.atomParentheticalAssertionBegin();
+ break;
+
+ case '!':
+ m_delegate.atomParentheticalAssertionBegin(true);
+ break;
+
+ default:
+ m_err = ParenthesesTypeInvalid;
+ }
+ } else
+ m_delegate.atomParenthesesSubpatternBegin();
+
+ ++m_parenthesesNestingDepth;
+ }
+
+ /*
+ * parseParenthesesEnd():
+ *
+ * Helper for parseTokens(); checks for parse errors (due to unmatched parentheses).
+ */
+ void parseParenthesesEnd()
+ {
+ ASSERT(!m_err);
+ ASSERT(peek() == ')');
+ consume();
+
+ if (m_parenthesesNestingDepth > 0)
+ m_delegate.atomParenthesesEnd();
+ else
+ m_err = ParenthesesUnmatched;
+
+ --m_parenthesesNestingDepth;
+ }
+
+ /*
+ * parseQuantifier():
+ *
+ * Helper for parseTokens(); checks for parse errors and non-greedy quantifiers.
+ */
+ void parseQuantifier(bool lastTokenWasAnAtom, unsigned min, unsigned max)
+ {
+ ASSERT(!m_err);
+ ASSERT(min <= max);
+
+ if (min == UINT_MAX) {
+ m_err = QuantifierTooLarge;
+ return;
+ }
+
+ if (lastTokenWasAnAtom)
+ m_delegate.quantifyAtom(min, max, !tryConsume('?'));
+ else
+ m_err = QuantifierWithoutAtom;
+ }
+
+ /*
+ * parseTokens():
+ *
+ * This method loops over the input pattern reporting tokens to the delegate.
+ * The method returns when a parse error is detected, or the end of the pattern
+ * is reached. One piece of state is tracked around the loop, which is whether
+ * the last token passed to the delegate was an atom (this is necessary to detect
+ * a parse error when a quantifier is provided without an atom to quantify).
+ */
+ void parseTokens()
+ {
+ bool lastTokenWasAnAtom = false;
+
+ while (!atEndOfPattern()) {
+ switch (peek()) {
+ case '|':
+ consume();
+ m_delegate.disjunction();
+ lastTokenWasAnAtom = false;
+ break;
+
+ case '(':
+ parseParenthesesBegin();
+ lastTokenWasAnAtom = false;
+ break;
+
+ case ')':
+ parseParenthesesEnd();
+ lastTokenWasAnAtom = true;
+ break;
+
+ case '^':
+ consume();
+ m_delegate.assertionBOL();
+ lastTokenWasAnAtom = false;
+ break;
+
+ case '$':
+ consume();
+ m_delegate.assertionEOL();
+ lastTokenWasAnAtom = false;
+ break;
+
+ case '.':
+ consume();
+ m_delegate.atomBuiltInCharacterClass(NewlineClassID, true);
+ lastTokenWasAnAtom = true;
+ break;
+
+ case '[':
+ parseCharacterClass();
+ lastTokenWasAnAtom = true;
+ break;
+
+ case '\\':
+ lastTokenWasAnAtom = parseAtomEscape();
+ break;
+
+ case '*':
+ consume();
+ parseQuantifier(lastTokenWasAnAtom, 0, quantifyInfinite);
+ lastTokenWasAnAtom = false;
+ break;
+
+ case '+':
+ consume();
+ parseQuantifier(lastTokenWasAnAtom, 1, quantifyInfinite);
+ lastTokenWasAnAtom = false;
+ break;
+
+ case '?':
+ consume();
+ parseQuantifier(lastTokenWasAnAtom, 0, 1);
+ lastTokenWasAnAtom = false;
+ break;
+
+ case '{': {
+ ParseState state = saveState();
+
+ consume();
+ if (peekIsDigit()) {
+ unsigned min = consumeNumber();
+ unsigned max = min;
+
+ if (tryConsume(','))
+ max = peekIsDigit() ? consumeNumber() : quantifyInfinite;
+
+ if (tryConsume('}')) {
+ if (min <= max)
+ parseQuantifier(lastTokenWasAnAtom, min, max);
+ else
+ m_err = QuantifierOutOfOrder;
+ lastTokenWasAnAtom = false;
+ break;
+ }
+ }
+
+ restoreState(state);
+ } // If we did not find a complete quantifier, fall through to the default case.
+
+ default:
+ m_delegate.atomPatternCharacter(consume());
+ lastTokenWasAnAtom = true;
+ }
+
+ if (m_err)
+ return;
+ }
+
+ if (m_parenthesesNestingDepth > 0)
+ m_err = MissingParentheses;
+ }
+
+ /*
+ * parse():
+ *
+ * This method calls parseTokens() to parse over the input and converts any
+ * error code to a const char* for a result.
+ */
+ const char* parse()
+ {
+ if (m_size > MAX_PATTERN_SIZE)
+ m_err = PatternTooLarge;
+ else
+ parseTokens();
+ ASSERT(atEndOfPattern() || m_err);
+
+ // The order of this array must match the ErrorCode enum.
+ static const char* errorMessages[NumberOfErrorCodes] = {
+ 0, // NoError
+ REGEXP_ERROR_PREFIX "regular expression too large",
+ REGEXP_ERROR_PREFIX "numbers out of order in {} quantifier",
+ REGEXP_ERROR_PREFIX "nothing to repeat",
+ REGEXP_ERROR_PREFIX "number too large in {} quantifier",
+ REGEXP_ERROR_PREFIX "missing )",
+ REGEXP_ERROR_PREFIX "unmatched parentheses",
+ REGEXP_ERROR_PREFIX "unrecognized character after (?",
+ REGEXP_ERROR_PREFIX "missing terminating ] for character class",
+ REGEXP_ERROR_PREFIX "range out of order in character class",
+ REGEXP_ERROR_PREFIX "\\ at end of pattern"
+ };
+
+ return errorMessages[m_err];
+ }
+
+ // Misc helper functions:
+
+ typedef unsigned ParseState;
+
+ ParseState saveState()
+ {
+ return m_index;
+ }
+
+ void restoreState(ParseState state)
+ {
+ m_index = state;
+ }
+
+ bool atEndOfPattern()
+ {
+ ASSERT(m_index <= m_size);
+ return m_index == m_size;
+ }
+
+ int peek()
+ {
+ ASSERT(m_index < m_size);
+ return m_data[m_index];
+ }
+
+ bool peekIsDigit()
+ {
+ return !atEndOfPattern() && WTF::isASCIIDigit(peek());
+ }
+
+ unsigned peekDigit()
+ {
+ ASSERT(peekIsDigit());
+ return peek() - '0';
+ }
+
+ int consume()
+ {
+ ASSERT(m_index < m_size);
+ return m_data[m_index++];
+ }
+
+ unsigned consumeDigit()
+ {
+ ASSERT(peekIsDigit());
+ return consume() - '0';
+ }
+
+ unsigned consumeNumber()
+ {
+ unsigned n = consumeDigit();
+ // Check for overflow: stop accumulating once adding another digit would wrap.
+ for (unsigned newValue; peekIsDigit() && ((newValue = n * 10 + peekDigit()) >= n); ) {
+ n = newValue;
+ consume();
+ }
+ return n;
+ }
+
+ unsigned consumeOctal()
+ {
+ ASSERT(WTF::isASCIIOctalDigit(peek()));
+
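+ // Accumulate only while the value still fits in a byte: n < 32 guarantees n * 8 + 7 <= 0xff.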
+ unsigned n = consumeDigit();
+ while (n < 32 && !atEndOfPattern() && WTF::isASCIIOctalDigit(peek()))
+ n = n * 8 + consumeDigit();
+ return n;
+ }
+
+ bool tryConsume(UChar ch)
+ {
+ if (atEndOfPattern() || (m_data[m_index] != ch))
+ return false;
+ ++m_index;
+ return true;
+ }
+
+ int tryConsumeHex(int count)
+ {
+ ParseState state = saveState();
+
+ int n = 0;
+ while (count--) {
+ if (atEndOfPattern() || !WTF::isASCIIHexDigit(peek())) {
+ restoreState(state);
+ return -1;
+ }
+ n = (n << 4) | WTF::toASCIIHexValue(consume());
+ }
+ return n;
+ }
+
+ Delegate& m_delegate;
+ unsigned m_backReferenceLimit;
+ ErrorCode m_err;
+ const CharType* m_data;
+ unsigned m_size;
+ unsigned m_index;
+ unsigned m_parenthesesNestingDepth;
+
+ // Derived by empirical testing of compile time in PCRE and WREC.
+ static const unsigned MAX_PATTERN_SIZE = 1024 * 1024;
+};
+
+/*
+ * Yarr::parse():
+ *
+ * The parse method is passed a pattern to be parsed and a delegate upon which
+ * callbacks will be made to record the parsed tokens forming the regex.
+ * Yarr::parse() returns null on success, or a const C string providing an error
+ * message where a parse error occurs.
+ *
+ * The Delegate must implement the following interface:
+ *
+ * void assertionBOL();
+ * void assertionEOL();
+ * void assertionWordBoundary(bool invert);
+ *
+ * void atomPatternCharacter(UChar ch);
+ * void atomBuiltInCharacterClass(BuiltInCharacterClassID classID, bool invert);
+ * void atomCharacterClassBegin(bool invert)
+ * void atomCharacterClassAtom(UChar ch)
+ * void atomCharacterClassRange(UChar begin, UChar end)
+ * void atomCharacterClassBuiltIn(BuiltInCharacterClassID classID, bool invert)
+ * void atomCharacterClassEnd()
+ * void atomParenthesesSubpatternBegin(bool capture = true);
+ * void atomParentheticalAssertionBegin(bool invert = false);
+ * void atomParenthesesEnd();
+ * void atomBackReference(unsigned subpatternId);
+ *
+ * void quantifyAtom(unsigned min, unsigned max, bool greedy);
+ *
+ * void disjunction();
+ *
+ * The regular expression is described by a sequence of assertion*() and atom*()
+ * callbacks to the delegate, describing the terms in the regular expression.
+ * Following an atom a quantifyAtom() call may occur to indicate that the previous
+ * atom should be quantified. In the case of atoms described across multiple
+ * calls (parentheses and character classes) the call to quantifyAtom() will come
+ * after the call to the atom*End() method, never after atom*Begin().
+ *
+ * Character classes may either be described by a single call to
+ * atomBuiltInCharacterClass(), or by a sequence of atomCharacterClass*() calls.
+ * In the latter case, ...Begin() will be called, followed by a sequence of
+ * calls to ...Atom(), ...Range(), and ...BuiltIn(), followed by a call to ...End().
+ *
+ * Sequences of atoms and assertions are broken into alternatives via calls to
+ * disjunction(). Assertions, atoms, and disjunctions emitted between calls to
+ * atomParenthesesBegin() and atomParenthesesEnd() form the body of a subpattern.
+ * atomParenthesesBegin() is passed a subpatternId. In the case of a regular
+ * capturing subpattern, this will be the subpatternId associated with these
+ * parentheses, and will also by definition be the lowest subpatternId of these
+ * parentheses and of any nested parentheses. The atomParenthesesEnd() method
+ * is passed the subpatternId of the last capturing subexpression nested within
+ * these parentheses. In the case of a capturing subpattern with no nested
+ * capturing subpatterns, the same subpatternId will be passed to the begin and
+ * end functions. In the case of non-capturing subpatterns the subpatternId
+ * passed to the begin method is also the first possible subpatternId that might
+ * be nested within these parentheses. If a set of non-capturing parentheses does
+ * not contain any capturing subpatterns, then the subpatternId passed to begin
+ * will be greater than the subpatternId passed to end.
+ */
+
+template<class Delegate>
+const char* parse(Delegate& delegate, const String& pattern, unsigned backReferenceLimit = quantifyInfinite)
+{
+ if (pattern.is8Bit())
+ return Parser<Delegate, LChar>(delegate, pattern, backReferenceLimit).parse();
+ return Parser<Delegate, UChar>(delegate, pattern, backReferenceLimit).parse();
+}
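+
+/*
+ * For illustration, a minimal conforming delegate (the name NullDelegate is
+ * hypothetical; the SyntaxChecker in YarrSyntaxChecker.cpp is the simplest
+ * real implementation of this interface). It ignores every callback and
+ * relies on parse() alone to detect syntax errors:
+ *
+ *     class NullDelegate {
+ *     public:
+ *         void assertionBOL() {}
+ *         void assertionEOL() {}
+ *         void assertionWordBoundary(bool) {}
+ *         void atomPatternCharacter(UChar) {}
+ *         void atomBuiltInCharacterClass(BuiltInCharacterClassID, bool) {}
+ *         void atomCharacterClassBegin(bool = false) {}
+ *         void atomCharacterClassAtom(UChar) {}
+ *         void atomCharacterClassRange(UChar, UChar) {}
+ *         void atomCharacterClassBuiltIn(BuiltInCharacterClassID, bool) {}
+ *         void atomCharacterClassEnd() {}
+ *         void atomParenthesesSubpatternBegin(bool = true) {}
+ *         void atomParentheticalAssertionBegin(bool = false) {}
+ *         void atomParenthesesEnd() {}
+ *         void atomBackReference(unsigned) {}
+ *         void quantifyAtom(unsigned, unsigned, bool) {}
+ *         void disjunction() {}
+ *     };
+ *
+ *     NullDelegate delegate;
+ *     const char* error = parse(delegate, String("^a|b{1,3}$"));
+ *     // error is null on success, or one of the static messages listed in
+ *     // Parser::parse() above (e.g. the "missing )" message).
+ */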
+
+} } // namespace JSC::Yarr
+
+#endif // YarrParser_h
diff --git a/src/3rdparty/masm/yarr/YarrPattern.cpp b/src/3rdparty/masm/yarr/YarrPattern.cpp
new file mode 100644
index 0000000000..3ce0216e5f
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrPattern.cpp
@@ -0,0 +1,880 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrPattern.h"
+
+#include "Yarr.h"
+#include "YarrCanonicalizeUCS2.h"
+#include "YarrParser.h"
+#include <wtf/Vector.h>
+
+using namespace WTF;
+
+namespace JSC { namespace Yarr {
+
+#include "RegExpJitTables.h"
+
+class CharacterClassConstructor {
+public:
+ CharacterClassConstructor(bool isCaseInsensitive = false)
+ : m_isCaseInsensitive(isCaseInsensitive)
+ {
+ }
+
+ void reset()
+ {
+ m_matches.clear();
+ m_ranges.clear();
+ m_matchesUnicode.clear();
+ m_rangesUnicode.clear();
+ }
+
+ void append(const CharacterClass* other)
+ {
+ for (size_t i = 0; i < other->m_matches.size(); ++i)
+ addSorted(m_matches, other->m_matches[i]);
+ for (size_t i = 0; i < other->m_ranges.size(); ++i)
+ addSortedRange(m_ranges, other->m_ranges[i].begin, other->m_ranges[i].end);
+ for (size_t i = 0; i < other->m_matchesUnicode.size(); ++i)
+ addSorted(m_matchesUnicode, other->m_matchesUnicode[i]);
+ for (size_t i = 0; i < other->m_rangesUnicode.size(); ++i)
+ addSortedRange(m_rangesUnicode, other->m_rangesUnicode[i].begin, other->m_rangesUnicode[i].end);
+ }
+
+ void putChar(UChar ch)
+ {
+ // Handle ASCII cases.
+ if (ch <= 0x7f) {
+ if (m_isCaseInsensitive && isASCIIAlpha(ch)) {
+ addSorted(m_matches, toASCIIUpper(ch));
+ addSorted(m_matches, toASCIILower(ch));
+ } else
+ addSorted(m_matches, ch);
+ return;
+ }
+
+ // Simple case, not a case-insensitive match.
+ if (!m_isCaseInsensitive) {
+ addSorted(m_matchesUnicode, ch);
+ return;
+ }
+
+ // Add multiple matches, if necessary.
+ UCS2CanonicalizationRange* info = rangeInfoFor(ch);
+ if (info->type == CanonicalizeUnique)
+ addSorted(m_matchesUnicode, ch);
+ else
+ putUnicodeIgnoreCase(ch, info);
+ }
+
+ void putUnicodeIgnoreCase(UChar ch, UCS2CanonicalizationRange* info)
+ {
+ ASSERT(m_isCaseInsensitive);
+ ASSERT(ch > 0x7f);
+ ASSERT(ch >= info->begin && ch <= info->end);
+ ASSERT(info->type != CanonicalizeUnique);
+ if (info->type == CanonicalizeSet) {
+ for (uint16_t* set = characterSetInfo[info->value]; (ch = *set); ++set)
+ addSorted(m_matchesUnicode, ch);
+ } else {
+ addSorted(m_matchesUnicode, ch);
+ addSorted(m_matchesUnicode, getCanonicalPair(info, ch));
+ }
+ }
+
+ void putRange(UChar lo, UChar hi)
+ {
+ if (lo <= 0x7f) {
+ char asciiLo = lo;
+ char asciiHi = std::min(hi, (UChar)0x7f);
+ addSortedRange(m_ranges, lo, asciiHi);
+
+ if (m_isCaseInsensitive) {
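+ // Mirror letters within the range into the other case; e.g. for [A-F] this also adds [a-f].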
+ if ((asciiLo <= 'Z') && (asciiHi >= 'A'))
+ addSortedRange(m_ranges, std::max(asciiLo, 'A')+('a'-'A'), std::min(asciiHi, 'Z')+('a'-'A'));
+ if ((asciiLo <= 'z') && (asciiHi >= 'a'))
+ addSortedRange(m_ranges, std::max(asciiLo, 'a')+('A'-'a'), std::min(asciiHi, 'z')+('A'-'a'));
+ }
+ }
+ if (hi <= 0x7f)
+ return;
+
+ lo = std::max(lo, (UChar)0x80);
+ addSortedRange(m_rangesUnicode, lo, hi);
+
+ if (!m_isCaseInsensitive)
+ return;
+
+ UCS2CanonicalizationRange* info = rangeInfoFor(lo);
+ while (true) {
+ // Handle the range [lo .. end]
+ UChar end = std::min<UChar>(info->end, hi);
+
+ switch (info->type) {
+ case CanonicalizeUnique:
+ // Nothing to do - no canonical equivalents.
+ break;
+ case CanonicalizeSet: {
+ UChar ch;
+ for (uint16_t* set = characterSetInfo[info->value]; (ch = *set); ++set)
+ addSorted(m_matchesUnicode, ch);
+ break;
+ }
+ case CanonicalizeRangeLo:
+ addSortedRange(m_rangesUnicode, lo + info->value, end + info->value);
+ break;
+ case CanonicalizeRangeHi:
+ addSortedRange(m_rangesUnicode, lo - info->value, end - info->value);
+ break;
+ case CanonicalizeAlternatingAligned:
+ // Use addSortedRange since there is likely an abutting range to combine with.
+ if (lo & 1)
+ addSortedRange(m_rangesUnicode, lo - 1, lo - 1);
+ if (!(end & 1))
+ addSortedRange(m_rangesUnicode, end + 1, end + 1);
+ break;
+ case CanonicalizeAlternatingUnaligned:
+ // Use addSortedRange since there is likely an abutting range to combine with.
+ if (!(lo & 1))
+ addSortedRange(m_rangesUnicode, lo - 1, lo - 1);
+ if (end & 1)
+ addSortedRange(m_rangesUnicode, end + 1, end + 1);
+ break;
+ }
+
+ if (hi == end)
+ return;
+
+ ++info;
+ lo = info->begin;
+ }
+
+ }
+
+ PassOwnPtr<CharacterClass> charClass()
+ {
+ OwnPtr<CharacterClass> characterClass = adoptPtr(new CharacterClass);
+
+ characterClass->m_matches.swap(m_matches);
+ characterClass->m_ranges.swap(m_ranges);
+ characterClass->m_matchesUnicode.swap(m_matchesUnicode);
+ characterClass->m_rangesUnicode.swap(m_rangesUnicode);
+
+ return characterClass.release();
+ }
+
+private:
+ void addSorted(Vector<UChar>& matches, UChar ch)
+ {
+ unsigned pos = 0;
+ unsigned range = matches.size();
+
+ // binary chop, find position to insert char.
+ while (range) {
+ unsigned index = range >> 1;
+
+ int val = matches[pos+index] - ch;
+ if (!val)
+ return;
+ else if (val > 0)
+ range = index;
+ else {
+ pos += (index+1);
+ range -= (index+1);
+ }
+ }
+
+ if (pos == matches.size())
+ matches.append(ch);
+ else
+ matches.insert(pos, ch);
+ }
+
+ void addSortedRange(Vector<CharacterRange>& ranges, UChar lo, UChar hi)
+ {
+ unsigned end = ranges.size();
+
+ // Simple linear scan - I doubt there are that many ranges anyway...
+ // feel free to fix this with something faster (e.g. binary chop).
+ for (unsigned i = 0; i < end; ++i) {
+ // Does the new range fall before the current position in the array?
+ if (hi < ranges[i].begin) {
+ // Optional optimization: if the new range abuts the current one, just extend it - may not be worthwhile.
+ if (hi == (ranges[i].begin - 1)) {
+ ranges[i].begin = lo;
+ return;
+ }
+ ranges.insert(i, CharacterRange(lo, hi));
+ return;
+ }
+ // Okay, since we didn't hit the last case, the end of the new range is definitely at or after the beginning
+ // of the current range. If the new range starts at or before (the end of the current range + 1), then the
+ // two overlap (or, if it starts exactly one past the end, they concatenate - which is just as good).
+ if (lo <= (ranges[i].end + 1)) {
+ // Found an intersection - we'll replace this entry in the array.
+ ranges[i].begin = std::min(ranges[i].begin, lo);
+ ranges[i].end = std::max(ranges[i].end, hi);
+
+ // now check if the new range can subsume any subsequent ranges.
+ unsigned next = i+1;
+ // each iteration of the loop we will either remove something from the list, or break the loop.
+ while (next < ranges.size()) {
+ if (ranges[next].begin <= (ranges[i].end + 1)) {
+ // the next entry now overlaps / concatenates this one.
+ ranges[i].end = std::max(ranges[i].end, ranges[next].end);
+ ranges.remove(next);
+ } else
+ break;
+ }
+
+ return;
+ }
+ }
+
+ // CharacterRange comes after all existing ranges.
+ ranges.append(CharacterRange(lo, hi));
+ }
+
+ bool m_isCaseInsensitive;
+
+ Vector<UChar> m_matches;
+ Vector<CharacterRange> m_ranges;
+ Vector<UChar> m_matchesUnicode;
+ Vector<CharacterRange> m_rangesUnicode;
+};
+
+class YarrPatternConstructor {
+public:
+ YarrPatternConstructor(YarrPattern& pattern)
+ : m_pattern(pattern)
+ , m_characterClassConstructor(pattern.m_ignoreCase)
+ , m_invertParentheticalAssertion(false)
+ {
+ OwnPtr<PatternDisjunction> body = adoptPtr(new PatternDisjunction);
+ m_pattern.m_body = body.get();
+ m_alternative = body->addNewAlternative();
+ m_pattern.m_disjunctions.append(body.release());
+ }
+
+ ~YarrPatternConstructor()
+ {
+ }
+
+ void reset()
+ {
+ m_pattern.reset();
+ m_characterClassConstructor.reset();
+
+ OwnPtr<PatternDisjunction> body = adoptPtr(new PatternDisjunction);
+ m_pattern.m_body = body.get();
+ m_alternative = body->addNewAlternative();
+ m_pattern.m_disjunctions.append(body.release());
+ }
+
+ void assertionBOL()
+ {
+ if (!m_alternative->m_terms.size() && !m_invertParentheticalAssertion) {
+ m_alternative->m_startsWithBOL = true;
+ m_alternative->m_containsBOL = true;
+ m_pattern.m_containsBOL = true;
+ }
+ m_alternative->m_terms.append(PatternTerm::BOL());
+ }
+ void assertionEOL()
+ {
+ m_alternative->m_terms.append(PatternTerm::EOL());
+ }
+ void assertionWordBoundary(bool invert)
+ {
+ m_alternative->m_terms.append(PatternTerm::WordBoundary(invert));
+ }
+
+ void atomPatternCharacter(UChar ch)
+ {
+ // We handle case-insensitive checking of Unicode characters that have both
+ // cases by treating them as if they were defined using a CharacterClass.
+ if (!m_pattern.m_ignoreCase || isASCII(ch)) {
+ m_alternative->m_terms.append(PatternTerm(ch));
+ return;
+ }
+
+ UCS2CanonicalizationRange* info = rangeInfoFor(ch);
+ if (info->type == CanonicalizeUnique) {
+ m_alternative->m_terms.append(PatternTerm(ch));
+ return;
+ }
+
+ m_characterClassConstructor.putUnicodeIgnoreCase(ch, info);
+ OwnPtr<CharacterClass> newCharacterClass = m_characterClassConstructor.charClass();
+ m_alternative->m_terms.append(PatternTerm(newCharacterClass.get(), false));
+ m_pattern.m_userCharacterClasses.append(newCharacterClass.release());
+ }
+
+ void atomBuiltInCharacterClass(BuiltInCharacterClassID classID, bool invert)
+ {
+ switch (classID) {
+ case DigitClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.digitsCharacterClass(), invert));
+ break;
+ case SpaceClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.spacesCharacterClass(), invert));
+ break;
+ case WordClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.wordcharCharacterClass(), invert));
+ break;
+ case NewlineClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.newlineCharacterClass(), invert));
+ break;
+ }
+ }
+
+ void atomCharacterClassBegin(bool invert = false)
+ {
+ m_invertCharacterClass = invert;
+ }
+
+ void atomCharacterClassAtom(UChar ch)
+ {
+ m_characterClassConstructor.putChar(ch);
+ }
+
+ void atomCharacterClassRange(UChar begin, UChar end)
+ {
+ m_characterClassConstructor.putRange(begin, end);
+ }
+
+ void atomCharacterClassBuiltIn(BuiltInCharacterClassID classID, bool invert)
+ {
+ ASSERT(classID != NewlineClassID);
+
+ switch (classID) {
+ case DigitClassID:
+ m_characterClassConstructor.append(invert ? m_pattern.nondigitsCharacterClass() : m_pattern.digitsCharacterClass());
+ break;
+
+ case SpaceClassID:
+ m_characterClassConstructor.append(invert ? m_pattern.nonspacesCharacterClass() : m_pattern.spacesCharacterClass());
+ break;
+
+ case WordClassID:
+ m_characterClassConstructor.append(invert ? m_pattern.nonwordcharCharacterClass() : m_pattern.wordcharCharacterClass());
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void atomCharacterClassEnd()
+ {
+ OwnPtr<CharacterClass> newCharacterClass = m_characterClassConstructor.charClass();
+ m_alternative->m_terms.append(PatternTerm(newCharacterClass.get(), m_invertCharacterClass));
+ m_pattern.m_userCharacterClasses.append(newCharacterClass.release());
+ }
+
+ void atomParenthesesSubpatternBegin(bool capture = true)
+ {
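+ // The id is computed even for non-capturing groups: per the Yarr::parse() contract it is
+ // then the first subpatternId that could occur inside these parentheses.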
+ unsigned subpatternId = m_pattern.m_numSubpatterns + 1;
+ if (capture)
+ m_pattern.m_numSubpatterns++;
+
+ OwnPtr<PatternDisjunction> parenthesesDisjunction = adoptPtr(new PatternDisjunction(m_alternative));
+ m_alternative->m_terms.append(PatternTerm(PatternTerm::TypeParenthesesSubpattern, subpatternId, parenthesesDisjunction.get(), capture, false));
+ m_alternative = parenthesesDisjunction->addNewAlternative();
+ m_pattern.m_disjunctions.append(parenthesesDisjunction.release());
+ }
+
+ void atomParentheticalAssertionBegin(bool invert = false)
+ {
+ OwnPtr<PatternDisjunction> parenthesesDisjunction = adoptPtr(new PatternDisjunction(m_alternative));
+ m_alternative->m_terms.append(PatternTerm(PatternTerm::TypeParentheticalAssertion, m_pattern.m_numSubpatterns + 1, parenthesesDisjunction.get(), false, invert));
+ m_alternative = parenthesesDisjunction->addNewAlternative();
+ m_invertParentheticalAssertion = invert;
+ m_pattern.m_disjunctions.append(parenthesesDisjunction.release());
+ }
+
+ void atomParenthesesEnd()
+ {
+ ASSERT(m_alternative->m_parent);
+ ASSERT(m_alternative->m_parent->m_parent);
+
+ PatternDisjunction* parenthesesDisjunction = m_alternative->m_parent;
+ m_alternative = m_alternative->m_parent->m_parent;
+
+ PatternTerm& lastTerm = m_alternative->lastTerm();
+
+ unsigned numParenAlternatives = parenthesesDisjunction->m_alternatives.size();
+ unsigned numBOLAnchoredAlts = 0;
+
+ for (unsigned i = 0; i < numParenAlternatives; i++) {
+ // Bubble up BOL flags
+ if (parenthesesDisjunction->m_alternatives[i]->m_startsWithBOL)
+ numBOLAnchoredAlts++;
+ }
+
+ if (numBOLAnchoredAlts) {
+ m_alternative->m_containsBOL = true;
+ // If all the alternatives in parens start with BOL, then so does this one
+ if (numBOLAnchoredAlts == numParenAlternatives)
+ m_alternative->m_startsWithBOL = true;
+ }
+
+ lastTerm.parentheses.lastSubpatternId = m_pattern.m_numSubpatterns;
+ m_invertParentheticalAssertion = false;
+ }
+
+ void atomBackReference(unsigned subpatternId)
+ {
+ ASSERT(subpatternId);
+ m_pattern.m_containsBackreferences = true;
+ m_pattern.m_maxBackReference = std::max(m_pattern.m_maxBackReference, subpatternId);
+
+ if (subpatternId > m_pattern.m_numSubpatterns) {
+ m_alternative->m_terms.append(PatternTerm::ForwardReference());
+ return;
+ }
+
+ PatternAlternative* currentAlternative = m_alternative;
+ ASSERT(currentAlternative);
+
+ // Note to self: if we waited until the AST was baked, we could also remove forwards refs
+ while ((currentAlternative = currentAlternative->m_parent->m_parent)) {
+ PatternTerm& term = currentAlternative->lastTerm();
+ ASSERT((term.type == PatternTerm::TypeParenthesesSubpattern) || (term.type == PatternTerm::TypeParentheticalAssertion));
+
+ if ((term.type == PatternTerm::TypeParenthesesSubpattern) && term.capture() && (subpatternId == term.parentheses.subpatternId)) {
+ m_alternative->m_terms.append(PatternTerm::ForwardReference());
+ return;
+ }
+ }
+
+ m_alternative->m_terms.append(PatternTerm(subpatternId));
+ }
+
+ // Deep-copy the argument disjunction. If filterStartsWithBOL is true,
+ // skip alternatives with m_startsWithBOL set.
+ PatternDisjunction* copyDisjunction(PatternDisjunction* disjunction, bool filterStartsWithBOL = false)
+ {
+ OwnPtr<PatternDisjunction> newDisjunction;
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt) {
+ PatternAlternative* alternative = disjunction->m_alternatives[alt].get();
+ if (!filterStartsWithBOL || !alternative->m_startsWithBOL) {
+ if (!newDisjunction) {
+ newDisjunction = adoptPtr(new PatternDisjunction());
+ newDisjunction->m_parent = disjunction->m_parent;
+ }
+ PatternAlternative* newAlternative = newDisjunction->addNewAlternative();
+ newAlternative->m_terms.reserveInitialCapacity(alternative->m_terms.size());
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i)
+ newAlternative->m_terms.append(copyTerm(alternative->m_terms[i], filterStartsWithBOL));
+ }
+ }
+
+ if (!newDisjunction)
+ return 0;
+
+ PatternDisjunction* copiedDisjunction = newDisjunction.get();
+ m_pattern.m_disjunctions.append(newDisjunction.release());
+ return copiedDisjunction;
+ }
+
+ PatternTerm copyTerm(PatternTerm& term, bool filterStartsWithBOL = false)
+ {
+ if ((term.type != PatternTerm::TypeParenthesesSubpattern) && (term.type != PatternTerm::TypeParentheticalAssertion))
+ return PatternTerm(term);
+
+ PatternTerm termCopy = term;
+ termCopy.parentheses.disjunction = copyDisjunction(termCopy.parentheses.disjunction, filterStartsWithBOL);
+ return termCopy;
+ }
+
+ void quantifyAtom(unsigned min, unsigned max, bool greedy)
+ {
+ ASSERT(min <= max);
+ ASSERT(m_alternative->m_terms.size());
+
+ if (!max) {
+ m_alternative->removeLastTerm();
+ return;
+ }
+
+ PatternTerm& term = m_alternative->lastTerm();
+ ASSERT(term.type > PatternTerm::TypeAssertionWordBoundary);
+ ASSERT((term.quantityCount == 1) && (term.quantityType == QuantifierFixedCount));
+
+ if (term.type == PatternTerm::TypeParentheticalAssertion) {
+ // If an assertion is quantified with a minimum count of zero, it can simply be removed.
+ // This arises from the RepeatMatcher behaviour in the spec. Matching an assertion never
+ // results in any input being consumed; however, the continuation passed to the assertion
+ // (called in steps 8c and 9 of the RepeatMatcher definition, ES5.1 15.10.2.5) will
+ // reject all zero length matches (see step 2.1). A match from the continuation of the
+ // expression will still be accepted regardless (via steps 8a and 11) - the upshot of all
+ // this is that matches from the assertion are not required, and won't be accepted anyway,
+ // so no need to ever run it.
+ if (!min)
+ m_alternative->removeLastTerm();
+ // We never need to run an assertion more than once. Subsequent iterations will be run
+ // with the same start index (since assertions are non-capturing) and the same captures
+ // (per step 4 of RepeatMatcher in ES5.1 15.10.2.5), and as such will always produce the
+ // same result and captures. If the first match succeeds then the subsequent (min - 1)
+ // matches will too. Any additional optional matches will fail (on the same basis as the
+ // minimum zero quantified assertions, above), but this will still result in a match.
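+ // For example, /(?=a){0,3}b/ ends up compiled like /b/, while /(?=a){2,}b/ compiles like /(?=a)b/.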
+ return;
+ }
+
+ if (min == 0)
+ term.quantify(max, greedy ? QuantifierGreedy : QuantifierNonGreedy);
+ else if (min == max)
+ term.quantify(min, QuantifierFixedCount);
+ else {
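+ // e.g. for /a{2,4}/ this emits a fixed-count a{2} followed by a greedy copy quantified up to 2 more.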
+ term.quantify(min, QuantifierFixedCount);
+ m_alternative->m_terms.append(copyTerm(term));
+ // NOTE: this term is interesting from an analysis perspective, in that it can be ignored.....
+ m_alternative->lastTerm().quantify((max == quantifyInfinite) ? max : max - min, greedy ? QuantifierGreedy : QuantifierNonGreedy);
+ if (m_alternative->lastTerm().type == PatternTerm::TypeParenthesesSubpattern)
+ m_alternative->lastTerm().parentheses.isCopy = true;
+ }
+ }
+
+ void disjunction()
+ {
+ m_alternative = m_alternative->m_parent->addNewAlternative();
+ }
+
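+ // Walks the alternative's terms, assigning each a frameLocation (its slot for backtracking
+ // state) and an inputPosition (its fixed input offset, where one exists); returns the
+ // resulting call frame size.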
+ unsigned setupAlternativeOffsets(PatternAlternative* alternative, unsigned currentCallFrameSize, unsigned initialInputPosition)
+ {
+ alternative->m_hasFixedSize = true;
+ Checked<unsigned> currentInputPosition = initialInputPosition;
+
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i) {
+ PatternTerm& term = alternative->m_terms[i];
+
+ switch (term.type) {
+ case PatternTerm::TypeAssertionBOL:
+ case PatternTerm::TypeAssertionEOL:
+ case PatternTerm::TypeAssertionWordBoundary:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ break;
+
+ case PatternTerm::TypeBackReference:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoBackReference;
+ alternative->m_hasFixedSize = false;
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ case PatternTerm::TypePatternCharacter:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ if (term.quantityType != QuantifierFixedCount) {
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoPatternCharacter;
+ alternative->m_hasFixedSize = false;
+ } else
+ currentInputPosition += term.quantityCount;
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ if (term.quantityType != QuantifierFixedCount) {
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoCharacterClass;
+ alternative->m_hasFixedSize = false;
+ } else
+ currentInputPosition += term.quantityCount;
+ break;
+
+ case PatternTerm::TypeParenthesesSubpattern:
+ // Note: for fixed once parentheses we will ensure at least the minimum is available; others are on their own.
+ term.frameLocation = currentCallFrameSize;
+ if (term.quantityCount == 1 && !term.parentheses.isCopy) {
+ if (term.quantityType != QuantifierFixedCount)
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition.unsafeGet());
+ // If quantity is fixed, then pre-check its minimum size.
+ if (term.quantityType == QuantifierFixedCount)
+ currentInputPosition += term.parentheses.disjunction->m_minimumSize;
+ term.inputPosition = currentInputPosition.unsafeGet();
+ } else if (term.parentheses.isTerminal) {
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoParenthesesTerminal;
+ currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition.unsafeGet());
+ term.inputPosition = currentInputPosition.unsafeGet();
+ } else {
+ term.inputPosition = currentInputPosition.unsafeGet();
+ setupDisjunctionOffsets(term.parentheses.disjunction, 0, currentInputPosition.unsafeGet());
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoParentheses;
+ }
+ // A fixed count of 1 could be accepted if the parentheses have a fixed size *AND* all alternatives are of the same length.
+ alternative->m_hasFixedSize = false;
+ break;
+
+ case PatternTerm::TypeParentheticalAssertion:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize + YarrStackSpaceForBackTrackInfoParentheticalAssertion, currentInputPosition.unsafeGet());
+ break;
+
+ case PatternTerm::TypeDotStarEnclosure:
+ alternative->m_hasFixedSize = false;
+ term.inputPosition = initialInputPosition;
+ break;
+ }
+ }
+
+ alternative->m_minimumSize = (currentInputPosition - initialInputPosition).unsafeGet();
+ return currentCallFrameSize;
+ }
+
+ unsigned setupDisjunctionOffsets(PatternDisjunction* disjunction, unsigned initialCallFrameSize, unsigned initialInputPosition)
+ {
+ if ((disjunction != m_pattern.m_body) && (disjunction->m_alternatives.size() > 1))
+ initialCallFrameSize += YarrStackSpaceForBackTrackInfoAlternative;
+
+ unsigned minimumInputSize = UINT_MAX;
+ unsigned maximumCallFrameSize = 0;
+ bool hasFixedSize = true;
+
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt) {
+ PatternAlternative* alternative = disjunction->m_alternatives[alt].get();
+ unsigned currentAlternativeCallFrameSize = setupAlternativeOffsets(alternative, initialCallFrameSize, initialInputPosition);
+ minimumInputSize = std::min(minimumInputSize, alternative->m_minimumSize);
+ maximumCallFrameSize = std::max(maximumCallFrameSize, currentAlternativeCallFrameSize);
+ hasFixedSize &= alternative->m_hasFixedSize;
+ }
+
+ ASSERT(minimumInputSize != UINT_MAX);
+ ASSERT(maximumCallFrameSize >= initialCallFrameSize);
+
+ disjunction->m_hasFixedSize = hasFixedSize;
+ disjunction->m_minimumSize = minimumInputSize;
+ disjunction->m_callFrameSize = maximumCallFrameSize;
+ return maximumCallFrameSize;
+ }
+
+ void setupOffsets()
+ {
+ setupDisjunctionOffsets(m_pattern.m_body, 0, 0);
+ }
+
+ // This optimization identifies sets of parentheses that we will never need to backtrack.
+ // In these cases we do not need to store state from prior iterations.
+ // We can presently avoid backtracking for:
+ // * where the parens are at the end of the regular expression (last term in any of the
+ // alternatives of the main body disjunction).
+ // * where the parens are non-capturing, and quantified unbounded greedy (*).
+ // * where the parens do not contain any capturing subpatterns.
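+ // e.g. in /a(?:bc)*/ the trailing (?:bc)* meets all of these conditions and is marked terminal.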
+ void checkForTerminalParentheses()
+ {
+ // This check is much too crude; should be just checking whether the candidate
+ // node contains nested capturing subpatterns, not the whole expression!
+ if (m_pattern.m_numSubpatterns)
+ return;
+
+ Vector<OwnPtr<PatternAlternative> >& alternatives = m_pattern.m_body->m_alternatives;
+ for (size_t i = 0; i < alternatives.size(); ++i) {
+ Vector<PatternTerm>& terms = alternatives[i]->m_terms;
+ if (terms.size()) {
+ PatternTerm& term = terms.last();
+ if (term.type == PatternTerm::TypeParenthesesSubpattern
+ && term.quantityType == QuantifierGreedy
+ && term.quantityCount == quantifyInfinite
+ && !term.capture())
+ term.parentheses.isTerminal = true;
+ }
+ }
+ }
+
+ void optimizeBOL()
+ {
+ // Look for expressions containing beginning of line (^) anchoring and unroll them.
+ // e.g. /^a|^b|c/ becomes a once-through /^a|^b|c/ followed by a looping /c/.
+ // This code relies on the parsing code tagging alternatives with m_containsBOL and
+ // m_startsWithBOL and rolling those up to containing alternatives.
+ // At this point, this is only valid for non-multiline expressions.
+ PatternDisjunction* disjunction = m_pattern.m_body;
+
+ if (!m_pattern.m_containsBOL || m_pattern.m_multiline)
+ return;
+
+ PatternDisjunction* loopDisjunction = copyDisjunction(disjunction, true);
+
+ // Set alternatives in disjunction to "onceThrough"
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt)
+ disjunction->m_alternatives[alt]->setOnceThrough();
+
+ if (loopDisjunction) {
+ // Move alternatives from loopDisjunction to disjunction
+ for (unsigned alt = 0; alt < loopDisjunction->m_alternatives.size(); ++alt)
+ disjunction->m_alternatives.append(loopDisjunction->m_alternatives[alt].release());
+
+ loopDisjunction->m_alternatives.clear();
+ }
+ }
+
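+ // Returns true if any term in the given index range, or any subpattern nested within one, is capturing.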
+ bool containsCapturingTerms(PatternAlternative* alternative, size_t firstTermIndex, size_t lastTermIndex)
+ {
+ Vector<PatternTerm>& terms = alternative->m_terms;
+
+ for (size_t termIndex = firstTermIndex; termIndex <= lastTermIndex; ++termIndex) {
+ PatternTerm& term = terms[termIndex];
+
+ if (term.m_capture)
+ return true;
+
+ if (term.type == PatternTerm::TypeParenthesesSubpattern) {
+ PatternDisjunction* nestedDisjunction = term.parentheses.disjunction;
+ for (unsigned alt = 0; alt < nestedDisjunction->m_alternatives.size(); ++alt) {
+ if (containsCapturingTerms(nestedDisjunction->m_alternatives[alt].get(), 0, nestedDisjunction->m_alternatives[alt]->m_terms.size() - 1))
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // This optimization identifies alternatives in the form of
+ // [^].*[?]<expression>.*[$] for expressions that don't have any
+ // capturing terms. The alternative is changed to <expression>
+ // followed by processing of the dot stars to find and adjust the
+ // beginning and the end of the match.
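+ // e.g. /.*foo.*/ reduces to the terms for "foo" plus a single TypeDotStarEnclosure term.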
+ void optimizeDotStarWrappedExpressions()
+ {
+ Vector<OwnPtr<PatternAlternative> >& alternatives = m_pattern.m_body->m_alternatives;
+ if (alternatives.size() != 1)
+ return;
+
+ PatternAlternative* alternative = alternatives[0].get();
+ Vector<PatternTerm>& terms = alternative->m_terms;
+ if (terms.size() >= 3) {
+ bool startsWithBOL = false;
+ bool endsWithEOL = false;
+ size_t termIndex, firstExpressionTerm, lastExpressionTerm;
+
+ termIndex = 0;
+ if (terms[termIndex].type == PatternTerm::TypeAssertionBOL) {
+ startsWithBOL = true;
+ ++termIndex;
+ }
+
+ PatternTerm& firstNonAnchorTerm = terms[termIndex];
+ if ((firstNonAnchorTerm.type != PatternTerm::TypeCharacterClass) || (firstNonAnchorTerm.characterClass != m_pattern.newlineCharacterClass()) || !((firstNonAnchorTerm.quantityType == QuantifierGreedy) || (firstNonAnchorTerm.quantityType == QuantifierNonGreedy)))
+ return;
+
+ firstExpressionTerm = termIndex + 1;
+
+ termIndex = terms.size() - 1;
+ if (terms[termIndex].type == PatternTerm::TypeAssertionEOL) {
+ endsWithEOL = true;
+ --termIndex;
+ }
+
+ PatternTerm& lastNonAnchorTerm = terms[termIndex];
+ if ((lastNonAnchorTerm.type != PatternTerm::TypeCharacterClass) || (lastNonAnchorTerm.characterClass != m_pattern.newlineCharacterClass()) || (lastNonAnchorTerm.quantityType != QuantifierGreedy))
+ return;
+
+ lastExpressionTerm = termIndex - 1;
+
+ if (firstExpressionTerm > lastExpressionTerm)
+ return;
+
+ if (!containsCapturingTerms(alternative, firstExpressionTerm, lastExpressionTerm)) {
+ for (termIndex = terms.size() - 1; termIndex > lastExpressionTerm; --termIndex)
+ terms.remove(termIndex);
+
+ for (termIndex = firstExpressionTerm; termIndex > 0; --termIndex)
+ terms.remove(termIndex - 1);
+
+ terms.append(PatternTerm(startsWithBOL, endsWithEOL));
+
+ m_pattern.m_containsBOL = false;
+ }
+ }
+ }
+
+private:
+ YarrPattern& m_pattern;
+ PatternAlternative* m_alternative;
+ CharacterClassConstructor m_characterClassConstructor;
+ bool m_invertCharacterClass;
+ bool m_invertParentheticalAssertion;
+};
+
+const char* YarrPattern::compile(const String& patternString)
+{
+ YarrPatternConstructor constructor(*this);
+
+ if (const char* error = parse(constructor, patternString))
+ return error;
+
+ // If the pattern contains illegal backreferences reset & reparse.
+ // Quoting Netscape's "What's new in JavaScript 1.2",
+ // "Note: if the number of left parentheses is less than the number specified
+ // in \#, the \# is taken as an octal escape as described in the next row."
+ if (containsIllegalBackReference()) {
+ unsigned numSubpatterns = m_numSubpatterns;
+
+ constructor.reset();
+#if !ASSERT_DISABLED
+ const char* error =
+#endif
+ parse(constructor, patternString, numSubpatterns);
+
+ ASSERT(!error);
+ ASSERT(numSubpatterns == m_numSubpatterns);
+ }
+
+ constructor.checkForTerminalParentheses();
+ constructor.optimizeDotStarWrappedExpressions();
+ constructor.optimizeBOL();
+
+ constructor.setupOffsets();
+
+ return 0;
+}
+
+YarrPattern::YarrPattern(const String& pattern, bool ignoreCase, bool multiline, const char** error)
+ : m_ignoreCase(ignoreCase)
+ , m_multiline(multiline)
+ , m_containsBackreferences(false)
+ , m_containsBOL(false)
+ , m_numSubpatterns(0)
+ , m_maxBackReference(0)
+ , newlineCached(0)
+ , digitsCached(0)
+ , spacesCached(0)
+ , wordcharCached(0)
+ , nondigitsCached(0)
+ , nonspacesCached(0)
+ , nonwordcharCached(0)
+{
+ *error = compile(pattern);
+}
+
+} }
diff --git a/src/3rdparty/masm/yarr/YarrPattern.h b/src/3rdparty/masm/yarr/YarrPattern.h
new file mode 100644
index 0000000000..e7d187c2b3
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrPattern.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrPattern_h
+#define YarrPattern_h
+
+#include <wtf/CheckedArithmetic.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+#include <wtf/text/WTFString.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace JSC { namespace Yarr {
+
+struct PatternDisjunction;
+
+struct CharacterRange {
+ UChar begin;
+ UChar end;
+
+ CharacterRange(UChar begin, UChar end)
+ : begin(begin)
+ , end(end)
+ {
+ }
+};
+
+struct CharacterClass {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ // All CharacterClass instances have to have the full set of matches and ranges;
+ // they may also have an optional m_table for faster lookups (which, if present,
+ // must match the specified matches and ranges).
+ CharacterClass()
+ : m_table(0)
+ , m_tableInverted(false)
+ {
+ }
+ CharacterClass(const char* table, bool inverted)
+ : m_table(table)
+ , m_tableInverted(inverted)
+ {
+ }
+ Vector<UChar> m_matches;
+ Vector<CharacterRange> m_ranges;
+ Vector<UChar> m_matchesUnicode;
+ Vector<CharacterRange> m_rangesUnicode;
+
+ const char* m_table;
+ bool m_tableInverted;
+};
+
+enum QuantifierType {
+ QuantifierFixedCount,
+ QuantifierGreedy,
+ QuantifierNonGreedy,
+};
+
+struct PatternTerm {
+ enum Type {
+ TypeAssertionBOL,
+ TypeAssertionEOL,
+ TypeAssertionWordBoundary,
+ TypePatternCharacter,
+ TypeCharacterClass,
+ TypeBackReference,
+ TypeForwardReference,
+ TypeParenthesesSubpattern,
+ TypeParentheticalAssertion,
+ TypeDotStarEnclosure,
+ } type;
+ bool m_capture :1;
+ bool m_invert :1;
+ union {
+ UChar patternCharacter;
+ CharacterClass* characterClass;
+ unsigned backReferenceSubpatternId;
+ struct {
+ PatternDisjunction* disjunction;
+ unsigned subpatternId;
+ unsigned lastSubpatternId;
+ bool isCopy;
+ bool isTerminal;
+ } parentheses;
+ struct {
+ bool bolAnchor : 1;
+ bool eolAnchor : 1;
+ } anchors;
+ };
+ QuantifierType quantityType;
+ Checked<unsigned> quantityCount;
+ int inputPosition;
+ unsigned frameLocation;
+
+ PatternTerm(UChar ch)
+ : type(PatternTerm::TypePatternCharacter)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ patternCharacter = ch;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(CharacterClass* charClass, bool invert)
+ : type(PatternTerm::TypeCharacterClass)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ characterClass = charClass;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(Type type, unsigned subpatternId, PatternDisjunction* disjunction, bool capture = false, bool invert = false)
+ : type(type)
+ , m_capture(capture)
+ , m_invert(invert)
+ {
+ parentheses.disjunction = disjunction;
+ parentheses.subpatternId = subpatternId;
+ parentheses.isCopy = false;
+ parentheses.isTerminal = false;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(Type type, bool invert = false)
+ : type(type)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(unsigned subpatternId)
+ : type(TypeBackReference)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ backReferenceSubpatternId = subpatternId;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(bool bolAnchor, bool eolAnchor)
+ : type(TypeDotStarEnclosure)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ anchors.bolAnchor = bolAnchor;
+ anchors.eolAnchor = eolAnchor;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ static PatternTerm ForwardReference()
+ {
+ return PatternTerm(TypeForwardReference);
+ }
+
+ static PatternTerm BOL()
+ {
+ return PatternTerm(TypeAssertionBOL);
+ }
+
+ static PatternTerm EOL()
+ {
+ return PatternTerm(TypeAssertionEOL);
+ }
+
+ static PatternTerm WordBoundary(bool invert)
+ {
+ return PatternTerm(TypeAssertionWordBoundary, invert);
+ }
+
+ bool invert()
+ {
+ return m_invert;
+ }
+
+ bool capture()
+ {
+ return m_capture;
+ }
+
+ void quantify(unsigned count, QuantifierType type)
+ {
+ quantityCount = count;
+ quantityType = type;
+ }
+};
+
+struct PatternAlternative {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ PatternAlternative(PatternDisjunction* disjunction)
+ : m_parent(disjunction)
+ , m_onceThrough(false)
+ , m_hasFixedSize(false)
+ , m_startsWithBOL(false)
+ , m_containsBOL(false)
+ {
+ }
+
+ PatternTerm& lastTerm()
+ {
+ ASSERT(m_terms.size());
+ return m_terms[m_terms.size() - 1];
+ }
+
+ void removeLastTerm()
+ {
+ ASSERT(m_terms.size());
+ m_terms.shrink(m_terms.size() - 1);
+ }
+
+ void setOnceThrough()
+ {
+ m_onceThrough = true;
+ }
+
+ bool onceThrough()
+ {
+ return m_onceThrough;
+ }
+
+ Vector<PatternTerm> m_terms;
+ PatternDisjunction* m_parent;
+ unsigned m_minimumSize;
+ bool m_onceThrough : 1;
+ bool m_hasFixedSize : 1;
+ bool m_startsWithBOL : 1;
+ bool m_containsBOL : 1;
+};
+
+struct PatternDisjunction {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ PatternDisjunction(PatternAlternative* parent = 0)
+ : m_parent(parent)
+ , m_hasFixedSize(false)
+ {
+ }
+
+ PatternAlternative* addNewAlternative()
+ {
+ PatternAlternative* alternative = new PatternAlternative(this);
+ m_alternatives.append(adoptPtr(alternative));
+ return alternative;
+ }
+
+ Vector<OwnPtr<PatternAlternative> > m_alternatives;
+ PatternAlternative* m_parent;
+ unsigned m_minimumSize;
+ unsigned m_callFrameSize;
+ bool m_hasFixedSize;
+};
+
+// You probably don't want to be calling these functions directly
+// (instead, call newlineCharacterClass() et al. on your friendly
+// neighborhood YarrPattern instance to get nicely cached copies).
+CharacterClass* newlineCreate();
+CharacterClass* digitsCreate();
+CharacterClass* spacesCreate();
+CharacterClass* wordcharCreate();
+CharacterClass* nondigitsCreate();
+CharacterClass* nonspacesCreate();
+CharacterClass* nonwordcharCreate();
+
+struct TermChain {
+ TermChain(PatternTerm term)
+ : term(term)
+ {}
+
+ PatternTerm term;
+ Vector<TermChain> hotTerms;
+};
+
+struct YarrPattern {
+ JS_EXPORT_PRIVATE YarrPattern(const String& pattern, bool ignoreCase, bool multiline, const char** error);
+
+ void reset()
+ {
+ m_numSubpatterns = 0;
+ m_maxBackReference = 0;
+
+ m_containsBackreferences = false;
+ m_containsBOL = false;
+
+ newlineCached = 0;
+ digitsCached = 0;
+ spacesCached = 0;
+ wordcharCached = 0;
+ nondigitsCached = 0;
+ nonspacesCached = 0;
+ nonwordcharCached = 0;
+
+ m_disjunctions.clear();
+ m_userCharacterClasses.clear();
+ }
+
+ bool containsIllegalBackReference()
+ {
+ return m_maxBackReference > m_numSubpatterns;
+ }
+
+ CharacterClass* newlineCharacterClass()
+ {
+ if (!newlineCached)
+ m_userCharacterClasses.append(adoptPtr(newlineCached = newlineCreate()));
+ return newlineCached;
+ }
+ CharacterClass* digitsCharacterClass()
+ {
+ if (!digitsCached)
+ m_userCharacterClasses.append(adoptPtr(digitsCached = digitsCreate()));
+ return digitsCached;
+ }
+ CharacterClass* spacesCharacterClass()
+ {
+ if (!spacesCached)
+ m_userCharacterClasses.append(adoptPtr(spacesCached = spacesCreate()));
+ return spacesCached;
+ }
+ CharacterClass* wordcharCharacterClass()
+ {
+ if (!wordcharCached)
+ m_userCharacterClasses.append(adoptPtr(wordcharCached = wordcharCreate()));
+ return wordcharCached;
+ }
+ CharacterClass* nondigitsCharacterClass()
+ {
+ if (!nondigitsCached)
+ m_userCharacterClasses.append(adoptPtr(nondigitsCached = nondigitsCreate()));
+ return nondigitsCached;
+ }
+ CharacterClass* nonspacesCharacterClass()
+ {
+ if (!nonspacesCached)
+ m_userCharacterClasses.append(adoptPtr(nonspacesCached = nonspacesCreate()));
+ return nonspacesCached;
+ }
+ CharacterClass* nonwordcharCharacterClass()
+ {
+ if (!nonwordcharCached)
+ m_userCharacterClasses.append(adoptPtr(nonwordcharCached = nonwordcharCreate()));
+ return nonwordcharCached;
+ }
+
+ bool m_ignoreCase : 1;
+ bool m_multiline : 1;
+ bool m_containsBackreferences : 1;
+ bool m_containsBOL : 1;
+ unsigned m_numSubpatterns;
+ unsigned m_maxBackReference;
+ PatternDisjunction* m_body;
+ Vector<OwnPtr<PatternDisjunction>, 4> m_disjunctions;
+ Vector<OwnPtr<CharacterClass> > m_userCharacterClasses;
+
+private:
+ const char* compile(const String& patternString);
+
+ CharacterClass* newlineCached;
+ CharacterClass* digitsCached;
+ CharacterClass* spacesCached;
+ CharacterClass* wordcharCached;
+ CharacterClass* nondigitsCached;
+ CharacterClass* nonspacesCached;
+ CharacterClass* nonwordcharCached;
+};
+
+} } // namespace JSC::Yarr
+
+#endif // YarrPattern_h
diff --git a/src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp b/src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp
new file mode 100644
index 0000000000..aa98c4a354
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrSyntaxChecker.h"
+
+#include "YarrParser.h"
+
+namespace JSC { namespace Yarr {
+
+class SyntaxChecker {
+public:
+ void assertionBOL() {}
+ void assertionEOL() {}
+ void assertionWordBoundary(bool) {}
+ void atomPatternCharacter(UChar) {}
+ void atomBuiltInCharacterClass(BuiltInCharacterClassID, bool) {}
+ void atomCharacterClassBegin(bool = false) {}
+ void atomCharacterClassAtom(UChar) {}
+ void atomCharacterClassRange(UChar, UChar) {}
+ void atomCharacterClassBuiltIn(BuiltInCharacterClassID, bool) {}
+ void atomCharacterClassEnd() {}
+ void atomParenthesesSubpatternBegin(bool = true) {}
+ void atomParentheticalAssertionBegin(bool = false) {}
+ void atomParenthesesEnd() {}
+ void atomBackReference(unsigned) {}
+ void quantifyAtom(unsigned, unsigned, bool) {}
+ void disjunction() {}
+};
+
+const char* checkSyntax(const String& pattern)
+{
+ SyntaxChecker syntaxChecker;
+ return parse(syntaxChecker, pattern);
+}
+
+} } // namespace JSC::Yarr
diff --git a/src/3rdparty/masm/yarr/YarrSyntaxChecker.h b/src/3rdparty/masm/yarr/YarrSyntaxChecker.h
new file mode 100644
index 0000000000..104ced3ab4
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrSyntaxChecker.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrSyntaxChecker_h
+#define YarrSyntaxChecker_h
+
+#include <wtf/text/WTFString.h>
+
+namespace JSC { namespace Yarr {
+
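+// Parses the pattern for syntax only; returns 0 if it is valid, otherwise the same
+// static error string that Yarr::parse() would report.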
+const char* checkSyntax(const String& pattern);
+
+} } // namespace JSC::Yarr
+
+#endif // YarrSyntaxChecker_h
+
diff --git a/src/3rdparty/masm/yarr/yarr.pri b/src/3rdparty/masm/yarr/yarr.pri
new file mode 100644
index 0000000000..7e9b4d3f3b
--- /dev/null
+++ b/src/3rdparty/masm/yarr/yarr.pri
@@ -0,0 +1,12 @@
+# -------------------------------------------------------------------
+# Project file for YARR
+#
+# See 'Tools/qmake/README' for an overview of the build system
+# -------------------------------------------------------------------
+
+SOURCES += \
+ $$PWD/YarrInterpreter.cpp \
+ $$PWD/YarrPattern.cpp \
+ $$PWD/YarrSyntaxChecker.cpp \
+ $$PWD/YarrCanonicalizeUCS2.cpp
+