Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/assembler')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp  393
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h  769
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h  1758
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h  535
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBuffer.h  173
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h  318
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/CodeLocation.h  186
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/LinkBuffer.h  195
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h  347
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp  94
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h  817
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h  1095
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h  188
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h  204
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h  971
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h  480
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/RepatchBuffer.h  136
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h  2053
18 files changed, 10712 insertions, 0 deletions
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp
new file mode 100644
index 000000000..1324586dc
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
+{
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+ if (constPool && (*insn & 0x1))
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
+
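+    // The load's base register is PC, which on ARM reads as the address of the
+    // current instruction plus 8 -- hence the offset applies to insn + 2 words.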
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
+ if (*insn & DT_UP)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
+ else
+ return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
+}
+
+void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool)
+{
+ ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
+
+ if (!useConstantPool) {
+ int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
+
+ if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
+ *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
+ ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+ return;
+ }
+ }
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+}
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return OP2_IMM | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return OP2_IMM | (imm >> 24) | (rol << 8);
+
+ return 0;
+}
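+
+// A worked example of the encoding above: getOp2(0x3fc00) rotates the value
+// left until only the top byte is populated (0xff with rol == 11) and returns
+// OP2_IMM | 0xff | (11 << 8), i.e. the byte 0xff rotated right by 2 * 11 bits.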
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+    // Step 1: Search for a non-immediate part
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+    while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov_r(reg, imm1);
+ orr_r(reg, reg, imm2);
+ } else {
+ mvn_r(reg, imm1);
+ bic_r(reg, reg, imm2);
+ }
+
+ return 1;
+}
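+
+// For example, 0xff0000ff has no single rotated-byte encoding, but genInt
+// splits it into two: mov reg, #0xff followed by orr reg, reg, #0xff000000
+// (or the mvn/bic pair for the inverted form).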
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+    // Do it with one instruction
+ tmp = getOp2(imm);
+ if (tmp)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp) {
+ if (invert)
+ return tmp | OP2_INV_IMM;
+ mvn_r(tmpReg, tmp);
+ return tmpReg;
+ }
+
+    // Do it with two instructions
+ if (genInt(tmpReg, imm, true))
+ return tmpReg;
+ if (genInt(tmpReg, ~imm, false))
+ return tmpReg;
+
+ ldr_imm(tmpReg, imm);
+ return tmpReg;
+}
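+
+// E.g. getImm(0xffffff00, tmpReg): getOp2 fails, but getOp2(~imm) encodes 0xff
+// directly, so a single mvn suffices; only values failing all four strategies
+// above fall back to a constant-pool load via ldr_imm.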
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+    // Do it with one instruction
+ tmp = getOp2(imm);
+ if (tmp) {
+ mov_r(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp) {
+ mvn_r(dest, tmp);
+ return;
+ }
+
+    // Do it with two instructions
+ if (genInt(dest, imm, true))
+ return;
+ if (genInt(dest, ~imm, false))
+ return;
+
+ ldr_imm(dest, imm);
+}
+
+// Memory load/store helpers
+
+void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtr_u(isLoad, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+ dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = getImm(offset, ARMRegisters::S0);
+ dtr_ur(isLoad, srcDst, base, reg);
+ }
+ } else {
+ offset = -offset;
+ if (offset <= 0xfff)
+ dtr_d(isLoad, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+ dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = getImm(offset, ARMRegisters::S0);
+ dtr_dr(isLoad, srcDst, base, reg);
+ }
+ }
+}
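+
+// Example: a load at offset 0x12345 becomes add S0, base, #0x12000 (the
+// rotate field (10 << 8) places the byte 0x12 at bit 12) followed by
+// ldr srcDst, [S0, #0x345].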
+
+void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ARMWord op2;
+
+ ASSERT(scale >= 0 && scale <= 3);
+ op2 = lsl(index, scale);
+
+ if (offset >= 0 && offset <= 0xfff) {
+ add_r(ARMRegisters::S0, base, op2);
+ dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+ if (offset <= 0 && offset >= -0xfff) {
+ add_r(ARMRegisters::S0, base, op2);
+ dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
+ return;
+ }
+
+ ldr_un_imm(ARMRegisters::S0, offset);
+ add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
+ dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset & 0x3) {
+ if (offset <= 0x3ff && offset >= 0) {
+ fdtr_u(isLoad, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
+ fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+
+ if (offset <= 0x3ff && offset >= 0) {
+ fdtr_d(isLoad, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
+ fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+ }
+
+ ldr_un_imm(ARMRegisters::S0, offset);
+ add_r(ARMRegisters::S0, ARMRegisters::S0, base);
+ fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
+}
+
+void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+{
+    // 64-bit alignment is required for the next constant pool and for the JIT code as well
+ m_buffer.flushWithoutBarrier(true);
+ if (m_buffer.uncheckedSize() & 0x7)
+ bkpt(0);
+
+ char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+        // The last bit is set if the constant must be placed in the constant pool.
+ int pos = (*iter) & (~0x1);
+ ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
+ ARMWord offset = *getLdrImmAddress(ldrAddr);
+ if (offset != 0xffffffff) {
+ JmpSrc jmpSrc(pos);
+ linkBranch(data, jmpSrc, data + offset, ((*iter) & 1));
+ }
+ }
+
+ return data;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h
new file mode 100644
index 000000000..9f9a450e1
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+ typedef uint32_t ARMWord;
+
+ namespace ARMRegisters {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ S0 = r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ S1 = r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ sp = r13,
+ r14,
+ lr = r14,
+ r15,
+ pc = r15
+ } RegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ SD0 = d3
+ } FPRegisterID;
+
+ } // namespace ARMRegisters
+
+ class ARMAssembler {
+ public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef SegmentedVector<int, 64> Jumps;
+
+ ARMAssembler() { }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ FADDD = 0x0e300b00,
+ FSUBD = 0x0e300b40,
+ FMULD = 0x0e200b00,
+ FCMPD = 0x0eb40b40,
+ DTR = 0x05000000,
+ LDRH = 0x00100090,
+ STRH = 0x00000090,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ FDTR = 0x0d000b00,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+ FMSR = 0x0e000a10,
+ FSITOD = 0x0eb80bc0,
+ FMSTAT = 0x0ef1fa10,
+#if ARM_ARCH_VERSION >= 5
+ CLZ = 0x016f0f10,
+        BKPT = 0xe1200070,
+#endif
+ };
+
+ enum {
+ OP2_IMM = (1 << 25),
+ OP2_IMMh = (1 << 22),
+ OP2_INV_IMM = (1 << 26),
+ SET_CC = (1 << 20),
+ OP2_OFSREG = (1 << 25),
+ DT_UP = (1 << 23),
+ DT_WB = (1 << 21),
+        // This flag is included in LDR and STR
+ DT_PRE = (1 << 24),
+ HDT_UH = (1 << 5),
+ DT_LOAD = (1 << 20),
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BRANCH_MASK = 0x00ffffff,
+ NONARM = 0xf0000000,
+ SDT_MASK = 0x0c000000,
+ SDT_OFFSET_MASK = 0xfff,
+ };
+
+ enum {
+ BOFFSET_MIN = -0x00800000,
+ BOFFSET_MAX = 0x007fffff,
+ SDT = 0x04000000,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xee120070,
+ };
+
+ class JmpSrc {
+ friend class ARMAssembler;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class ARMAssembler;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+    // Instruction formatting
+
+ void emitInst(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+            ASSERT(((op2 & ~OP2_IMM) <= 0xfff) || ((op2 & ~OP2_IMMh) <= 0xfff));
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void and_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | AND, rd, rn, op2);
+ }
+
+ void ands_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | AND | SET_CC, rd, rn, op2);
+ }
+
+ void eor_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | EOR | SET_CC, rd, rn, op2);
+ }
+
+ void sub_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SUB | SET_CC, rd, rn, op2);
+ }
+
+ void rsb_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSB | SET_CC, rd, rn, op2);
+ }
+
+ void add_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADD | SET_CC, rd, rn, op2);
+ }
+
+ void adc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADC | SET_CC, rd, rn, op2);
+ }
+
+ void sbc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SBC | SET_CC, rd, rn, op2);
+ }
+
+ void rsc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSC | SET_CC, rd, rn, op2);
+ }
+
+ void tst_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | TST | SET_CC, 0, rn, op2);
+ }
+
+ void teq_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | TEQ | SET_CC, 0, rn, op2);
+ }
+
+ void cmp_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | CMP | SET_CC, 0, rn, op2);
+ }
+
+ void orr_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ORR | SET_CC, rd, rn, op2);
+ }
+
+ void mov_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
+ }
+
+ void movs_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2);
+ }
+
+ void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | BIC | SET_CC, rd, rn, op2);
+ }
+
+ void mvn_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARMRegisters::r0, op2);
+ }
+
+ void mvns_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARMRegisters::r0, op2);
+ }
+
+ void mul_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | SET_CC | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull_r(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+ void faddd_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
+ }
+
+ void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
+ }
+
+ void fmuld_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FMULD, dd, dn, dm);
+ }
+
+ void fcmpd_r(int dd, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
+ }
+
+ void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true);
+ }
+
+ void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm);
+ }
+
+ void dtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP, rd, rb, op2);
+ }
+
+ void dtr_ur(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP | OP2_OFSREG, rd, rb, rm);
+ }
+
+ void dtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void dtr_dr(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | OP2_OFSREG, rd, rb, rm);
+ }
+
+ void ldrh_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
+ }
+
+ void ldrh_d(int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_PRE, rd, rb, op2);
+ }
+
+ void ldrh_u(int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, op2);
+ }
+
+ void strh_r(int rn, int rm, int rd, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | STRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
+ }
+
+ void fdtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff);
+ emitInst(static_cast<ARMWord>(cc) | FDTR | DT_UP | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void fdtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff);
+ emitInst(static_cast<ARMWord>(cc) | FDTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void push_r(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(cc | DTR | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ void pop_r(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke_r(int reg, Condition cc = AL)
+ {
+ dtr_d(false, ARMRegisters::sp, 0, reg, cc);
+ }
+
+ inline void peek_r(int reg, Condition cc = AL)
+ {
+ dtr_u(true, reg, ARMRegisters::sp, 0, cc);
+ }
+
+ void fmsr_r(int dd, int rn, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
+ }
+
+ void fsitod_r(int dd, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
+ }
+
+ void fmstat(Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
+ }
+
+#if ARM_ARCH_VERSION >= 5
+ void clz_r(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
+ }
+#endif
+
+ void bkpt(ARMWord value)
+ {
+#if ARM_ARCH_VERSION >= 5
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+#else
+        // Cannot access memory address zero
+ dtr_dr(true, ARMRegisters::S0, ARMRegisters::S0, ARMRegisters::S0);
+#endif
+ }
+
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lsl_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsr_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asr_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
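+
+        // These helpers build the shifted operand-2 field: Rm in bits 0-3, the
+        // shift type in bits 5-6, and either the immediate amount in bits 7-11
+        // or the shift register in bits 8-11. E.g. lsl(r1, 2) yields 0x101.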
+
+ // General helpers
+
+ int size()
+ {
+ return m_buffer.size();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ JmpDst label()
+ {
+ return JmpDst(m_buffer.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov_r(ARMRegisters::r0, ARMRegisters::r0);
+
+ return label();
+ }
+
+ JmpSrc jmp(Condition cc = AL, int useConstantPool = 0)
+ {
+ ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+ int s = m_buffer.uncheckedSize();
+ ldr_un_imm(ARMRegisters::pc, 0xffffffff, cc);
+ m_jumps.append(s | (useConstantPool & 0x1));
+ return JmpSrc(s);
+ }
+
+ void* executableCopy(ExecutablePool* allocator);
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
+ static void linkBranch(void* code, JmpSrc from, void* to, int useConstantPool = 0);
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~0xfff));
+ return (load & ~0xfff) | value;
+ }
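+
+        // The pool slot index is stored shifted left one bit with the low bit
+        // set; getLdrImmAddress uses that marker to tell an unresolved pool
+        // reference apart from a real byte offset.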
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Patch pointers
+
+ static void linkPointer(void* code, JmpDst from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ static void repatchLoadPtrToLEA(void* from)
+ {
+            // On ARM, this is a patch from LDR to ADD. It is a restricted
+            // conversion, from special case to special case, although sufficient
+            // for its purpose.
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ASSERT((*insn & 0x0ff00f00) == 0x05900000);
+
+ *insn = (*insn & 0xf00ff0ff) | 0x02800000;
+ ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+ }
+
+ // Linkers
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
+ *getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ linkBranch(code, from, to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
+ }
+
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ linkBranch(code, from, to, true);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ relinkJump(from, to);
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + jump.m_offset / sizeof(ARMWord) + 1);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + label.m_offset / sizeof(ARMWord));
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpSrc to)
+ {
+ return (to.m_offset + sizeof(ARMWord)) - from.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpDst to)
+ {
+ return to.m_offset - from.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ return call.m_offset + sizeof(ARMWord);
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+            return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
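+
+        // getOp2Byte (above) splits the byte into the imm4H (bits 8-11) and
+        // imm4L (bits 0-3) fields used by halfword transfers, e.g.
+        // getOp2Byte(0x34) == OP2_IMMh | 0x304.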
+
+ static ARMWord getOp2(ARMWord imm);
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset);
+
+    // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
+ return AL | B | (offset & BRANCH_MASK);
+ }
+
+ private:
+ ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg;
+ }
+
+ ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 8;
+ }
+
+ ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 12;
+ }
+
+ ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 16;
+ }
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & 0xf0000000;
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h
new file mode 100644
index 000000000..078de4474
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -0,0 +1,1758 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+ typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, wr = r7, // thumb work register
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11, fp = r11, // frame pointer
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
+ } RegisterID;
+
+ // s0 == d0 == q0
+ // s4 == d2 == q1
+ // etc
+ typedef enum {
+ s0 = 0,
+ s1 = 1,
+ s2 = 2,
+ s3 = 3,
+ s4 = 4,
+ s5 = 5,
+ s6 = 6,
+ s7 = 7,
+ s8 = 8,
+ s9 = 9,
+ s10 = 10,
+ s11 = 11,
+ s12 = 12,
+ s13 = 13,
+ s14 = 14,
+ s15 = 15,
+ s16 = 16,
+ s17 = 17,
+ s18 = 18,
+ s19 = 19,
+ s20 = 20,
+ s21 = 21,
+ s22 = 22,
+ s23 = 23,
+ s24 = 24,
+ s25 = 25,
+ s26 = 26,
+ s27 = 27,
+ s28 = 28,
+ s29 = 29,
+ s30 = 30,
+ s31 = 31,
+ d0 = 0 << 1,
+ d1 = 1 << 1,
+ d2 = 2 << 1,
+ d3 = 3 << 1,
+ d4 = 4 << 1,
+ d5 = 5 << 1,
+ d6 = 6 << 1,
+ d7 = 7 << 1,
+ d8 = 8 << 1,
+ d9 = 9 << 1,
+ d10 = 10 << 1,
+ d11 = 11 << 1,
+ d12 = 12 << 1,
+ d13 = 13 << 1,
+ d14 = 14 << 1,
+ d15 = 15 << 1,
+ d16 = 16 << 1,
+ d17 = 17 << 1,
+ d18 = 18 << 1,
+ d19 = 19 << 1,
+ d20 = 20 << 1,
+ d21 = 21 << 1,
+ d22 = 22 << 1,
+ d23 = 23 << 1,
+ d24 = 24 << 1,
+ d25 = 25 << 1,
+ d26 = 26 << 1,
+ d27 = 27 << 1,
+ d28 = 28 << 1,
+ d29 = 29 << 1,
+ d30 = 30 << 1,
+ d31 = 31 << 1,
+ q0 = 0 << 2,
+ q1 = 1 << 2,
+ q2 = 2 << 2,
+ q3 = 3 << 2,
+ q4 = 4 << 2,
+ q5 = 5 << 2,
+ q6 = 6 << 2,
+ q7 = 7 << 2,
+ q8 = 8 << 2,
+ q9 = 9 << 2,
+ q10 = 10 << 2,
+ q11 = 11 << 2,
+ q12 = 12 << 2,
+ q13 = 13 << 2,
+ q14 = 14 << 2,
+ q15 = 15 << 2,
+ q16 = 16 << 2,
+ q17 = 17 << 2,
+ q18 = 18 << 2,
+ q19 = 19 << 2,
+ q20 = 20 << 2,
+ q21 = 21 << 2,
+ q22 = 22 << 2,
+ q23 = 23 << 2,
+ q24 = 24 << 2,
+ q25 = 25 << 2,
+ q26 = 26 << 2,
+ q27 = 27 << 2,
+ q28 = 28 << 2,
+ q29 = 29 << 2,
+ q30 = 30 << 2,
+ q31 = 31 << 2,
+ } FPRegisterID;
+}
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+    // byte0 is the least significant byte; not using an array to make client code endian agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
+ ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+ {
+        if (value & ~((1 << N) - 1)) /* check whether any of the top N bits (of 2N bits) are set */
+            value >>= N;             /* if any were set, lose the bottom N */
+        else                         /* if none of the top N bits are set, */
+            zeros += N;              /* then we have identified N leading zeros */
+ }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
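+
+    // Binary-search clz: each partial step halves the window. For example,
+    // countLeadingZeros(0x00010000) shifts past the top 16 bits, then
+    // accumulates 8 + 4 + 2 + 1 zeros from the remaining steps, returning 15.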
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+ m_value.asInt = value;
+ }
+
+public:
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+            // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
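+
+    // E.g. 0x00ff00ff matches the byte0 == byte2 case above and encodes as
+    // immediate 0xff with pattern 1; a value such as 0xff << 10 takes the
+    // shifted-byte branch instead (shiftValue7 with an 8 + leadingZeros rotate).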
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+        // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+ // The 'make' methods, above, return a !isValid() value if the argument
+    // cannot be represented as the requested type. This method is called
+ // 'get' since the argument can always be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
+
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ARMv7Assembler;
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 5;
+ };
+ } m_u;
+};
+
+
+/*
+Some features of the Thumb instruction set are deprecated in ARMv7. Deprecated features affecting
+instructions supported by ARMv7-M are as follows:
+• use of the PC as <Rd> or <Rm> in a 16-bit ADD (SP plus register) instruction
+• use of the SP as <Rm> in a 16-bit ADD (SP plus register) instruction
+• use of the SP as <Rm> in a 16-bit CMP (register) instruction
+• use of MOV (register) instructions in which <Rd> is the SP or PC and <Rm> is also the SP or PC.
+• use of <Rn> as the lowest-numbered register in the register list of a 16-bit STM instruction with base
+register writeback
+*/
+
+class ARMv7Assembler {
+public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS,
+ ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+
+ ConditionCS = ConditionHS,
+ ConditionCC = ConditionLO,
+ } Condition;
+
+ class JmpSrc {
+ friend class ARMv7Assembler;
+ friend class ARMInstructionFormatter;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class ARMv7Assembler;
+ friend class ARMInstructionFormatter;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ bool BadReg(RegisterID reg)
+ {
+ return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+ }
+
+ bool isSingleRegister(FPRegisterID reg)
+ {
+        // Check that none of the high bits are set (valid single registers are s0-s31).
+ return !(reg & ~31);
+ }
+
+ bool isDoubleRegister(FPRegisterID reg)
+ {
+ // Check that the high bit isn't set (q16+), and that the low bit isn't (s1, s3, etc).
+ return !(reg & ~(31 << 1));
+ }
+
+ bool isQuadRegister(FPRegisterID reg)
+ {
+ return !(reg & ~(31 << 2));
+ }
+
+ uint32_t singleRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isSingleRegister(reg));
+ return reg;
+ }
+
+ uint32_t doubleRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isDoubleRegister(reg));
+ return reg >> 1;
+ }
+
+ uint32_t quadRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isQuadRegister(reg));
+ return reg >> 2;
+ }
+
+ uint32_t singleRegisterMask(FPRegisterID rd, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdNum = singleRegisterNum(rd);
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPRegisterID rd, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdNum = doubleRegisterNum(rd);
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_ADD_S_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_SUB_S_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_ADD_S_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_SUB_S_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_ADD_S_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_SUB_S_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_STR_reg_T1 = 0x5000,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_SMULL_T1 = 0xFB80,
+ } OpcodeID1;
+
+ typedef enum {
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // false means else!
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
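+
+    // The returned byte is the IT instruction's firstcond:mask operand. For
+    // example, ifThenElse(ConditionEQ, false) produces mask 0b1100 -- an
+    // ITE EQ block whose second instruction executes on NE.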
+
+public:
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARMRegisters::sp) {
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2);
+ return;
+ } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2);
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
+
+ void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_S_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return JmpSrc(m_formatter.size());
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc blx(RegisterID rm)
+ {
+ ASSERT(rm != ARMRegisters::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+ return JmpSrc(m_formatter.size());
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return JmpSrc(m_formatter.size());
+ }
+
+ void bkpt(uint8_t imm=0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then it is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
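+
+    // Worked example (illustrative, not from the original source): the call
+    // ldr(ARMRegisters::r0, ARMRegisters::r1, -4, true, true) is a pre-indexed
+    // load with writeback; offset becomes 4 with add == false, so the 12-bit
+    // field packs to 4 | (1 << 8) | (0 << 9) | (1 << 10) | (1 << 11) == 0xd04,
+    // i.e. P=1 (index), U=0 (subtract), W=1 (writeback), imm8=4.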
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+    // If index is set, this is a regular offset or a pre-indexed load;
+    // if index is not set then it is a post-indexed load.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed load;
+    // if wback is not set this is a regular offset memory access.
+    //
+    // (-255 <= offset <= 255)
+    // _reg = REG[rn]
+    // _tmp = _reg + offset
+    // REG[rt] = MEM[index ? _tmp : _reg]
+    // if (wback) REG[rn] = _tmp
+ void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+ void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
+
+ void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+    // If index is set, this is a regular offset or a pre-indexed store;
+    // if index is not set then it is a post-indexed store.
+    //
+    // If wback is set rn is updated - this is a pre- or post-indexed store;
+    // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+    // NOTE: In an IT block, sub doesn't modify the flags register.
+ void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_S_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ void vadd_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b00ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vcmp_F64(FPRegisterID rd, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0bc0eeb4 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vcvt_F64_S32(FPRegisterID fd, FPRegisterID sm)
+ {
+ m_formatter.vfpOp(0x0bc0eeb8 | doubleRegisterMask(fd, 6, 28) | singleRegisterMask(sm, 16, 21));
+ }
+
+ void vcvt_S32_F64(FPRegisterID sd, FPRegisterID fm)
+ {
+ m_formatter.vfpOp(0x0bc0eebd | singleRegisterMask(sd, 28, 6) | doubleRegisterMask(fm, 21, 16));
+ }
+
+ void vldr(FPRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ vmem(rd, rn, imm, true);
+ }
+
+ void vmov(RegisterID rd, FPRegisterID sn)
+ {
+ m_formatter.vfpOp(0x0a10ee10 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+ }
+
+ void vmov(FPRegisterID sn, RegisterID rd)
+ {
+ m_formatter.vfpOp(0x0a10ee00 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+ }
+
+ // move FPSCR flags to APSR.
+ void vmrs_APSR_nzcv_FPSCR()
+ {
+ m_formatter.vfpOp(0xfa10eef1);
+ }
+
+ void vmul_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b00ee20 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vstr(FPRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ vmem(rd, rn, imm, false);
+ }
+
+ void vsub_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b40ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+
+ JmpDst label()
+ {
+ return JmpDst(m_formatter.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ ASSERT(jump.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst destination)
+ {
+ ASSERT(destination.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t size() const
+ {
+ return m_formatter.size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ void* copy = m_formatter.executableCopy(allocator);
+ ASSERT(copy);
+ return copy;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ ASSERT(call.m_offset >= 0);
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
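+    //
+    // For example (illustrative): the LinkBuffer uses 'linkJump'/'linkCall' while
+    // the code is still writable, whereas the RepatchBuffer repatches already-
+    // executable code through 'relinkJump'/'relinkCall'.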
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ASSERT(to.m_offset != -1);
+ ASSERT(from.m_offset != -1);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(m_formatter.data()) + from.m_offset);
+ intptr_t relative = to.m_offset - from.m_offset;
+
+ linkWithOffset(location, relative);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(location);
+
+ linkWithOffset(location, relative);
+ }
+
+    // This method needs to be static, since it is used by the LinkBuffer.
+    // Should it return a bool saying whether the link was successful?
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+ ASSERT(from.m_offset != -1);
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
+ }
+
+ static void linkPointer(void* code, JmpDst where, void* value)
+ {
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setInt32(where, value);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setPointer(where, value);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+ }
+
+ static void repatchLoadPtrToLEA(void* where)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
+ ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
+
+ *loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
+ ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
+ }
+
+private:
+
+    // ARM VFP addresses can be offset by a sign bit and an 8-bit immediate,
+    // left-shifted by 2 (i.e. +/-(0..255) 32-bit words).
+ void vmem(FPRegisterID rd, RegisterID rn, int32_t imm, bool isLoad)
+ {
+ bool up;
+ uint32_t offset;
+ if (imm < 0) {
+ offset = -imm;
+ up = false;
+ } else {
+ offset = imm;
+ up = true;
+ }
+
+        // offset is effectively left-shifted by 2 already (the bottom two bits are zero,
+        // and not represented in the instruction). Left-shift by 14 to move it into
+        // position 0x00AA0000.
+ ASSERT((offset & ~(0xff << 2)) == 0);
+ offset <<= 14;
+
+ m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
+ }
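+
+    // Worked example for vmem above (illustrative, not from the original source):
+    // vldr(d0, r1, -8) gives offset == 8 and up == false; 8 << 14 == 0x00020000,
+    // which places imm8 == 2 (i.e. two 32-bit words) in the 0x00ff0000 field of
+    // the instruction constant.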
+
+ static void setInt32(void* code, uint32_t value)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+
+ uint16_t lo16 = value;
+ uint16_t hi16 = value >> 16;
+
+ spliceHi5(location - 4, lo16);
+ spliceLo11(location - 3, lo16);
+ spliceHi5(location - 2, hi16);
+ spliceLo11(location - 1, hi16);
+
+ ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
+ }
+
+ static void setPointer(void* code, void* value)
+ {
+ setInt32(code, reinterpret_cast<uint32_t>(value));
+ }
+
+ // Linking & patching:
+ // This method assumes that the JmpSrc being linked is a T4 b instruction.
+ static void linkWithOffset(uint16_t* instruction, intptr_t relative)
+ {
+        // Branches with offsets greater than +/-16MB cannot currently be linked.
+ if (((relative << 7) >> 7) != relative) {
+ // FIXME: This CRASH means we cannot turn the JIT on by default on arm-v7.
+ fprintf(stderr, "Error: Cannot link T4b.\n");
+ CRASH();
+ }
+
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+
+ int word1 = ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ int word2 = ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+
+ instruction[-2] = OP_B_T4a | word1;
+ instruction[-1] = OP_B_T4b | word2;
+ }
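+
+    // Worked example for linkWithOffset above (illustrative): with relative == +256
+    // the XOR yields 0xc00100, so word1 == 0 and
+    // word2 == 0x2000 | 0x800 | 0x80 == 0x2880; the set J1/J2 bits encode the
+    // inverted sign-extension bits of the positive offset.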
+
+ // These functions can be used to splice 16-bit immediates back into previously generated instructions.
+ static void spliceHi5(uint16_t* where, uint16_t what)
+ {
+ uint16_t pattern = (what >> 12) | ((what & 0x0800) >> 1);
+ *where = (*where & 0xFBF0) | pattern;
+ }
+ static void spliceLo11(uint16_t* where, uint16_t what)
+ {
+ uint16_t pattern = ((what & 0x0700) << 4) | (what & 0x00FF);
+ *where = (*where & 0x8F00) | pattern;
+ }
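+
+    // Worked example (illustrative): splicing what == 0x1234 sets spliceHi5's
+    // pattern to 0x0001 (imm4 == 1, i == 0) and spliceLo11's pattern to 0x2034
+    // (imm3 == 2, imm8 == 0x34), matching the imm4:i:imm3:imm8 layout of the
+    // MOVW/MOVT immediate fields.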
+
+ class ARMInstructionFormatter {
+ public:
+ void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+ void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ m_buffer.putShort(op | (imm.m_value.i << 10) | imm4);
+ m_buffer.putShort((imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8);
+ }
+
+ void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ void vfpOp(int32_t op)
+ {
+ m_buffer.putInt(op);
+ }
+
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+ void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ private:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h
new file mode 100644
index 000000000..525fe9813
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbstractMacroAssembler_h
+#define AbstractMacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#include <MacroAssemblerCodeRef.h>
+#include <CodeLocation.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/UnusedParam.h>
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class LinkBuffer;
+class RepatchBuffer;
+
+template <class AssemblerType>
+class AbstractMacroAssembler {
+public:
+ typedef AssemblerType AssemblerType_T;
+
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssemblerCodeRef CodeRef;
+
+ class Jump;
+
+ typedef typename AssemblerType::RegisterID RegisterID;
+ typedef typename AssemblerType::FPRegisterID FPRegisterID;
+ typedef typename AssemblerType::JmpSrc JmpSrc;
+ typedef typename AssemblerType::JmpDst JmpDst;
+
+
+ // Section 1: MacroAssembler operand types
+ //
+ // The following types are used as operands to MacroAssembler operations,
+ // describing immediate and memory operands to the instructions to be planted.
+
+
+ enum Scale {
+ TimesOne,
+ TimesTwo,
+ TimesFour,
+ TimesEight,
+ };
+
+ // Address:
+ //
+ // Describes a simple base-offset address.
+ struct Address {
+ explicit Address(RegisterID base, int32_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // ImplicitAddress:
+ //
+ // This class is used for explicit 'load' and 'store' operations
+ // (as opposed to situations in which a memory operand is provided
+ // to a generic operation, such as an integer arithmetic instruction).
+ //
+ // In the case of a load (or store) operation we want to permit
+ // addresses to be implicitly constructed, e.g. the two calls:
+ //
+ // load32(Address(addrReg), destReg);
+ // load32(addrReg, destReg);
+ //
+    // are equivalent, and the explicit wrapping of the Address in the former
+ // is unnecessary.
+ struct ImplicitAddress {
+ ImplicitAddress(RegisterID base)
+ : base(base)
+ , offset(0)
+ {
+ }
+
+ ImplicitAddress(Address address)
+ : base(address.base)
+ , offset(address.offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // BaseIndex:
+ //
+ // Describes a complex addressing mode.
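+    //
+    // For example (illustrative): BaseIndex(base, index, TimesFour, 8)
+    // addresses base + (index * 4) + 8.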
+ struct BaseIndex {
+ BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+ : base(base)
+ , index(index)
+ , scale(scale)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ RegisterID index;
+ Scale scale;
+ int32_t offset;
+ };
+
+ // AbsoluteAddress:
+ //
+    // Describes a memory operand given by a pointer. For regular load & store
+ // operations an unwrapped void* will be used, rather than using this.
+ struct AbsoluteAddress {
+ explicit AbsoluteAddress(void* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ void* m_ptr;
+ };
+
+ // ImmPtr:
+ //
+    // A pointer-sized immediate operand to an instruction - this is wrapped
+    // in a class requiring explicit construction in order to differentiate
+    // it from pointers used as absolute addresses to memory operations.
+ struct ImmPtr {
+ explicit ImmPtr(void* value)
+ : m_value(value)
+ {
+ }
+
+ intptr_t asIntptr()
+ {
+ return reinterpret_cast<intptr_t>(m_value);
+ }
+
+ void* m_value;
+ };
+
+ // Imm32:
+ //
+ // A 32bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
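+    //
+    // For example (illustrative, using macro assembler method names):
+    // add32(Imm32(4), reg) treats 4 as an immediate value, whereas a bare
+    // void* passed to load32/store32 would be taken as an absolute address.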
+ struct Imm32 {
+ explicit Imm32(int32_t value)
+ : m_value(value)
+#if PLATFORM(ARM)
+ , m_isPointer(false)
+#endif
+ {
+ }
+
+#if !PLATFORM(X86_64)
+ explicit Imm32(ImmPtr ptr)
+ : m_value(ptr.asIntptr())
+#if PLATFORM(ARM)
+ , m_isPointer(true)
+#endif
+ {
+ }
+#endif
+
+ int32_t m_value;
+#if PLATFORM(ARM)
+ // We rely on being able to regenerate code to recover exception handling
+ // information. Since ARMv7 supports 16-bit immediates there is a danger
+ // that if pointer values change the layout of the generated code will change.
+ // To avoid this problem, always generate pointers (and thus Imm32s constructed
+ // from ImmPtrs) with a code sequence that is able to represent any pointer
+ // value - don't use a more compact form in these cases.
+ bool m_isPointer;
+#endif
+ };
+
+
+ // Section 2: MacroAssembler code buffer handles
+ //
+ // The following types are used to reference items in the code buffer
+ // during JIT code generation. For example, the type Jump is used to
+ // track the location of a jump instruction so that it may later be
+ // linked to a label marking its destination.
+
+
+ // Label:
+ //
+ // A Label records a point in the generated instruction stream, typically such that
+ // it may be used as a destination for a jump.
+ class Label {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Jump;
+ friend class MacroAssemblerCodeRef;
+ friend class LinkBuffer;
+
+ public:
+ Label()
+ {
+ }
+
+ Label(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isUsed() const { return m_label.isUsed(); }
+ void used() { m_label.used(); }
+ private:
+ JmpDst m_label;
+ };
+
+ // DataLabelPtr:
+ //
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+ // patched after the code has been generated.
+ class DataLabelPtr {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelPtr()
+ {
+ }
+
+ DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ private:
+ JmpDst m_label;
+ };
+
+ // DataLabel32:
+ //
+    // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
+ // patched after the code has been generated.
+ class DataLabel32 {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabel32()
+ {
+ }
+
+ DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ private:
+ JmpDst m_label;
+ };
+
+ // Call:
+ //
+ // A Call object is a reference to a call instruction that has been planted
+ // into the code buffer - it is typically used to link the call, setting the
+ // relative offset such that when executed it will call to the desired
+ // destination.
+ class Call {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+
+ public:
+ enum Flags {
+ None = 0x0,
+ Linkable = 0x1,
+ Near = 0x2,
+ LinkableNear = 0x3,
+ };
+
+ Call()
+ : m_flags(None)
+ {
+ }
+
+ Call(JmpSrc jmp, Flags flags)
+ : m_jmp(jmp)
+ , m_flags(flags)
+ {
+ }
+
+ bool isFlagSet(Flags flag)
+ {
+ return m_flags & flag;
+ }
+
+ static Call fromTailJump(Jump jump)
+ {
+ return Call(jump.m_jmp, Linkable);
+ }
+
+ JmpSrc m_jmp;
+ private:
+ Flags m_flags;
+ };
+
+ // Jump:
+ //
+ // A jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ class Jump {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Call;
+ friend class LinkBuffer;
+ public:
+ Jump()
+ {
+ }
+
+ Jump(JmpSrc jmp)
+ : m_jmp(jmp)
+ {
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ masm->m_assembler.linkJump(m_jmp, label.m_label);
+ }
+
+ private:
+ JmpSrc m_jmp;
+ };
+
+ // JumpList:
+ //
+ // A JumpList is a set of Jump objects.
+ // All jumps in the set will be linked to the same destination.
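+    //
+    // Usage sketch (illustrative):
+    //   JumpList failures;
+    //   failures.append(jump1);   // jumps collected during code generation
+    //   failures.append(jump2);
+    //   failures.link(masm);      // all collected jumps target the current label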
+ class JumpList {
+ friend class LinkBuffer;
+
+ public:
+ typedef Vector<Jump, 16> JumpVector;
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].link(masm);
+ m_jumps.clear();
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].linkTo(label, masm);
+ m_jumps.clear();
+ }
+
+ void append(Jump jump)
+ {
+ m_jumps.append(jump);
+ }
+
+ void append(JumpList& other)
+ {
+ m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+ }
+
+ bool empty()
+ {
+ return !m_jumps.size();
+ }
+
+ const JumpVector& jumps() { return m_jumps; }
+
+ private:
+ JumpVector m_jumps;
+ };
+
+
+ // Section 3: Misc admin methods
+
+ static CodePtr trampolineAt(CodeRef ref, Label label)
+ {
+ return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
+ }
+
+ size_t size()
+ {
+ return m_assembler.size();
+ }
+
+ Label label()
+ {
+ return Label(this);
+ }
+
+ Label align()
+ {
+ m_assembler.align(16);
+ return Label(this);
+ }
+
+ ptrdiff_t differenceBetween(Label from, Jump to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ }
+
+ ptrdiff_t differenceBetween(Label from, Call to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ }
+
+ ptrdiff_t differenceBetween(Label from, Label to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ }
+
+ ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ }
+
+protected:
+ AssemblerType m_assembler;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+ }
+
+ static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
+
+ static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_jmp);
+ }
+
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AbstractMacroAssembler_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBuffer.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBuffer.h
new file mode 100644
index 000000000..073906a52
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "stdint.h"
+#include <string.h>
+#include <jit/ExecutableAllocator.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+
+namespace JSC {
+
+ class AssemblerBuffer {
+ static const int inlineCapacity = 256;
+ public:
+ AssemblerBuffer()
+ : m_buffer(m_inlineBuffer)
+ , m_capacity(inlineCapacity)
+ , m_size(0)
+ {
+ }
+
+ ~AssemblerBuffer()
+ {
+ if (m_buffer != m_inlineBuffer)
+ fastFree(m_buffer);
+ }
+
+ void ensureSpace(int space)
+ {
+ if (m_size > m_capacity - space)
+ grow();
+ }
+
+ bool isAligned(int alignment) const
+ {
+ return !(m_size & (alignment - 1));
+ }
+
+ void putByteUnchecked(int value)
+ {
+ ASSERT(!(m_size > m_capacity - 4));
+ m_buffer[m_size] = value;
+ m_size++;
+ }
+
+ void putByte(int value)
+ {
+ if (m_size > m_capacity - 4)
+ grow();
+ putByteUnchecked(value);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ ASSERT(!(m_size > m_capacity - 4));
+ *reinterpret_cast<short*>(&m_buffer[m_size]) = value;
+ m_size += 2;
+ }
+
+ void putShort(int value)
+ {
+ if (m_size > m_capacity - 4)
+ grow();
+ putShortUnchecked(value);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ ASSERT(!(m_size > m_capacity - 4));
+ *reinterpret_cast<int*>(&m_buffer[m_size]) = value;
+ m_size += 4;
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ ASSERT(!(m_size > m_capacity - 8));
+ *reinterpret_cast<int64_t*>(&m_buffer[m_size]) = value;
+ m_size += 8;
+ }
+
+ void putInt(int value)
+ {
+ if (m_size > m_capacity - 4)
+ grow();
+ putIntUnchecked(value);
+ }
+
+ void* data() const
+ {
+ return m_buffer;
+ }
+
+ int size() const
+ {
+ return m_size;
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ if (!m_size)
+ return 0;
+
+ void* result = allocator->alloc(m_size);
+
+ if (!result)
+ return 0;
+
+ ExecutableAllocator::makeWritable(result, m_size);
+
+ return memcpy(result, m_buffer, m_size);
+ }
+
+ protected:
+ void append(const char* data, int size)
+ {
+ if (m_size > m_capacity - size)
+ grow(size);
+
+ memcpy(m_buffer + m_size, data, size);
+ m_size += size;
+ }
+
+ void grow(int extraCapacity = 0)
+ {
+ m_capacity += m_capacity / 2 + extraCapacity;
+
+ if (m_buffer == m_inlineBuffer) {
+ char* newBuffer = static_cast<char*>(fastMalloc(m_capacity));
+ m_buffer = static_cast<char*>(memcpy(newBuffer, m_buffer, m_size));
+ } else
+ m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
+ }
+
+ char m_inlineBuffer[inlineCapacity];
+ char* m_buffer;
+ int m_capacity;
+ int m_size;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 000000000..af3c3be07
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+    A constant pool can store 4- or 8-byte values, either constants or
+    addresses. Addresses should be 32 or 64 bits wide; constants should be
+    double-precision floats or integers that are hard to encode in a few
+    machine instructions.
+
+    TODO: The pool is designed to handle both 32- and 64-bit values, but
+    currently only 4-byte constants are implemented and tested.
+
+    The AssemblerBuffer can contain multiple constant pools. Each pool is
+    inserted into the instruction stream, protected from the execution flow
+    by a jump instruction.
+
+    The flush mechanism is invoked when no space remains to insert the next
+    instruction while keeping the pool reachable. Three values are used to
+    determine when the constant pool itself has to be inserted into the
+    instruction stream (AssemblerBuffer):
+
+    - maxPoolSize: size of the constant pool in bytes; this value cannot be
+      larger than the maximum offset of a PC-relative memory load
+
+    - barrierSize: size in bytes of the jump instruction which protects the
+      constant pool from execution
+
+    - maxInstructionSize: maximum length of a machine instruction in bytes
+
+    There are some callbacks which handle the target-architecture-specific
+    parts of address handling:
+
+    - TYPE patchConstantPoolLoad(TYPE load, int value):
+      patch the 'load' instruction with the index of the constant in the
+      constant pool and return the patched instruction.
+
+    - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+      patch the PC-relative load instruction at 'loadAddr' with the final
+      relative offset. The offset can be computed with the help of
+      'constPoolAddr' (the address of the constant pool) and the index of
+      the constant (stored previously in the load instruction itself).
+
+    - TYPE placeConstantPoolBarrier(int size):
+      return a constant pool barrier instruction which jumps over the
+      constant pool.
+
+    The 'put*WithConstant*' functions should be used to place data into the
+    constant pool.
+*/
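+
+// Minimal sketch (illustrative, not part of the original source) of the
+// interface a target assembler supplies to this template; the pool, barrier
+// and instruction sizes below are hypothetical values:
+//
+//   struct ExampleAssembler {
+//       static uint32_t patchConstantPoolLoad(uint32_t load, int index);
+//       static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+//       static uint32_t placeConstantPoolBarrier(int size);
+//       static const int padForAlign8 = 0, padForAlign16 = 0, padForAlign32 = 0;
+//   };
+//   typedef AssemblerBufferWithConstantPool<512, 4, 4, ExampleAssembler> ExampleBuffer;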
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool: public AssemblerBuffer {
+ typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+public:
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ int size()
+ {
+ flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t));
+ return AssemblerBuffer::size();
+ }
+
+ int uncheckedSize()
+ {
+ return AssemblerBuffer::size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ flushConstantPool(false);
+ return AssemblerBuffer::executableCopy(allocator);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ flushIfNoSpaceFor(4, 4);
+
+ m_loadOffsets.append(AssemblerBuffer::size());
+ if (isReusable)
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, i));
+ correctDeltas(4);
+ return;
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, m_numConsts));
+ ++m_numConsts;
+
+ correctDeltas(4, 4);
+ }
+
+ // This flushing mechanism can be called after any unconditional jumps.
+ void flushWithoutBarrier(bool isForced = false)
+ {
+ // Flush if constant pool is more than 60% full to avoid overuse of this function.
+ if (isForced || 5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_numConsts;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (m_numConsts == 0)
+ return;
+ int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Callback to protect the constant pool from execution
+ if (useBarrier)
+ AssemblerBuffer::putInt(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = AssemblerBuffer::size();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ m_maxDistance = maxPoolSize;
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+ if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+ (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool;
+ char* m_mask;
+ LoadOffsets m_loadOffsets;
+
+ int m_numConsts;
+ int m_maxDistance;
+ int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/CodeLocation.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/CodeLocation.h
new file mode 100644
index 000000000..b910b6f88
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/CodeLocation.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include <wtf/Platform.h>
+
+#include <MacroAssemblerCodeRef.h>
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
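+//
+// Usage sketch (illustrative; the offset value is hypothetical):
+//   CodeLocationCall call = ...;                 // the one pointer we retain
+//   CodeLocationDataLabelPtr slot = call.dataLabelPtrAtOffset(-12);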
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
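+
+// For example (offset value illustrative), given a CodeLocationCall 'call',
+// call.dataLabelPtrAtOffset(-4) names a pointer-sized datum planted four
+// bytes before the call; all of these offsets are byte offsets relative to
+// this location.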
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/LinkBuffer.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/LinkBuffer.h
new file mode 100644
index 000000000..6d0811703
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/LinkBuffer.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed and the code has been copied to its final location in memory. At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets.
+// * The address of Jump objects may be taken, such that they can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
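+// A minimal usage sketch (illustrative only; 'masm' stands for a populated
+// MacroAssembler, 'pool' for an ExecutablePool, and 'someLabel' for a
+// CodeLocationLabel obtained by the caller):
+//
+//     MacroAssembler::Jump toTarget = masm.jump();
+//     LinkBuffer patchBuffer(&masm, pool);         // copies the code out
+//     patchBuffer.link(toTarget, someLabel);       // resolve the jump
+//     MacroAssemblerCodeRef code = patchBuffer.finalizeCode();
+//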
+class LinkBuffer : public Noncopyable {
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+
+public:
+ // Note: the initialization sequence is significant, since executablePool is a PassRefPtr.
+ // First, executablePool is copied into m_executablePool, then the initialization of
+ // m_code uses m_executablePool, *not* executablePool, since the latter is no longer valid.
+ LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+ : m_executablePool(executablePool)
+ , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
+ , m_size(masm->m_assembler.size())
+#ifndef NDEBUG
+ , m_completed(false)
+#endif
+ {
+ }
+
+ ~LinkBuffer()
+ {
+ ASSERT(m_completed);
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ MacroAssembler::linkPointer(code(), label.m_label, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+ // Upon completion of all patching, either 'finalizeCode()' or 'finalizeCodeAddendum()' should
+ // be called once to complete generation of the code. 'finalizeCode()' is suited to situations
+ // where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()'
+ // is suited to adding to an existing allocation.
+ CodeRef finalizeCode()
+ {
+ performFinalization();
+
+ return CodeRef(m_code, m_executablePool, m_size);
+ }
+ CodeLocationLabel finalizeCodeAddendum()
+ {
+ performFinalization();
+
+ return CodeLocationLabel(code());
+ }
+
+private:
+ // Keep this private! - the underlying code should only be obtained externally via
+ // finalizeCode() or finalizeCodeAddendum().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void performFinalization()
+ {
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ m_completed = true;
+#endif
+
+ ExecutableAllocator::makeExecutable(code(), m_size);
+ ExecutableAllocator::cacheFlush(code(), m_size);
+ }
+
+ RefPtr<ExecutablePool> m_executablePool;
+ void* m_code;
+ size_t m_size;
+#ifndef NDEBUG
+ bool m_completed;
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h
new file mode 100644
index 000000000..2743ab4e4
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#if PLATFORM(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif PLATFORM(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif PLATFORM(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif PLATFORM(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
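+
+// Each port supplies a concrete MacroAssemblerBase; the MacroAssembler class
+// below layers the platform-agnostic conveniences and the *Ptr aliases on
+// top of whichever base was selected above.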
+
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+ using MacroAssemblerBase::pop;
+ using MacroAssemblerBase::jump;
+ using MacroAssemblerBase::branch32;
+ using MacroAssemblerBase::branch16;
+#if PLATFORM(X86_64)
+ using MacroAssemblerBase::branchPtr;
+ using MacroAssemblerBase::branchTestPtr;
+#endif
+
+
+ // Platform agnostic convenience functions,
+ // described in terms of other macro assembly methods.
+ void pop()
+ {
+ addPtr(Imm32(sizeof(void*)), stackPointerRegister);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
+ }
+
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
+ }
+
+ void poke(ImmPtr imm, int index = 0)
+ {
+ storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
+ }
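+
+ // For example (register name illustrative), a value can be spilled to and
+ // reloaded from the second stack slot without moving the stack pointer:
+ //
+ // poke(regT0, 1); // [sp + 1 * sizeof(void*)] = regT0
+ // peek(regT0, 1); // regT0 = [sp + 1 * sizeof(void*)]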
+
+
+ // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
+ void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
+ {
+ branch32(cond, op1, op2).linkTo(target, this);
+ }
+
+ void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(Condition cond, RegisterID left, Address right, Label target)
+ {
+ branch32(cond, left, right).linkTo(target, this);
+ }
+
+ void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
+ {
+ branch16(cond, left, right).linkTo(target, this);
+ }
+
+ void branchTestPtr(Condition cond, RegisterID reg, Label target)
+ {
+ branchTestPtr(cond, reg).linkTo(target, this);
+ }
+
+ void jump(Label target)
+ {
+ jump().linkTo(target, this);
+ }
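+
+ // For instance (register name illustrative), a counted loop closes with a
+ // backwards branch to a previously bound Label:
+ //
+ // Label top = label();
+ // sub32(Imm32(1), counter);
+ // branch32(NotEqual, counter, Imm32(0), top);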
+
+
+ // Ptr methods
+ // On 32-bit platforms (e.g. x86), these methods directly map onto their 32-bit equivalents.
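+ // e.g. on a 32-bit target addPtr(Imm32(4), dest) assembles exactly as
+ // add32(Imm32(4), dest); no separate pointer-width opcode is emitted.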
+#if !PLATFORM(X86_64)
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void addPtr(ImmPtr imm, RegisterID dest)
+ {
+ add32(Imm32(imm), dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and32(src, dest);
+ }
+
+ void andPtr(Imm32 imm, RegisterID srcDest)
+ {
+ and32(imm, srcDest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ }
+
+ void orPtr(ImmPtr imm, RegisterID dest)
+ {
+ or32(Imm32(imm), dest);
+ }
+
+ void orPtr(Imm32 imm, RegisterID dest)
+ {
+ or32(imm, dest);
+ }
+
+ void rshiftPtr(RegisterID shift_amount, RegisterID dest)
+ {
+ rshift32(shift_amount, dest);
+ }
+
+ void rshiftPtr(Imm32 imm, RegisterID dest)
+ {
+ rshift32(imm, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void subPtr(ImmPtr imm, RegisterID dest)
+ {
+ sub32(Imm32(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor32(src, dest);
+ }
+
+ void xorPtr(Imm32 imm, RegisterID srcDest)
+ {
+ xor32(imm, srcDest);
+ }
+
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(void* address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithAddressOffsetPatch(address, dest);
+ }
+
+ void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ set32(cond, left, right, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(ImmPtr imm, ImplicitAddress address)
+ {
+ store32(Imm32(imm), address);
+ }
+
+ void storePtr(ImmPtr imm, void* address)
+ {
+ store32(Imm32(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store32WithAddressOffsetPatch(src, address);
+ }
+
+
+ Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right));
+ }
+
+ Jump branchPtr(Condition cond, RegisterID left, Address right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(Condition cond, Address left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(Condition cond, Address left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right));
+ }
+
+ Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right));
+ }
+
+ Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+
+ Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, src, dest);
+ }
+
+ Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, imm, dest);
+ }
+#endif
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp
new file mode 100644
index 000000000..d726ecd9c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+
+#include "MacroAssemblerARM.h"
+
+#if PLATFORM(LINUX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+static bool isVFPPresent()
+{
+#if PLATFORM(LINUX)
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd >= 0) {
+ Elf32_auxv_t aux;
+ while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+ if (aux.a_type == AT_HWCAP) {
+ close(fd);
+ return aux.a_un.a_val & HWCAP_VFP;
+ }
+ }
+ close(fd);
+ }
+#endif
+
+ return false;
+}
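+
+// The kernel exposes the ELF auxiliary vector through /proc/self/auxv; its
+// AT_HWCAP entry is a bitmask of CPU features, so testing the HWCAP_VFP bit
+// detects VFP support without risking an undefined-instruction trap from
+// executing a probe VFP instruction.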
+
+const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
+
+#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
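+// Loads a 32-bit value as two halfword loads and merges the results, for ARM
+// cores that fault on unaligned word accesses. The three branches below pick
+// the cheapest addressing mode: a small positive offset, a small negative
+// offset, or a full 32-bit offset materialised in the S0 scratch register.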
+void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+{
+ ARMWord op2;
+
+ ASSERT(address.scale >= 0 && address.scale <= 3);
+ op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
+
+ if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
+ m_assembler.add_r(ARMRegisters::S0, address.base, op2);
+ m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
+ m_assembler.ldrh_u(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset + 0x2));
+ } else if (address.offset < 0 && address.offset >= -0xff) {
+ m_assembler.add_r(ARMRegisters::S0, address.base, op2);
+ m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
+ m_assembler.ldrh_d(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset - 0x2));
+ } else {
+ m_assembler.ldr_un_imm(ARMRegisters::S0, address.offset);
+ m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
+ m_assembler.ldrh_r(dest, address.base, ARMRegisters::S0);
+ m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::OP2_IMM | 0x2);
+ m_assembler.ldrh_r(ARMRegisters::S0, address.base, ARMRegisters::S0);
+ }
+ m_assembler.orr_r(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h
new file mode 100644
index 000000000..aa8cbb036
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -0,0 +1,817 @@
+/*
+ * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+public:
+ enum Condition {
+ Equal = ARMAssembler::EQ,
+ NotEqual = ARMAssembler::NE,
+ Above = ARMAssembler::HI,
+ AboveOrEqual = ARMAssembler::CS,
+ Below = ARMAssembler::CC,
+ BelowOrEqual = ARMAssembler::LS,
+ GreaterThan = ARMAssembler::GT,
+ GreaterThanOrEqual = ARMAssembler::GE,
+ LessThan = ARMAssembler::LT,
+ LessThanOrEqual = ARMAssembler::LE,
+ Overflow = ARMAssembler::VS,
+ Signed = ARMAssembler::MI,
+ Zero = ARMAssembler::EQ,
+ NonZero = ARMAssembler::NE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = ARMAssembler::EQ,
+ DoubleGreaterThan = ARMAssembler::GT,
+ DoubleGreaterThanOrEqual = ARMAssembler::GE,
+ DoubleLessThan = ARMAssembler::LT,
+ DoubleLessThanOrEqual = ARMAssembler::LE,
+ };
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+
+ static const Scale ScalePtr = TimesFour;
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ add32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ add32(ARMRegisters::S1, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.ands_r(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.ands_r(dest, dest, w);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsl_r(dest, shift_amount));
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ if (src == dest) {
+ move(src, ARMRegisters::S0);
+ src = ARMRegisters::S0;
+ }
+ m_assembler.muls_r(dest, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, ARMRegisters::S0);
+ m_assembler.muls_r(dest, src, ARMRegisters::S0);
+ }
+
+ void not32(RegisterID dest)
+ {
+ m_assembler.mvns_r(dest, dest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs_r(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.asr_r(dest, shift_amount));
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ sub32(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, ARMRegisters::S1);
+ sub32(ARMRegisters::S1, dest);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eors_r(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(true, dest, address.base, address.offset);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
+#else
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+#endif
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
+ m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
+ return dataLabel;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ load32(address, dest);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale));
+ if (address.offset >= 0)
+ m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
+ else
+ m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
+ m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
+ return dataLabel;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransfer32(false, src, address.base, address.offset);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ if (imm.m_isPointer)
+ m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
+ else
+ move(imm, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ if (imm.m_isPointer)
+ m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
+ else
+ m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
+ m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ load32(address, ARMRegisters::S1);
+ push(ARMRegisters::S1);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, ARMRegisters::S0);
+ push(ARMRegisters::S0);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (imm.m_isPointer)
+ m_assembler.ldr_un_imm(dest, imm.m_value);
+ else
+ m_assembler.moveImm(imm.m_value, dest);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mov_r(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ m_assembler.mov_r(ARMRegisters::S0, reg1);
+ m_assembler.mov_r(reg1, reg2);
+ m_assembler.mov_r(reg2, ARMRegisters::S0);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
+ {
+ m_assembler.cmp_r(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0)
+ {
+ if (right.m_isPointer) {
+ m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
+ m_assembler.cmp_r(left, ARMRegisters::S0);
+ } else
+ m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+ return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, ARMRegisters::S1);
+ return branch32(cond, left, ARMRegisters::S1);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load32(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load32WithUnalignedHalfWords(left, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ UNUSED_PARAM(cond);
+ UNUSED_PARAM(left);
+ UNUSED_PARAM(right);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load16(left, ARMRegisters::S0);
+ move(right, ARMRegisters::S1);
+ m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
+ return m_assembler.jmp(ARMCondition(cond));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst_r(reg, mask);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.tst_r(reg, w);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, ARMRegisters::S1);
+ return branchTest32(cond, ARMRegisters::S1, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ move(target, ARMRegisters::pc);
+ }
+
+ void jump(Address address)
+ {
+ load32(address, ARMRegisters::pc);
+ }
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest) {
+ move(src1, ARMRegisters::S0);
+ src1 = ARMRegisters::S0;
+ }
+ m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
+ m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ mull32(src, dest, dest);
+ cond = NonZero;
+ } else
+ mul32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ move(imm, ARMRegisters::S0);
+ mull32(ARMRegisters::S0, src, dest);
+ cond = NonZero;
+ } else
+ mul32(imm, src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void breakpoint()
+ {
+ m_assembler.bkpt(0);
+ }
+
+ Call nearCall()
+ {
+ prepareCall();
+ return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ prepareCall();
+ move(ARMRegisters::pc, target);
+ JmpSrc jmpSrc;
+ return Call(jmpSrc, Call::None);
+ }
+
+ void call(Address address)
+ {
+ call32(address.base, address.offset);
+ }
+
+ void ret()
+ {
+ pop(ARMRegisters::pc);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp_r(left, right);
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load32(address, ARMRegisters::S1);
+ if (mask.m_value == -1)
+ m_assembler.cmp_r(0, ARMRegisters::S1);
+ else
+ m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+ add32(imm, ARMRegisters::S1);
+ m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+ sub32(imm, ARMRegisters::S1);
+ m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
+ m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
+ m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ load32(left.m_ptr, ARMRegisters::S1);
+ return branch32(cond, ARMRegisters::S1, right);
+ }
+
+ Call call()
+ {
+ prepareCall();
+ return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ DataLabelPtr dataLabel(this);
+ m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
+ Jump jump = branch32(cond, left, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ load32(left, ARMRegisters::S1);
+ dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+ Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+ return jump;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
+ store32(ARMRegisters::S1, address);
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(ImmPtr(0), address);
+ }
+
+ // Floating point operators
+ bool supportsFloatingPoint() const
+ {
+ return s_isVFPPresent;
+ }
+
+ bool supportsFloatingPointTruncate() const
+ {
+ return false;
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ m_assembler.doubleTransfer(true, dest, address.base, address.offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ m_assembler.doubleTransfer(false, src, address.base, address.offset);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.faddd_r(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ addDouble(ARMRegisters::SD0, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fsubd_r(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ subDouble(ARMRegisters::SD0, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmuld_r(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, ARMRegisters::SD0);
+ mulDouble(ARMRegisters::SD0, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmsr_r(dest, src);
+ m_assembler.fsitod_r(dest, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.fcmpd_r(left, right);
+ m_assembler.fmstat();
+ return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond)));
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+protected:
+ ARMAssembler::Condition ARMCondition(Condition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_assembler.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_assembler.sizeOfConstantPool();
+ }
+
+ void prepareCall()
+ {
+ ensureSpace(3 * sizeof(ARMWord), sizeof(ARMWord));
+
+ // S0 might be used for parameter passing
+ m_assembler.add_r(ARMRegisters::S1, ARMRegisters::pc, ARMAssembler::OP2_IMM | 0x4);
+ m_assembler.push_r(ARMRegisters::S1);
+ }
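+
+ // Note: in ARM state, reading pc yields the address of the current
+ // instruction plus 8, so the 'pc + 4' computed above is the address of
+ // the instruction following the branch emitted next, which is exactly the
+ // return address the callee should return to.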
+
+ void call32(RegisterID base, int32_t offset)
+ {
+ if (base == ARMRegisters::sp)
+ offset += 4;
+
+ if (offset >= 0) {
+ if (offset <= 0xfff) {
+ prepareCall();
+ m_assembler.dtr_u(true, ARMRegisters::pc, base, offset);
+ } else if (offset <= 0xfffff) {
+ m_assembler.add_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+ prepareCall();
+ m_assembler.dtr_u(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+ prepareCall();
+ m_assembler.dtr_ur(true, ARMRegisters::pc, base, reg);
+ }
+ } else {
+ offset = -offset;
+ if (offset <= 0xfff) {
+ prepareCall();
+ m_assembler.dtr_d(true, ARMRegisters::pc, base, offset);
+ } else if (offset <= 0xfffff) {
+ m_assembler.sub_r(ARMRegisters::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
+ prepareCall();
+ m_assembler.dtr_d(true, ARMRegisters::pc, ARMRegisters::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = m_assembler.getImm(offset, ARMRegisters::S0);
+ prepareCall();
+ m_assembler.dtr_dr(true, ARMRegisters::pc, base, reg);
+ }
+ }
+ }
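+
+ // The 0xfff / 0xfffff split above mirrors the ldr addressing mode: up to
+ // 12 bits of offset fit the displacement field directly, up to 20 bits
+ // are split into an add of bits 12-19 (an 8-bit immediate rotated into
+ // place) plus a 12-bit displacement, and anything larger is materialised
+ // with getImm.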
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMAssembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static const bool s_isVFPPresent;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+
+#endif // MacroAssemblerARM_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 000000000..a549604ff
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,1095 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+ // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
+ // - dTR is likely used more than aTR, and we'll get better instruction
+ // encoding if it's in the low 8 registers.
+ static const ARMRegisters::RegisterID dataTempRegister = ARMRegisters::ip;
+ static const RegisterID addressTempRegister = ARMRegisters::r3;
+ static const FPRegisterID fpTempRegister = ARMRegisters::d7;
+
+ struct ArmAddress {
+ enum AddressType {
+ HasOffset,
+ HasIndex,
+ } type;
+ RegisterID base;
+ union {
+ int32_t offset;
+ struct {
+ RegisterID index;
+ Scale scale;
+ };
+ } u;
+
+ explicit ArmAddress(RegisterID base, int32_t offset = 0)
+ : type(HasOffset)
+ , base(base)
+ {
+ u.offset = offset;
+ }
+
+ explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+ : type(HasIndex)
+ , base(base)
+ {
+ u.index = index;
+ u.scale = scale;
+ }
+ };
+
+public:
+
+ static const Scale ScalePtr = TimesFour;
+
+ enum Condition {
+ Equal = ARMv7Assembler::ConditionEQ,
+ NotEqual = ARMv7Assembler::ConditionNE,
+ Above = ARMv7Assembler::ConditionHI,
+ AboveOrEqual = ARMv7Assembler::ConditionHS,
+ Below = ARMv7Assembler::ConditionLO,
+ BelowOrEqual = ARMv7Assembler::ConditionLS,
+ GreaterThan = ARMv7Assembler::ConditionGT,
+ GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ LessThan = ARMv7Assembler::ConditionLT,
+ LessThanOrEqual = ARMv7Assembler::ConditionLE,
+ Overflow = ARMv7Assembler::ConditionVS,
+ Signed = ARMv7Assembler::ConditionMI,
+ Zero = ARMv7Assembler::ConditionEQ,
+ NonZero = ARMv7Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ DoubleLessThan = ARMv7Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ };
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst).
+ // For many operations the source may be an Imm32, and the srcDst operand
+ // may often be a memory location (explicitly described using an Address
+ // object).
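+ //
+ // e.g. add32(Imm32(1), regT0) (register name illustrative) encodes the
+ // immediate directly when it fits a Thumb-2 modified immediate, and
+ // otherwise materialises it in dataTempRegister first; the
+ // makeUInt12OrEncodedImm / makeEncodedImm paths below implement this
+ // choice.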
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add(dest, src, dataTempRegister);
+ }
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.ARM_and(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.ARM_and(dest, dest, dataTempRegister);
+ }
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, dest, imm.m_value);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.lsl(dest, dest, shift_amount);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ }
+
+ void not32(RegisterID srcDest)
+ {
+ m_assembler.mvn(srcDest, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.orr(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.orr(dest, dest, dataTempRegister);
+ }
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.asr(dest, dest, shift_amount);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.asr(dest, dest, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub(dest, dest, dataTempRegister);
+ }
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eor(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.eor(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.eor(dest, dest, dataTempRegister);
+ }
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be an Imm32. Address
+ // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
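+ //
+ // The private ArmAddress overloads below dispatch on the two address
+ // shapes: HasOffset selects an immediate-offset ldr/str (a 12-bit unsigned
+ // immediate for positive offsets, the -255..-1 form for small negative
+ // ones), while HasIndex selects the register-plus-scaled-index form.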
+
+private:
+ void load32(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldr(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrh(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void store32(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.str(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.str(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.str(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+public:
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ move(ImmPtr(address), addressTempRegister);
+ m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+ store32(src, ArmAddress(address.base, dataTempRegister));
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ move(ImmPtr(address), addressTempRegister);
+ m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+
+ // Floating-point operations:
+
+ bool supportsFloatingPoint() const { return true; }
+ // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
+ // If a value is not representable as an integer, and possibly for some values that are,
+ // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
+ // a branch will be taken. It is not clear whether this interface will be well suited to
+ // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
+ // failure values (it saturates to INT_MIN & INT_MAX, and NaN results in a value of 0). This is a
+ // temporary solution while we work out what this interface should be. Either we need to
+ // decide to make this interface work on all platforms, rework the interface to make it more
+ // generic, or decide that the MacroAssembler cannot practically be used to abstract these
+ // operations, and make clients go directly to the m_assembler to plant truncation instructions.
+ // In short, FIXME:.
+ bool supportsFloatingPointTruncate() const { return false; }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // ARM VFP addresses can be offset by an 8-bit immediate with a separate sign bit, left-shifted by 2 (i.e. +/- 255 * 4).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(Imm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vldr(dest, base, offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // ARM VFP addresses can be offset by an 8-bit immediate with a separate sign bit, left-shifted by 2 (i.e. +/- 255 * 4).
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(Imm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vstr(src, base, offset);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd_F64(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub_F64(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul_F64(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov(fpTempRegister, src);
+ m_assembler.vcvt_F64_S32(dest, fpTempRegister);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp_F64(left, right);
+ m_assembler.vmrs_APSR_nzcv_FPSCR();
+ return makeBranch(cond);
+ }
+
+ Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
+ {
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
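+ //
+ // For example (illustrative): on this port push(src) assembles to
+ // "str src, [sp, #-4]!" and pop(dest) to "ldr dest, [sp], #4".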
+
+ void pop(RegisterID dest)
+ {
+ // load postindexed with writeback
+ m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+ }
+
+ void push(RegisterID src)
+ {
+ // store preindexed with writeback
+ m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ uint32_t value = imm.m_value;
+
+ if (imm.m_isPointer)
+ moveFixedWidthEncoding(imm, dest);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+ if (armImm.isValid())
+ m_assembler.mov(dest, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+ m_assembler.mvn(dest, armImm);
+ else {
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+ if (value & 0xffff0000)
+ m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+ }
+ }
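+
+ // Encoding selection above, by way of example (illustrative constants):
+ // 0xff00ff00 matches a Thumb-2 encoded-immediate pattern and becomes a
+ // single MOV; 0xffff00ff is the complement of an encodable value and
+ // becomes MVN; an arbitrary constant such as 0x12345678 falls back to
+ // the MOVW/MOVT pair.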
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mov(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, dataTempRegister);
+ move(reg2, reg1);
+ move(dataTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations returns a Jump
+ // object which may be linked at a later point, allowing forward jumps,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively, for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
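+ //
+ // For example (illustrative): branch32(LessThanOrEqual, reg1, Imm32(5))
+ // branches if the value in reg1, treated as signed, is less than or
+ // equal to 5; BelowOrEqual performs the same comparison unsigned.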
+private:
+
+ // Should we be using TEQ for equal/not-equal?
+ void compare32(RegisterID left, Imm32 right)
+ {
+ int32_t imm = right.m_value;
+ if (!imm)
+ m_assembler.tst(left, left);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
+ else {
+ move(Imm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
+ }
+ }
+ }
+
+ void test32(RegisterID reg, Imm32 mask)
+ {
+ int32_t imm = mask.m_value;
+
+ if (imm == -1)
+ m_assembler.tst(reg, reg);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.tst(reg, armImm);
+ else {
+ move(mask, dataTempRegister);
+ m_assembler.tst(reg, dataTempRegister);
+ }
+ }
+ }
+
+public:
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32WithUnalignedHalfWords(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left.m_ptr, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ load16(left, dataTempRegister);
+ m_assembler.lsl(addressTempRegister, right, 16);
+ m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
+ return branch32(cond, dataTempRegister, addressTempRegister);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load16(left, addressTempRegister);
+ m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
+ return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
+ }
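+
+ // (The branch16 variants above compare halfwords by shifting both
+ // operands into the top half of the register, so that the full 32-bit
+ // compare sets the flags consistently for signed and unsigned
+ // conditions.)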
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(makeJump());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ load32(address, dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
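+ //
+ // For example (illustrative): branchAdd32(Overflow, src, dest) emits a
+ // flag-setting ADDS and branches if the signed addition overflowed,
+ // leaving the wrapped sum in dest.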
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.add_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add_S(dest, dest, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
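+
+ // (The two branchMul32 variants above detect overflow by forming the
+ // full 64-bit product with SMULL and branching when the high word
+ // differs from the sign extension of the low word.)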
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.sub_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub_S(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub_S(dest, dest, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ }
+
+ Call nearCall()
+ {
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ }
+
+ Call call()
+ {
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ Call call(Address address)
+ {
+ load32(address, dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::None);
+ }
+
+ void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ compare32(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ // FIXME:
+ // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load32(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+
+ DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
+ {
+ moveFixedWidthEncoding(imm, dst);
+ return DataLabel32(this);
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
+ {
+ moveFixedWidthEncoding(Imm32(imm), dst);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ return label;
+ }
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
+
+
+ Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+
+protected:
+ ARMv7Assembler::JmpSrc makeJump()
+ {
+ return m_assembler.b();
+ }
+
+ ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
+ {
+ m_assembler.it(cond);
+ return m_assembler.b();
+ }
+ ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
+ ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+ ArmAddress setupArmAddress(BaseIndex address)
+ {
+ if (address.offset) {
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(Imm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return ArmAddress(addressTempRegister, address.index, address.scale);
+ } else
+ return ArmAddress(address.base, address.index, address.scale);
+ }
+
+ ArmAddress setupArmAddress(Address address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(Imm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ ArmAddress setupArmAddress(ImplicitAddress address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(Imm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ RegisterID makeBaseIndexBase(BaseIndex address)
+ {
+ if (!address.offset)
+ return address.base;
+
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(Imm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return addressTempRegister;
+ }
+
+ void moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
+ {
+ uint32_t value = imm.m_value;
+ m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+ m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+
+ ARMv7Assembler::Condition armV7Condition(Condition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
new file mode 100644
index 000000000..568260a4f
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerCodeRef_h
+#define MacroAssemblerCodeRef_h
+
+#include <wtf/Platform.h>
+
+#include "ExecutableAllocator.h"
+#include "PassRefPtr.h"
+#include "RefPtr.h"
+#include "UnusedParam.h"
+
+#if ENABLE(ASSEMBLER)
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, check any alignment requirements).
+#if PLATFORM(ARM_THUMB2)
+// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
+// into the processor are decorated with the bottom bit set, indicating that this is
+ // thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
+ // decorated and undecorated null, and the second test ensures that the pointer is
+// decorated.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+ ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
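+
+// For example (illustrative): Thumb code assembled at 0x00001000 is handed
+// around as the decorated pointer 0x00001001. ASSERT_VALID_CODE_POINTER
+// accepts 0x00001001 but rejects both 0x00001000 (undecorated) and 0x1
+// (a decorated null).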
+
+namespace JSC {
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
+class FunctionPtr {
+public:
+ FunctionPtr()
+ : m_value(0)
+ {
+ }
+
+ template<typename FunctionType>
+ explicit FunctionPtr(FunctionType* value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+ void* executableAddress() const { return m_value; }
+
+
+private:
+ void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+ // 'call' instructions executed in JIT code. We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+ ReturnAddressPtr()
+ : m_value(0)
+ {
+ }
+
+ explicit ReturnAddressPtr(void* value)
+ : m_value(value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit ReturnAddressPtr(FunctionPtr function)
+ : m_value(function.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+ MacroAssemblerCodePtr()
+ : m_value(0)
+ {
+ }
+
+ explicit MacroAssemblerCodePtr(void* value)
+#if PLATFORM(ARM_THUMB2)
+ // Decorate the pointer as a thumb code pointer.
+ : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+ : m_value(value)
+#endif
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+ : m_value(ra.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* executableAddress() const { return m_value; }
+#if PLATFORM(ARM_THUMB2)
+ // To use this pointer as a data address remove the decoration.
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+ bool operator!()
+ {
+ return !m_value;
+ }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code. A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+public:
+ MacroAssemblerCodeRef()
+ : m_size(0)
+ {
+ }
+
+ MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
+ : m_code(code)
+ , m_executablePool(executablePool)
+ , m_size(size)
+ {
+ }
+
+ MacroAssemblerCodePtr m_code;
+ RefPtr<ExecutablePool> m_executablePool;
+ size_t m_size;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerCodeRef_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h
new file mode 100644
index 000000000..6e962406d
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_h
+#define MacroAssemblerX86_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(X86)
+
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Common {
+public:
+ MacroAssemblerX86()
+ : m_isSSE2Present(isSSE2Present())
+ {
+ }
+
+ static const Scale ScalePtr = TimesFour;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::branch32;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ }
+
+ void addWithCarry32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.adcl_im(imm.m_value, address.m_ptr);
+ }
+
+ void and32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.andl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.orl_im(imm.m_value, address.m_ptr);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.subl_im(imm.m_value, address.m_ptr);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address, dest);
+ }
+
+ void loadDouble(void* address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.movl_rm(src, address);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
+ return DataLabelPtr(this);
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ load32(address, dest);
+ return label;
+ }
+
+ bool supportsFloatingPoint() const { return m_isSSE2Present; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
+
+private:
+ const bool m_isSSE2Present;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ X86Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h
new file mode 100644
index 000000000..5ebefa7df
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86Common_h
+#define MacroAssemblerX86Common_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "X86Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+public:
+
+ enum Condition {
+ Equal = X86Assembler::ConditionE,
+ NotEqual = X86Assembler::ConditionNE,
+ Above = X86Assembler::ConditionA,
+ AboveOrEqual = X86Assembler::ConditionAE,
+ Below = X86Assembler::ConditionB,
+ BelowOrEqual = X86Assembler::ConditionBE,
+ GreaterThan = X86Assembler::ConditionG,
+ GreaterThanOrEqual = X86Assembler::ConditionGE,
+ LessThan = X86Assembler::ConditionL,
+ LessThanOrEqual = X86Assembler::ConditionLE,
+ Overflow = X86Assembler::ConditionO,
+ Signed = X86Assembler::ConditionS,
+ Zero = X86Assembler::ConditionE,
+ NonZero = X86Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = X86Assembler::ConditionE,
+ DoubleNotEqual = X86Assembler::ConditionNE,
+ DoubleGreaterThan = X86Assembler::ConditionA,
+ DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+ DoubleLessThan = X86Assembler::ConditionB,
+ DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+ };
+
+ static const RegisterID stackPointerRegister = X86Registers::esp;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+ // For many operations the source may be an Imm32, the srcDst operand
+ // may often be a memory location (explicitly described using an Address
+ // object).
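+ //
+ // For example (illustrative): add32(Imm32(4), dest) computes dest += 4,
+ // while add32(Imm32(4), Address(base, 8)) adds 4 to the 32-bit value in
+ // memory at base + 8.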
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ m_assembler.addl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.addl_ir(imm.m_value, dest);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ m_assembler.addl_mr(src.offset, src.base, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ m_assembler.addl_rm(src, dest.offset, dest.base);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andl_rr(src, dest);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.andl_ir(imm.m_value, dest);
+ }
+
+ void and32(RegisterID src, Address dest)
+ {
+ m_assembler.andl_rm(src, dest.offset, dest.base);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ m_assembler.andl_mr(src.offset, src.base, dest);
+ }
+
+ void and32(Imm32 imm, Address address)
+ {
+ m_assembler.andl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86Registers::ecx) {
+ swap(shift_amount, X86Registers::ecx);
+
+ // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.shll_CLr(X86Registers::ecx);
+ // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86Registers::ecx)
+ m_assembler.shll_CLr(shift_amount);
+ // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.shll_CLr(dest);
+
+ swap(shift_amount, X86Registers::ecx);
+ } else
+ m_assembler.shll_CLr(dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_rr(src, dest);
+ }
+
+ void mul32(Address src, RegisterID dest)
+ {
+ m_assembler.imull_mr(src.offset, src.base, dest);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_i32r(src, imm.m_value, dest);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.negl_r(srcDest);
+ }
+
+ void neg32(Address srcDest)
+ {
+ m_assembler.negl_m(srcDest.offset, srcDest.base);
+ }
+
+ void not32(RegisterID srcDest)
+ {
+ m_assembler.notl_r(srcDest);
+ }
+
+ void not32(Address srcDest)
+ {
+ m_assembler.notl_m(srcDest.offset, srcDest.base);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orl_rr(src, dest);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.orl_ir(imm.m_value, dest);
+ }
+
+ void or32(RegisterID src, Address dest)
+ {
+ m_assembler.orl_rm(src, dest.offset, dest.base);
+ }
+
+ void or32(Address src, RegisterID dest)
+ {
+ m_assembler.orl_mr(src.offset, src.base, dest);
+ }
+
+ void or32(Imm32 imm, Address address)
+ {
+ m_assembler.orl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86Registers::ecx) {
+ swap(shift_amount, X86Registers::ecx);
+
+ // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.sarl_CLr(X86Registers::ecx);
+ // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86Registers::ecx)
+ m_assembler.sarl_CLr(shift_amount);
+ // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.sarl_CLr(dest);
+
+ swap(shift_amount, X86Registers::ecx);
+ } else
+ m_assembler.sarl_CLr(dest);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.sarl_i8r(imm.m_value, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subl_rr(src, dest);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.subl_ir(imm.m_value, dest);
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ m_assembler.subl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ m_assembler.subl_mr(src.offset, src.base, dest);
+ }
+
+ void sub32(RegisterID src, Address dest)
+ {
+ m_assembler.subl_rm(src, dest.offset, dest.base);
+ }
+
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorl_rr(src, dest);
+ }
+
+ void xor32(Imm32 imm, Address dest)
+ {
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.xorl_ir(imm.m_value, dest);
+ }
+
+ void xor32(RegisterID src, Address dest)
+ {
+ m_assembler.xorl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(Address src, RegisterID dest)
+ {
+ m_assembler.xorl_mr(src.offset, src.base, dest);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be an Imm32. Address
+ // operand objects to loads and store will be implicitly constructed if a
+ // register is passed.
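+ //
+ // For example (illustrative): load32(Address(base, 4), dest) performs
+ // dest = *(int32_t*)(base + 4), and store32(Imm32(0), Address(base))
+ // zeroes the word at base.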
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ m_assembler.movl_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ }
+
+
+ // Floating-point operation:
+ //
+ // Presently only supports SSE, not x87 floating point.
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, dest);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_rr(src, dest);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_mr(src.offset, src.base, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_rr(src, dest);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_mr(src.offset, src.base, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_rr(src, dest);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_mr(src.offset, src.base, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_rr(src, dest);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_mr(src.offset, src.base, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_rr(src, dest);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.ucomisd_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
+ {
+ m_assembler.ucomisd_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ return branch32(Equal, dest, Imm32(0x80000000));
+ }
+
+ void zeroDouble(FPRegisterID srcDest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(srcDest, srcDest);
+ }
+
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ m_assembler.push_m(address.offset, address.base);
+ }
+
+ void push(Imm32 imm)
+ {
+ m_assembler.push_i32(imm.m_value);
+ }
+
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ // Note: on 64-bit the Imm32 value is zero extended into the register; it
+ // may be useful to have a separate version that sign extends the value?
+ if (!imm.m_value)
+ m_assembler.xorl_rr(dest, dest);
+ else
+ m_assembler.movl_i32r(imm.m_value, dest);
+ }
+
+#if PLATFORM(X86_64)
+ void move(RegisterID src, RegisterID dest)
+ {
+ // Note: on 64-bit this is a full register move; perhaps it would be
+ // useful to have separate move32 & movePtr, with move32 zero extending?
+ if (src != dest)
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ m_assembler.xchgq_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movsxd_rr(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movl_rr(src, dest);
+ }
+#else
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movl_rr(src, dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movl_i32r(imm.asIntptr(), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgl_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+#endif
+
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations returns a Jump
+ // object which may be linked at a later point, allowing forward jumps,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively, for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32-bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
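+ //
+ // For example (illustrative): branchTest32(NonZero, reg, Imm32(0x7))
+ // branches if any of the low three bits of reg are set; omitting the
+ // mask tests the whole register.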
+
+public:
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpl_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFF0000));
+
+ m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.testl_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ // if we are only interested in the low seven bits, this can be tested with a testb
+ if (mask.m_value == -1)
+ m_assembler.testl_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testl_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmp_r(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ m_assembler.jmp_m(address.offset, address.base);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 src, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, RegisterID src, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ mul32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(Condition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ mul32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ mul32(imm, src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, Address dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Address src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ or32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.int3();
+ }
+
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ m_assembler.call_m(address.offset, address.base);
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ }
+
+ void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
+ void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_mr(left.offset, left.base, right);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
+ void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ // FIXME:
+ // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+
+ void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ }
+
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+protected:
+ X86Assembler::Condition x86Condition(Condition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ X86Assembler::Condition x86Condition(DoubleCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+private:
+ // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+ // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+ friend class MacroAssemblerX86;
+
+#if PLATFORM(X86)
+#if PLATFORM(MAC)
+
+ // All X86 Macs are guaranteed to support at least SSE2.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#else // PLATFORM(MAC)
+
+ enum SSE2CheckState {
+ NotCheckedSSE2,
+ HasSSE2,
+ NoSSE2
+ };
+
+ static bool isSSE2Present()
+ {
+ if (s_sse2CheckState == NotCheckedSSE2) {
+ // Default the flags value to zero; if the compiler is
+ // not MSVC or GCC we will read this as SSE2 not present.
+ int flags = 0;
+#if COMPILER(MSVC)
+ _asm {
+ mov eax, 1 // cpuid function 1 gives us the standard feature set
+ cpuid;
+ mov flags, edx;
+ }
+#elif COMPILER(GCC)
+ asm (
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ "movl %%edx, %0;"
+ : "=g" (flags)
+ :
+ : "%eax", "%ecx", "%edx"
+ );
+#endif
+ static const int SSE2FeatureBit = 1 << 26;
+ s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+ }
+ // Only check once.
+ ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+ return s_sse2CheckState == HasSSE2;
+ }
+
+ static SSE2CheckState s_sse2CheckState;
+
+#endif // PLATFORM(MAC)
+#elif !defined(NDEBUG) // PLATFORM(X86)
+
+ // On x86-64 we should never be checking for SSE2 in a non-debug build,
+ // but in debug builds we add this method to keep the asserts above happy.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86Common_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h
new file mode 100644
index 000000000..0f95fe62e
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_64_h
+#define MacroAssemblerX86_64_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(X86_64)
+
+#include "MacroAssemblerX86Common.h"
+
+#define REPATCH_OFFSET_CALL_R11 3
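+// 3 is the length in bytes of "call *%r11" (0x41 0xFF 0xD3); the patchable
+// movq imm64 that precedes it therefore ends exactly 3 bytes before the
+// call's return address.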
+
+namespace JSC {
+
+class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
+protected:
+ static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+
+public:
+ static const Scale ScalePtr = TimesEight;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ add32(imm, Address(scratchRegister));
+ }
+
+ void and32(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ and32(imm, Address(scratchRegister));
+ }
+
+ void or32(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ or32(imm, Address(scratchRegister));
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ sub32(imm, Address(scratchRegister));
+ }
+
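+ // Only %eax can be loaded directly from a 64-bit absolute address (the moffs
+ // form of MOV), so loads to other destinations shuffle through eax via swap().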
+ void load32(void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movl_mEAX(address);
+ else {
+ move(X86Registers::eax, dest);
+ m_assembler.movl_mEAX(address);
+ swap(X86Registers::eax, dest);
+ }
+ }
+
+ void loadDouble(void* address, FPRegisterID dest)
+ {
+ move(ImmPtr(address), scratchRegister);
+ loadDouble(scratchRegister, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
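+ // Note: this reads the int32 at code-generation time and plants it as an
+ // immediate; the memory is not re-read when the generated code runs.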
+ move(Imm32(*static_cast<int32_t*>(src.m_ptr)), scratchRegister);
+ m_assembler.cvtsi2sd_rr(scratchRegister, dest);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ move(X86Registers::eax, scratchRegister);
+ move(imm, X86Registers::eax);
+ m_assembler.movl_EAXm(address);
+ move(scratchRegister, X86Registers::eax);
+ }
+
+ Call call()
+ {
+ DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+ Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
+ ASSERT(differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
+ return result;
+ }
+
+ Call tailRecursiveCall()
+ {
+ DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+ ASSERT(differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+ ASSERT(differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addq_rr(src, dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID srcDest)
+ {
+ m_assembler.addq_ir(imm.m_value, srcDest);
+ }
+
+ void addPtr(ImmPtr imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ m_assembler.addq_rr(scratchRegister, dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leaq_mr(imm.m_value, src, dest);
+ }
+
+ void addPtr(Imm32 imm, Address address)
+ {
+ m_assembler.addq_im(imm.m_value, address.offset, address.base);
+ }
+
+ void addPtr(Imm32 imm, AbsoluteAddress address)
+ {
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ addPtr(imm, Address(scratchRegister));
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andq_rr(src, dest);
+ }
+
+ void andPtr(Imm32 imm, RegisterID srcDest)
+ {
+ m_assembler.andq_ir(imm.m_value, srcDest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orq_rr(src, dest);
+ }
+
+ void orPtr(ImmPtr imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ m_assembler.orq_rr(scratchRegister, dest);
+ }
+
+ void orPtr(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.orq_ir(imm.m_value, dest);
+ }
+
+ void rshiftPtr(RegisterID shift_amount, RegisterID dest)
+ {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+ // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86Registers::ecx) {
+ swap(shift_amount, X86Registers::ecx);
+
+ // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.sarq_CLr(X86Registers::ecx);
+ // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86Registers::ecx)
+ m_assembler.sarq_CLr(shift_amount);
+ // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.sarq_CLr(dest);
+
+ swap(shift_amount, X86Registers::ecx);
+ } else
+ m_assembler.sarq_CLr(dest);
+ }
+
+ void rshiftPtr(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.sarq_i8r(imm.m_value, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subq_rr(src, dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.subq_ir(imm.m_value, dest);
+ }
+
+ void subPtr(ImmPtr imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ m_assembler.subq_rr(scratchRegister, dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorq_rr(src, dest);
+ }
+
+ void xorPtr(Imm32 imm, RegisterID srcDest)
+ {
+ m_assembler.xorq_ir(imm.m_value, srcDest);
+ }
+
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void loadPtr(void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movq_mEAX(address);
+ else {
+ move(X86Registers::eax, dest);
+ m_assembler.movq_mEAX(address);
+ swap(X86Registers::eax, dest);
+ }
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ m_assembler.movq_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ if (src == X86Registers::eax)
+ m_assembler.movq_EAXm(address);
+ else {
+ swap(X86Registers::eax, src);
+ m_assembler.movq_EAXm(address);
+ swap(X86Registers::eax, src);
+ }
+ }
+
+ void storePtr(ImmPtr imm, ImplicitAddress address)
+ {
+ move(imm, scratchRegister);
+ storePtr(scratchRegister, address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ m_assembler.movq_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void movePtrToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testq_rr(left, left);
+ else
+ m_assembler.cmpq_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
+ {
+ move(right, scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+
+ Jump branchPtr(Condition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpq_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ move(ImmPtr(left.m_ptr), scratchRegister);
+ return branchPtr(cond, Address(scratchRegister), right);
+ }
+
+ Jump branchPtr(Condition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpq_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtr(Condition cond, Address left, ImmPtr right)
+ {
+ move(right, scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+
+ Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testq_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ // If we are only interested in the low seven bits, this can be tested with a testb;
+ // bit 7 is excluded so that the sign flag matches what a full-width test would set.
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ addPtr(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ subPtr(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ m_assembler.movq_i64r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
+ storePtr(scratchRegister, address);
+ return label;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ loadPtr(address, dest);
+ return label;
+ }
+
+ bool supportsFloatingPoint() const { return true; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ bool supportsFloatingPointTruncate() const { return true; }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (!call.isFlagSet(Call::Near))
+ X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPATCH_OFFSET_CALL_R11), function.value());
+ else
+ X86Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_64_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/RepatchBuffer.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/RepatchBuffer.h
new file mode 100644
index 000000000..89cbf06d1
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/RepatchBuffer.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RepatchBuffer_h
+#define RepatchBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "MacroAssembler.h"
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// RepatchBuffer:
+//
+// This class is used to modify code after code generation has been completed,
+// and after the code has potentially already been executed. This mechanism is
+// used to apply optimizations to the code.
+//
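+//
+// A usage sketch (the CodeBlock and the call-site locations come from the
+// wider JIT, not from this header):
+//
+//   RepatchBuffer repatchBuffer(codeBlock);   // makes the JIT region writable
+//   repatchBuffer.relink(callLocation, newTargetLabel);
+//   // ~RepatchBuffer() marks the region executable again.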
+class RepatchBuffer {
+ typedef MacroAssemblerCodePtr CodePtr;
+
+public:
+ RepatchBuffer(CodeBlock* codeBlock)
+ {
+ JITCode& code = codeBlock->getJITCode();
+ m_start = code.start();
+ m_size = code.size();
+
+ ExecutableAllocator::makeWritable(m_start, m_size);
+ }
+
+ ~RepatchBuffer()
+ {
+ ExecutableAllocator::makeExecutable(m_start, m_size);
+ }
+
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchJump(jump, destination);
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, destination);
+ }
+
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ MacroAssembler::repatchInt32(dataLabel32, value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ MacroAssembler::repatchPointer(dataLabelPtr, value);
+ }
+
+ void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ MacroAssembler::repatchLoadPtrToLEA(instruction);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+private:
+ void* m_start;
+ size_t m_size;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // RepatchBuffer_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h
new file mode 100644
index 000000000..cbbaaa5a1
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h
@@ -0,0 +1,2053 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef X86Assembler_h
+#define X86Assembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+
+#include "AssemblerBuffer.h"
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
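+// For example, CAN_SIGN_EXTEND_8_32(127) and CAN_SIGN_EXTEND_8_32(-128) are
+// true, while CAN_SIGN_EXTEND_8_32(128) is false.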
+
+namespace X86Registers {
+ typedef enum {
+ eax,
+ ecx,
+ edx,
+ ebx,
+ esp,
+ ebp,
+ esi,
+ edi,
+
+#if PLATFORM(X86_64)
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+#endif
+ } RegisterID;
+
+ typedef enum {
+ xmm0,
+ xmm1,
+ xmm2,
+ xmm3,
+ xmm4,
+ xmm5,
+ xmm6,
+ xmm7,
+ } XMMRegisterID;
+}
+
+class X86Assembler {
+public:
+ typedef X86Registers::RegisterID RegisterID;
+ typedef X86Registers::XMMRegisterID XMMRegisterID;
+ typedef XMMRegisterID FPRegisterID;
+
+ typedef enum {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE,
+ } Condition;
+
+private:
+ typedef enum {
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GvEv = 0x3B,
+#if PLATFORM(X86_64)
+ PRE_REX = 0x40,
+#endif
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#if PLATFORM(X86_64)
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_EvGv = 0x87,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ PRE_SSE_F2 = 0xF2,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+ OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
+ OP_GROUP5_Ev = 0xFF,
+ } OneByteOpcodeID;
+
+ typedef enum {
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_XORPD_VpdWpd = 0x57,
+ OP2_MOVD_VdEd = 0x6E,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ } TwoByteOpcodeID;
+
+ TwoByteOpcodeID jccRel32(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+ }
+
+ TwoByteOpcodeID setccOpcode(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP_SETCC + cond);
+ }
+
+ typedef enum {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ GROUP11_MOV = 0,
+ } GroupOpcodeID;
+
+ class X86InstructionFormatter;
+public:
+
+ class JmpSrc {
+ friend class X86Assembler;
+ friend class X86InstructionFormatter;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class X86Assembler;
+ friend class X86InstructionFormatter;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
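+ // Check that the offset fits in the 31-bit field below.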
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ bool m_used : 1;
+ };
+
+ X86Assembler()
+ {
+ }
+
+ size_t size() const { return m_formatter.size(); }
+
+ // Stack operations:
+
+ void push_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i32(int imm)
+ {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
+ }
+
+ void pop_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
+ }
+
+ // Arithmetic operations:
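+ //
+ // The immediate forms below pick the sign-extended 8-bit encoding
+ // (OP_GROUP1_EvIb) when the value fits in a signed byte, falling back to the
+ // 32-bit form (OP_GROUP1_EvIz) otherwise; e.g. addl_ir(1, eax) assembles to
+ // 83 C0 01 rather than 81 C0 01 00 00 00.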
+
+#if !PLATFORM(X86_64)
+ void adcl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void addl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
+ }
+
+ void addl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
+ }
+
+ void addl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+ }
+
+ void addl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
+ }
+
+ void addq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void addl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void andl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
+ }
+
+ void andl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+ }
+
+ void andl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+ }
+
+ void andl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
+ }
+
+ void andq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void andl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void negl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+
+ void negl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+ }
+
+ void notl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+ }
+
+ void notl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
+ }
+
+ void orl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
+ }
+
+ void orl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+ }
+
+ void orl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
+ }
+
+ void orq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void orl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void subl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
+ }
+
+ void subl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
+ }
+
+ void subl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+ }
+
+ void subl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void subq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
+ }
+
+ void subq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void subl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void xorl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+ }
+
+ void xorl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void xorl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void sarl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void sarl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void shll_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+ }
+
+#if PLATFORM(X86_64)
+ void sarq_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void sarq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+#endif
+
+ void imull_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
+ }
+
+ void imull_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+ }
+
+ void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
+ m_formatter.immediate32(value);
+ }
+
+ void idivl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+ }
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpl_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_ir_force32(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void cmpl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im_force32(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if PLATFORM(X86_64)
+ void cmpq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpq_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void cmpl_rm(RegisterID reg, void* addr)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
+ }
+
+ void cmpl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
+ void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void testl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if PLATFORM(X86_64)
+ void testq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
+ }
+
+ void testq_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void testw_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testb_i8r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void setCC_r(Condition cond, RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
+ }
+
+ void sete_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
+ }
+
+ void setz_r(RegisterID dst)
+ {
+ sete_r(dst);
+ }
+
+ void setne_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
+ }
+
+ void setnz_r(RegisterID dst)
+ {
+ setne_r(dst);
+ }
+
+ // Various move ops:
+
+ void cdq()
+ {
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ }
+
+#if PLATFORM(X86_64)
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ }
+#endif
+
+ void movl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_mEAX(void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#if PLATFORM(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_EAXm(void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#if PLATFORM(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+#if PLATFORM(X86_64)
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movq_mEAX(void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsxd_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
+ }
+
+
+#else
+ void movl_rm(RegisterID src, void* addr)
+ {
+ if (src == X86Registers::eax)
+ movl_EAXm(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+ }
+
+ void movl_mr(void* addr, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ movl_mEAX(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
+ }
+
+ void movl_i32m(int imm, void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
+ }
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst)
+ {
+ // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
+ // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
+ // REX prefixes are defined to be silently ignored by the processor.
+ m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
+ }
+
+ void leal_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_LEA, dst, base, offset);
+ }
+#if PLATFORM(X86_64)
+ void leaq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+ }
+#endif
+
+ // Flow control:
+
+ JmpSrc call()
+ {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc call(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
+ return JmpSrc(m_formatter.size());
+ }
+
+ void call_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+ }
+
+ JmpSrc jmp()
+ {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ // Return a JmpSrc so we have a label to the jump, so we can use this
+ // to make a tail recursive call on x86-64. The MacroAssembler
+ // really shouldn't wrap this as a Jump, since it can't be linked. :-/
+ JmpSrc jmp_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+ return JmpSrc(m_formatter.size());
+ }
+
+ void jmp_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
+ }
+
+ JmpSrc jne()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNE));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jnz()
+ {
+ return jne();
+ }
+
+ JmpSrc je()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionE));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jz()
+ {
+ return je();
+ }
+
+ JmpSrc jl()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionL));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jb()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionB));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jle()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionLE));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jbe()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionBE));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jge()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionGE));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jg()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionG));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc ja()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionA));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jae()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionAE));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jo()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionO));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionP));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc js()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionS));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jCC(Condition cond)
+ {
+ m_formatter.twoByteOp(jccRel32(cond));
+ return m_formatter.immediateRel32();
+ }
+
+ // SSE operations:
+
+ void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+
+ void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+ }
+
+#if !PLATFORM(X86_64)
+ void cvtsi2sd_mr(void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+
+ void movd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+#if PLATFORM(X86_64)
+ void movq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+#endif
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+#if !PLATFORM(X86_64)
+ void movsd_mr(void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
+ void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
+ m_formatter.immediate8(whichWord);
+ }
+
+ void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ // Misc instructions:
+
+ void int3()
+ {
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ret()
+ {
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void predictNotTaken()
+ {
+ m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
+ }
+
+ // Assembler admin methods:
+
+ JmpDst label()
+ {
+ return JmpDst(m_formatter.size());
+ }
+
+ static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+ {
+ return JmpDst(jump.m_offset + offset);
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ m_formatter.oneByteOp(OP_HLT);
+
+ return label();
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
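+ //
+ // For example, while the code is still in the AssemblerBuffer:
+ //   JmpSrc branch = jne();
+ //   ... emit the not-taken path ...
+ //   linkJump(branch, label());   // patches the rel32 within the buffer
+ // whereas relinkJump() / repatchPointer() edit finalized, executable copies.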
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ASSERT(from.m_offset != -1);
+ ASSERT(to.m_offset != -1);
+
+ char* code = reinterpret_cast<char*>(m_formatter.data());
+ setRel32(code + from.m_offset, code + to.m_offset);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkPointer(void* code, JmpDst where, void* value)
+ {
+ ASSERT(where.m_offset != -1);
+
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ setInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ setPointer(where, value);
+ }
+
+ static void repatchLoadPtrToLEA(void* where)
+ {
+#if PLATFORM(X86_64)
+ // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
+ // Skip over the prefix byte.
+ where = reinterpret_cast<char*>(where) + 1;
+#endif
+ *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ ASSERT(call.m_offset >= 0);
+ return call.m_offset;
+ }
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ ASSERT(jump.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst destination)
+ {
+ ASSERT(destination.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ void* copy = m_formatter.executableCopy(allocator);
+ ASSERT(copy);
+ return copy;
+ }
+
+private:
+
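+ // These helpers take a pointer to the byte just past the field being written
+ // (JmpSrc/JmpDst offsets record end-of-field positions), hence the [-1]
+ // indexing. setRel32 therefore computes the displacement relative to the end
+ // of the rel32 field, matching the processor's relative-branch semantics.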
+ static void setPointer(void* where, void* value)
+ {
+ reinterpret_cast<void**>(where)[-1] = value;
+ }
+
+ static void setInt32(void* where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void setRel32(void* from, void* to)
+ {
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ ASSERT(offset == static_cast<int32_t>(offset));
+
+ setInt32(from, offset);
+ }
+
+ class X86InstructionFormatter {
+
+ static const int maxInstructionSize = 16;
+
+ public:
+
+ // Legacy prefix bytes:
+ //
+ // These are emitted prior to the instruction.
+
+ void prefix(OneByteOpcodeID pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+ // * Two registers - encode a register-form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+ // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
+ // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a void*.
+ // On 64-bit targets REX prefixes will be planted as necessary where high-numbered registers are used.
+ //
+ // The twoByteOp methods plant two-byte instruction sequences (first opcode byte 0x0F).
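+ //
+ // For example (illustrative only - OP_ADD_EvGv is an assumption about the
+ // opcode enum defined earlier in this file):
+ //
+ //     oneByteOp(OP_RET);                         // no operands
+ //     oneByteOp(OP_ADD_EvGv, src, dst);          // two registers (ModRm)
+ //     oneByteOp(OP_ADD_EvGv, src, base, offset); // register + memory operand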
+
+ void oneByteOp(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !PLATFORM(X86_64)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !PLATFORM(X86_64)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+#if PLATFORM(X86_64)
+ // Quad-word-sized operands:
+ //
+ // Used to format 64-bit operations, planting a REX.w prefix.
+ // When planting d64 or f64 instructions, which do not require a REX.w prefix,
+ // the normal (non-'64'-postfixed) formatters should be used.
+
+ void oneByteOp64(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+#endif
+
+ // Byte-operands:
+ //
+ // These methods format byte operations. Byte operations differ from the normal
+ // formatters in the circumstances under which they will decide to emit REX prefixes.
+ // These should be used where any register operand signifies a byte register.
+ //
+ // The distinction is due to the handling of register numbers in the range 4..7 on
+ // x86-64. These register numbers may either represent the second byte of the first
+ // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
+ //
+ // Since ah..bh cannot be used in all permutations of operands (specifically cannot
+ // be accessed where a REX prefix is present), these are likely best treated as
+ // deprecated. In order to ensure the correct registers spl..dil are selected a
+ // REX prefix will be emitted for any byte register operand in the range 4..15.
+ //
+ // These formatters may be used in instructions that mix operand sizes, in which
+ // case an unnecessary REX prefix will be emitted, for example:
+ // movzbl %al, %edi
+ // In this case a REX will be planted since edi is 7 (and were this a byte operand
+ // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
+ // be silently ignored by the processor.
+ //
+ // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
+ // is provided to check byte register operands.
+
+ void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ // Immediates:
+ //
+ // An immediate should be appended where appropriate after an op has been emitted.
+ // The writes are unchecked since the opcode formatters above will have ensured space.
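+ //
+ // For example, an add-immediate might be planted as (illustrative; the group
+ // opcode and extension names are assumptions about enums defined earlier in
+ // this file):
+ //
+ //     oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ //     immediate32(imm);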
+
+ void immediate8(int imm)
+ {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ void immediate16(int imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ void immediate32(int imm)
+ {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
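+ // Plants a zero rel32 placeholder and returns a JmpSrc recording the offset
+ // just past it, for later patching via linkJump()/linkCall().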
+ JmpSrc immediateRel32()
+ {
+ m_buffer.putIntUnchecked(0);
+ return JmpSrc(m_buffer.size());
+ }
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+ void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ private:
+
+ // Internals; ModRm and REX formatters.
+
+ static const RegisterID noBase = X86Registers::ebp;
+ static const RegisterID hasSib = X86Registers::esp;
+ static const RegisterID noIndex = X86Registers::esp;
+#if PLATFORM(X86_64)
+ static const RegisterID noBase2 = X86Registers::r13;
+ static const RegisterID hasSib2 = X86Registers::r12;
+
+ // Registers r8 & above require a REX prefix.
+ inline bool regRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::r8);
+ }
+
+ // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
+ inline bool byteRegRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::esp);
+ }
+
+ // Format a REX prefix byte.
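+ // The encoding is 0100WRXB: W selects a 64-bit operand size, while R, X and B
+ // supply bit 3 of the ModRM reg field, the SIB index, and the ModRM r/m
+ // (or SIB base / opcode register) field respectively.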
+ inline void emitRex(bool w, int r, int x, int b)
+ {
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+ }
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ inline void emitRexW(int r, int x, int b)
+ {
+ emitRex(true, r, x, b);
+ }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+ // regRequiresRex() to check other registers (i.e. address base & index).
+ inline void emitRexIf(bool condition, int r, int x, int b)
+ {
+ if (condition) emitRex(false, r, x, b);
+ }
+
+ // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
+ inline void emitRexIfNeeded(int r, int x, int b)
+ {
+ emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
+ }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ inline bool regRequiresRex(int) { return false; }
+ inline bool byteRegRequiresRex(int) { return false; }
+ inline void emitRexIf(bool, int, int, int) {}
+ inline void emitRexIfNeeded(int, int, int) {}
+#endif
+
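+ // A ModRM byte packs mode (bits 7-6), reg/opcode (bits 5-3) and r/m
+ // (bits 2-0); the three memory modes differ in the size of the displacement
+ // that follows (none, 8-bit, or 32-bit).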
+ enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister,
+ };
+
+ void putModRm(ModRmMode mode, int reg, RegisterID rm)
+ {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
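+ // A SIB byte packs scale (bits 7-6), index (bits 5-3) and base (bits 2-0);
+ // its presence is signalled by setting the ModRM r/m field to hasSib (esp).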
+ void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(mode != ModRmRegister);
+
+ putModRm(mode, reg, hasSib);
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+ }
+
+ void registerModRM(int reg, RegisterID rm)
+ {
+ putModRm(ModRmRegister, reg, rm);
+ }
+
+ void memoryModRM(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
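+ // (For example, a load from [esp + 8] can only be encoded via a SIB byte with
+ // base = esp and index = noIndex.)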
+#if PLATFORM(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#if PLATFORM(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRm(ModRmMemoryNoDisp, reg, base);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
+
+ void memoryModRM_disp32(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if PLATFORM(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ ASSERT(index != noIndex);
+
+#if PLATFORM(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+#if !PLATFORM(X86_64)
+ void memoryModRM(int reg, void* address)
+ {
+ // On 32-bit x86, mod=00 with r/m=ebp (noBase) does not mean "[ebp], no
+ // displacement" - it instead encodes an absolute 32-bit address (disp32).
+ putModRm(ModRmMemoryNoDisp, reg, noBase);
+ m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
+ }
+#endif
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+
+#endif // X86Assembler_h