Diffstat (limited to 'src/3rdparty/masm/assembler/X86Assembler.h')
-rw-r--r--  src/3rdparty/masm/assembler/X86Assembler.h  151
1 file changed, 76 insertions(+), 75 deletions(-)
diff --git a/src/3rdparty/masm/assembler/X86Assembler.h b/src/3rdparty/masm/assembler/X86Assembler.h
index e8ae687036..ab80e42e79 100644
--- a/src/3rdparty/masm/assembler/X86Assembler.h
+++ b/src/3rdparty/masm/assembler/X86Assembler.h
@@ -20,7 +20,7 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef X86Assembler_h
@@ -62,6 +62,7 @@ namespace X86Registers {
r14,
r15,
#endif
+ none = 0xff,
} RegisterID;
typedef enum {
@@ -163,7 +164,7 @@ private:
OP_HLT = 0xF4,
OP_GROUP3_EbIb = 0xF6,
OP_GROUP3_Ev = 0xF7,
- OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
+ OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
OP_GROUP5_Ev = 0xFF,
} OneByteOpcodeID;
@@ -201,12 +202,12 @@ private:
TwoByteOpcodeID jccRel32(Condition cond)
{
- return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+ return (TwoByteOpcodeID)(int(OP2_JCC_rel32) + cond);
}
TwoByteOpcodeID setccOpcode(Condition cond)
{
- return (TwoByteOpcodeID)(OP_SETCC + cond);
+ return (TwoByteOpcodeID)(int(OP_SETCC) + cond);
}
typedef enum {
@@ -219,12 +220,12 @@ private:
GROUP1_OP_CMP = 7,
GROUP1A_OP_POP = 0,
-
+
GROUP2_OP_ROL = 0,
GROUP2_OP_ROR = 1,
GROUP2_OP_RCL = 2,
GROUP2_OP_RCR = 3,
-
+
GROUP2_OP_SHL = 4,
GROUP2_OP_SHR = 5,
GROUP2_OP_SAR = 7,
@@ -245,7 +246,7 @@ private:
ESCAPE_DD_FSTP_doubleReal = 3,
} GroupOpcodeID;
-
+
class X86InstructionFormatter;
public:
@@ -307,7 +308,7 @@ public:
{
m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
}
-
+
#if !CPU(X86_64)
void addl_mr(const void* addr, RegisterID dst)
{
@@ -576,7 +577,7 @@ public:
m_formatter.immediate32(imm);
}
}
-
+
void subl_im(int imm, int offset, RegisterID base)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -670,12 +671,12 @@ public:
m_formatter.immediate32(imm);
}
}
-
+
void xorq_rm(RegisterID src, int offset, RegisterID base)
{
m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
}
-
+
void rorq_i8r(int imm, RegisterID dst)
{
if (imm == 1)
@@ -748,7 +749,7 @@ public:
{
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
}
-
+
void shrl_i8r(int imm, RegisterID dst)
{
if (imm == 1)
@@ -758,7 +759,7 @@ public:
m_formatter.immediate8(imm);
}
}
-
+
void shrl_CLr(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
@@ -833,7 +834,7 @@ public:
m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
-
+
void cmpl_im(int imm, int offset, RegisterID base)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -844,19 +845,19 @@ public:
m_formatter.immediate32(imm);
}
}
-
+
void cmpb_im(int imm, int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
m_formatter.immediate8(imm);
}
-
+
void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
m_formatter.immediate8(imm);
}
-
+
#if CPU(X86)
void cmpb_im(int imm, const void* addr)
{
@@ -984,7 +985,7 @@ public:
{
m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
}
-
+
void testl_i32r(int imm, RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
@@ -1007,7 +1008,7 @@ public:
m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
m_formatter.immediate8(imm);
}
-
+
void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
@@ -1056,14 +1057,14 @@ public:
m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
m_formatter.immediate32(imm);
}
-#endif
+#endif
void testw_rr(RegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_OPERAND_SIZE);
m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
}
-
+
void testb_i8r(int imm, RegisterID dst)
{
m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
@@ -1123,7 +1124,7 @@ public:
{
m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
}
-
+
void movl_rm(RegisterID src, int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
@@ -1138,7 +1139,7 @@ public:
{
m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
}
-
+
void movl_mEAX(const void* addr)
{
m_formatter.oneByteOp(OP_MOV_EAXOv);
@@ -1158,7 +1159,7 @@ public:
{
m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
}
-
+
void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
{
m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
@@ -1180,7 +1181,7 @@ public:
m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
m_formatter.immediate32(imm);
}
-
+
void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
@@ -1209,12 +1210,12 @@ public:
m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
m_formatter.immediate8(imm);
}
-
+
void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
}
-
+
void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.prefix(PRE_OPERAND_SIZE);
@@ -1295,22 +1296,22 @@ public:
m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
m_formatter.immediate64(imm);
}
-
+
void movsxd_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
}
-
-
+
+
#else
void movl_rm(RegisterID src, const void* addr)
{
if (src == X86Registers::eax)
movl_EAXm(addr);
- else
+ else
m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
}
-
+
void movl_mr(const void* addr, RegisterID dst)
{
if (dst == X86Registers::eax)
@@ -1350,7 +1351,7 @@ public:
{
m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
}
-
+
void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
@@ -1360,7 +1361,7 @@ public:
{
m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
}
-
+
void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
@@ -1403,13 +1404,13 @@ public:
m_formatter.oneByteOp(OP_CALL_rel32);
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel call(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
return m_formatter.label();
}
-
+
void call_m(int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
@@ -1420,7 +1421,7 @@ public:
m_formatter.oneByteOp(OP_JMP_rel32);
return m_formatter.immediateRel32();
}
-
+
// Return a AssemblerLabel so we have a label to the jump, so we can use this
// To make a tail recursive call on x86-64. The MacroAssembler
// really shouldn't wrap this as a Jump, since it can't be linked. :-/
@@ -1429,12 +1430,12 @@ public:
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
return m_formatter.label();
}
-
+
void jmp_m(int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
}
-
+
#if !CPU(X86_64)
void jmp_m(const void* address)
{
@@ -1447,7 +1448,7 @@ public:
m_formatter.twoByteOp(jccRel32(ConditionNE));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jnz()
{
return jne();
@@ -1458,7 +1459,7 @@ public:
m_formatter.twoByteOp(jccRel32(ConditionE));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jz()
{
return je();
@@ -1469,25 +1470,25 @@ public:
m_formatter.twoByteOp(jccRel32(ConditionL));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jb()
{
m_formatter.twoByteOp(jccRel32(ConditionB));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jle()
{
m_formatter.twoByteOp(jccRel32(ConditionLE));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jbe()
{
m_formatter.twoByteOp(jccRel32(ConditionBE));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jge()
{
m_formatter.twoByteOp(jccRel32(ConditionGE));
@@ -1505,13 +1506,13 @@ public:
m_formatter.twoByteOp(jccRel32(ConditionA));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jae()
{
m_formatter.twoByteOp(jccRel32(ConditionAE));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel jo()
{
m_formatter.twoByteOp(jccRel32(ConditionO));
@@ -1529,7 +1530,7 @@ public:
m_formatter.twoByteOp(jccRel32(ConditionP));
return m_formatter.immediateRel32();
}
-
+
AssemblerLabel js()
{
m_formatter.twoByteOp(jccRel32(ConditionS));
@@ -1610,7 +1611,7 @@ public:
m_formatter.prefix(PRE_SSE_F3);
m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
}
-
+
#if CPU(X86_64)
void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
{
@@ -1656,19 +1657,19 @@ public:
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
}
-
+
void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
}
-
+
void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.prefix(PRE_SSE_F3);
m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
}
-
+
void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
@@ -1680,7 +1681,7 @@ public:
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
}
-
+
void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F3);
@@ -1799,7 +1800,7 @@ public:
{
m_formatter.oneByteOp(OP_INT3);
}
-
+
void ret()
{
m_formatter.oneByteOp(OP_RET);
@@ -1816,7 +1817,7 @@ public:
{
return m_formatter.codeSize();
}
-
+
AssemblerLabel labelForWatchpoint()
{
AssemblerLabel result = m_formatter.label();
@@ -1826,7 +1827,7 @@ public:
m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
return result;
}
-
+
AssemblerLabel labelIgnoringWatchpoints()
{
return m_formatter.label();
@@ -1876,7 +1877,7 @@ public:
memcpy(&val, t_ptr, sizeof(T));
return val;
}
-
+
static void linkJump(void* code, AssemblerLabel from, void* to)
{
ASSERT(from.isSet());
@@ -1902,12 +1903,12 @@ public:
{
setRel32(from, to);
}
-
+
static void relinkCall(void* from, void* to)
{
setRel32(from, to);
}
-
+
static void repatchCompact(void* where, int32_t value)
{
ASSERT(value >= std::numeric_limits<int8_t>::min());
@@ -1924,7 +1925,7 @@ public:
{
setPointer(where, value);
}
-
+
static void* readPointer(void* where)
{
return reinterpret_cast<void**>(where)[-1];
@@ -1938,12 +1939,12 @@ public:
ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
*reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
}
-
+
static ptrdiff_t maxJumpReplacementSize()
{
return 5;
}
-
+
#if CPU(X86_64)
static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
{
@@ -1953,7 +1954,7 @@ public:
uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
ptr[1] = OP_MOV_EAXIv | (dst & 7);
-
+
union {
uint64_t asWord;
uint8_t asBytes[8];
@@ -1963,7 +1964,7 @@ public:
ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
}
#endif
-
+
static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
{
const int opcodeBytes = 1;
@@ -1980,7 +1981,7 @@ public:
for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
}
-
+
static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
{
ASSERT_UNUSED(offset, !offset);
@@ -1998,7 +1999,7 @@ public:
for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
}
-
+
static void replaceWithLoad(void* instructionStart)
{
uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
@@ -2016,7 +2017,7 @@ public:
RELEASE_ASSERT_NOT_REACHED();
}
}
-
+
static void replaceWithAddressComputation(void* instructionStart)
{
uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
@@ -2034,7 +2035,7 @@ public:
RELEASE_ASSERT_NOT_REACHED();
}
}
-
+
static unsigned getCallReturnOffset(AssemblerLabel call)
{
ASSERT(call.isSet());
@@ -2046,12 +2047,12 @@ public:
ASSERT(label.isSet());
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
}
-
+
static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
{
return b.m_offset - a.m_offset;
}
-
+
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
return m_formatter.executableCopy(globalData, ownerUID, effort);
@@ -2085,7 +2086,7 @@ private:
T *ptr = &reinterpret_cast<T*>(where)[idx];
memcpy(ptr, &value, sizeof(T));
}
-
+
static void setInt8(void* where, int8_t value)
{
reinterpret_cast<int8_t*>(where)[-1] = value;
@@ -2171,7 +2172,7 @@ private:
m_buffer.putByteUnchecked(opcode);
memoryModRM_disp32(reg, base, offset);
}
-
+
void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
{
m_buffer.ensureSpace(maxInstructionSize);
@@ -2285,7 +2286,7 @@ private:
m_buffer.putByteUnchecked(opcode);
memoryModRM_disp32(reg, base, offset);
}
-
+
void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
{
m_buffer.ensureSpace(maxInstructionSize);
@@ -2364,7 +2365,7 @@ private:
void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
{
m_buffer.ensureSpace(maxInstructionSize);
- emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
m_buffer.putByteUnchecked(opcode);
registerModRM(reg, rm);
@@ -2566,7 +2567,7 @@ private:
m_buffer.putIntUnchecked(offset);
}
}
-
+
void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
{
ASSERT(index != noIndex);