From f8bcf723ce920981705c6913fffb1f4d3de6793d Mon Sep 17 00:00:00 2001 From: Erik Verbruggen Date: Thu, 14 Jan 2016 15:39:17 +0100 Subject: V4: add Aarch64/ARM64 support. This uses the JavaScriptCore assembler rev. 195098. It is tested on iOS (for which it is disabled, as it only allows marking pages as executable when running from Xcode). Testing on Linux will be done when hardware arrives. Change-Id: I650e15fec03c27d4b326a2d70863a89b85cfc5c3 Reviewed-by: Simon Hausmann --- src/3rdparty/masm/assembler/ARM64Assembler.h | 3772 ++++++++++++++++++++ src/3rdparty/masm/assembler/ARMv7Assembler.h | 1 + .../masm/assembler/AbstractMacroAssembler.h | 52 +- src/3rdparty/masm/assembler/MacroAssembler.h | 12 +- src/3rdparty/masm/assembler/MacroAssemblerARM64.h | 3455 ++++++++++++++++++ src/3rdparty/masm/assembler/MacroAssemblerARMv7.h | 2 +- .../masm/disassembler/ARM64/A64DOpcode.cpp | 1202 +++++++ src/3rdparty/masm/disassembler/ARM64/A64DOpcode.h | 708 ++++ .../masm/disassembler/ARM64Disassembler.cpp | 72 + .../masm/disassembler/ARMv7/ARMv7DOpcode.cpp | 2 + .../masm/disassembler/ARMv7/ARMv7DOpcode.h | 2 +- .../masm/disassembler/ARMv7Disassembler.cpp | 2 +- src/3rdparty/masm/masm-defs.pri | 2 +- src/3rdparty/masm/masm.pri | 3 + src/3rdparty/masm/stubs/WTFStubs.cpp | 7 + src/3rdparty/masm/wtf/Platform.h | 14 +- src/3rdparty/masm/yarr/YarrJIT.cpp | 11 + src/qml/jit/qv4targetplatform_p.h | 115 +- src/qml/jsruntime/qv4global_p.h | 2 + 19 files changed, 9421 insertions(+), 15 deletions(-) create mode 100644 src/3rdparty/masm/assembler/ARM64Assembler.h create mode 100644 src/3rdparty/masm/assembler/MacroAssemblerARM64.h create mode 100644 src/3rdparty/masm/disassembler/ARM64/A64DOpcode.cpp create mode 100644 src/3rdparty/masm/disassembler/ARM64/A64DOpcode.h create mode 100644 src/3rdparty/masm/disassembler/ARM64Disassembler.cpp diff --git a/src/3rdparty/masm/assembler/ARM64Assembler.h b/src/3rdparty/masm/assembler/ARM64Assembler.h new file mode 100644 index 0000000000..8ec9823f33 --- /dev/null +++ b/src/3rdparty/masm/assembler/ARM64Assembler.h @@ -0,0 +1,3772 @@ +/* + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ARM64Assembler_h +#define ARM64Assembler_h + +#if ENABLE(ASSEMBLER) && CPU(ARM64) + +#include "AssemblerBuffer.h" +#include +#include +#include +#include + +#if OS(IOS) +#include +#endif + +#define CHECK_DATASIZE_OF(datasize) ASSERT(datasize == 32 || datasize == 64) +#define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32) +#define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64) +#define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize) +#define CHECK_VECTOR_DATASIZE() ASSERT(datasize == 64 || datasize == 128) +#define DATASIZE DATASIZE_OF(datasize) +#define MEMOPSIZE MEMOPSIZE_OF(datasize) +#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128) +#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32) +#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32) + +namespace JSC { + +ALWAYS_INLINE bool isInt9(int32_t value) +{ + return value == ((value << 23) >> 23); +} + +template +ALWAYS_INLINE bool isUInt12(Type value) +{ + return !(value & ~static_cast(0xfff)); +} + +template +ALWAYS_INLINE bool isValidScaledUImm12(int32_t offset) +{ + int32_t maxPImm = 4095 * (datasize / 8); + if (offset < 0) + return false; + if (offset > maxPImm) + return false; + if (offset & ((datasize / 8) - 1)) + return false; + return true; +} + +ALWAYS_INLINE bool isValidSignedImm9(int32_t value) +{ + return isInt9(value); +} + +ALWAYS_INLINE bool isInt7(int32_t value) +{ + return value == ((value << 25) >> 25); +} + +ALWAYS_INLINE bool isInt11(int32_t value) +{ + return value == ((value << 21) >> 21); +} + +ALWAYS_INLINE bool isUInt5(int32_t value) +{ + return !(value & ~0x1f); +} + +class UInt5 { +public: + explicit UInt5(int value) + : m_value(value) + { + ASSERT(isUInt5(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class UInt12 { +public: + explicit UInt12(int value) + : m_value(value) + { + ASSERT(isUInt12(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PostIndex { +public: + explicit PostIndex(int value) + : m_value(value) + { + ASSERT(isInt9(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PreIndex { +public: + explicit PreIndex(int value) + : m_value(value) + { + ASSERT(isInt9(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PairPostIndex { +public: + explicit PairPostIndex(int value) + : m_value(value) + { + ASSERT(isInt11(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class PairPreIndex { +public: + explicit PairPreIndex(int value) + : m_value(value) + { + ASSERT(isInt11(value)); + } + + operator int() { return m_value; } + +private: + int m_value; +}; + +class LogicalImmediate { +public: + static LogicalImmediate create32(uint32_t value) + { + // Check for 0, -1 - these cannot be encoded. + if (!value || !~value) + return InvalidLogicalImmediate; + + // First look for a 32-bit pattern, then for repeating 16-bit + // patterns, 8-bit, 4-bit, and finally 2-bit. 
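+        // Worked example (added comment, not in the upstream source): the value
+        // 0x0f0f0f0f has no single 32-bit range, but its high and low halfwords
+        // match, so it reduces to 0x0f0f and then to 0x0f, where findBitRange<8>
+        // succeeds with hsb:3, lsb:0, inverted:false and the immediate is
+        // encoded with an element width of 8.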
+ + unsigned hsb, lsb; + bool inverted; + if (findBitRange<32>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<32>(hsb, lsb, inverted); + + if ((value & 0xffff) != (value >> 16)) + return InvalidLogicalImmediate; + value &= 0xffff; + + if (findBitRange<16>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<16>(hsb, lsb, inverted); + + if ((value & 0xff) != (value >> 8)) + return InvalidLogicalImmediate; + value &= 0xff; + + if (findBitRange<8>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<8>(hsb, lsb, inverted); + + if ((value & 0xf) != (value >> 4)) + return InvalidLogicalImmediate; + value &= 0xf; + + if (findBitRange<4>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<4>(hsb, lsb, inverted); + + if ((value & 0x3) != (value >> 2)) + return InvalidLogicalImmediate; + value &= 0x3; + + if (findBitRange<2>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<2>(hsb, lsb, inverted); + + return InvalidLogicalImmediate; + } + + static LogicalImmediate create64(uint64_t value) + { + // Check for 0, -1 - these cannot be encoded. + if (!value || !~value) + return InvalidLogicalImmediate; + + // Look for a contiguous bit range. + unsigned hsb, lsb; + bool inverted; + if (findBitRange<64>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<64>(hsb, lsb, inverted); + + // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern. + if (static_cast(value) == static_cast(value >> 32)) + return create32(static_cast(value)); + return InvalidLogicalImmediate; + } + + int value() const + { + ASSERT(isValid()); + return m_value; + } + + bool isValid() const + { + return m_value != InvalidLogicalImmediate; + } + + bool is64bit() const + { + return m_value & (1 << 12); + } + +private: + LogicalImmediate(int value) + : m_value(value) + { + } + + // Generate a mask with bits in the range hsb..0 set, for example: + // hsb:63 = 0xffffffffffffffff + // hsb:42 = 0x000007ffffffffff + // hsb: 0 = 0x0000000000000001 + static uint64_t mask(unsigned hsb) + { + ASSERT(hsb < 64); + return 0xffffffffffffffffull >> (63 - hsb); + } + + template + static void partialHSB(uint64_t& value, unsigned&result) + { + if (value & (0xffffffffffffffffull << N)) { + result += N; + value >>= N; + } + } + + // Find the bit number of the highest bit set in a non-zero value, for example: + // 0x8080808080808080 = hsb:63 + // 0x0000000000000001 = hsb: 0 + // 0x000007ffffe00000 = hsb:42 + static unsigned highestSetBit(uint64_t value) + { + ASSERT(value); + unsigned hsb = 0; + partialHSB<32>(value, hsb); + partialHSB<16>(value, hsb); + partialHSB<8>(value, hsb); + partialHSB<4>(value, hsb); + partialHSB<2>(value, hsb); + partialHSB<1>(value, hsb); + return hsb; + } + + // This function takes a value and a bit width, where value obeys the following constraints: + // * bits outside of the width of the value must be zero. + // * bits within the width of value must neither be all clear or all set. + // The input is inspected to detect values that consist of either two or three contiguous + // ranges of bits. The output range hsb..lsb will describe the second range of the value. + // if the range is set, inverted will be false, and if the range is clear, inverted will + // be true. 
For example (with width 8): + // 00001111 = hsb:3, lsb:0, inverted:false + // 11110000 = hsb:3, lsb:0, inverted:true + // 00111100 = hsb:5, lsb:2, inverted:false + // 11000011 = hsb:5, lsb:2, inverted:true + template + static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted) + { + ASSERT(value & mask(width - 1)); + ASSERT(value != mask(width - 1)); + ASSERT(!(value & ~mask(width - 1))); + + // Detect cases where the top bit is set; if so, flip all the bits & set invert. + // This halves the number of patterns we need to look for. + const uint64_t msb = 1ull << (width - 1); + if ((inverted = (value & msb))) + value ^= mask(width - 1); + + // Find the highest set bit in value, generate a corresponding mask & flip all + // bits under it. + hsb = highestSetBit(value); + value ^= mask(hsb); + if (!value) { + // If this cleared the value, then the range hsb..0 was all set. + lsb = 0; + return true; + } + + // Try making one more mask, and flipping the bits! + lsb = highestSetBit(value); + value ^= mask(lsb); + if (!value) { + // Success - but lsb actually points to the hsb of a third range - add one + // to get to the lsb of the mid range. + ++lsb; + return true; + } + + return false; + } + + // Encodes the set of immN:immr:imms fields found in a logical immediate. + template + static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted) + { + // Check width is a power of 2! + ASSERT(!(width & (width -1))); + ASSERT(width <= 64 && width >= 2); + ASSERT(hsb >= lsb); + ASSERT(hsb < width); + + int immN = 0; + int imms = 0; + int immr = 0; + + // For 64-bit values this is easy - just set immN to true, and imms just + // contains the bit number of the highest set bit of the set range. For + // values with narrower widths, these are encoded by a leading set of + // one bits, followed by a zero bit, followed by the remaining set of bits + // being the high bit of the range. For a 32-bit immediate there are no + // leading one bits, just a zero followed by a five bit number. For a + // 16-bit immediate there is one one bit, a zero bit, and then a four bit + // bit-position, etc. + if (width == 64) + immN = 1; + else + imms = 63 & ~(width + width - 1); + + if (inverted) { + // if width is 64 & hsb is 62, then we have a value something like: + // 0x80000000ffffffff (in this case with lsb 32). + // The ror should be by 1, imms (effectively set width minus 1) is + // 32. Set width is full width minus cleared width. + immr = (width - 1) - hsb; + imms |= (width - ((hsb - lsb) + 1)) - 1; + } else { + // if width is 64 & hsb is 62, then we have a value something like: + // 0x7fffffff00000000 (in this case with lsb 32). + // The value is effectively rol'ed by lsb, which is equivalent to + // a ror by width - lsb (or 0, in the case where lsb is 0). imms + // is hsb - lsb. + immr = (width - lsb) & (width - 1); + imms |= hsb - lsb; + } + + return immN << 12 | immr << 6 | imms; + } + + static const int InvalidLogicalImmediate = -1; + + int m_value; +}; + +inline uint16_t getHalfword(uint64_t value, int which) +{ + return value >> (which << 4); +} + +namespace ARM64Registers { + +#define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + +// The following are defined as pairs of the following value: +// 1. type of the storage needed to save the register value by the JIT probe. +// 2. name of the register. 
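+// For example (illustrative; the actual expansions appear further down in this
+// file), the RegisterID enum is produced by passing a V() that keeps only the
+// register name:
+//     #define DECLARE_REGISTER(_type, _regName) _regName,
+//     FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+//     #undef DECLARE_REGISTER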
+#define FOR_EACH_CPU_GPREGISTER(V) \ + /* Parameter/result registers */ \ + V(void*, x0) \ + V(void*, x1) \ + V(void*, x2) \ + V(void*, x3) \ + V(void*, x4) \ + V(void*, x5) \ + V(void*, x6) \ + V(void*, x7) \ + /* Indirect result location register */ \ + V(void*, x8) \ + /* Temporary registers */ \ + V(void*, x9) \ + V(void*, x10) \ + V(void*, x11) \ + V(void*, x12) \ + V(void*, x13) \ + V(void*, x14) \ + V(void*, x15) \ + /* Intra-procedure-call scratch registers (temporary) */ \ + V(void*, x16) \ + V(void*, x17) \ + /* Platform Register (temporary) */ \ + V(void*, x18) \ + /* Callee-saved */ \ + V(void*, x19) \ + V(void*, x20) \ + V(void*, x21) \ + V(void*, x22) \ + V(void*, x23) \ + V(void*, x24) \ + V(void*, x25) \ + V(void*, x26) \ + V(void*, x27) \ + V(void*, x28) \ + /* Special */ \ + V(void*, fp) \ + V(void*, lr) \ + V(void*, sp) + +#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, pc) \ + V(void*, nzcv) \ + V(void*, fpsr) \ + +// ARM64 always has 32 FPU registers 128-bits each. See http://llvm.org/devmtg/2012-11/Northover-AArch64.pdf +// and Section 5.1.2 in http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf. +// However, we only use them for 64-bit doubles. +#define FOR_EACH_CPU_FPREGISTER(V) \ + /* Parameter/result registers */ \ + V(double, q0) \ + V(double, q1) \ + V(double, q2) \ + V(double, q3) \ + V(double, q4) \ + V(double, q5) \ + V(double, q6) \ + V(double, q7) \ + /* Callee-saved (up to 64-bits only!) */ \ + V(double, q8) \ + V(double, q9) \ + V(double, q10) \ + V(double, q11) \ + V(double, q12) \ + V(double, q13) \ + V(double, q14) \ + V(double, q15) \ + /* Temporary registers */ \ + V(double, q16) \ + V(double, q17) \ + V(double, q18) \ + V(double, q19) \ + V(double, q20) \ + V(double, q21) \ + V(double, q22) \ + V(double, q23) \ + V(double, q24) \ + V(double, q25) \ + V(double, q26) \ + V(double, q27) \ + V(double, q28) \ + V(double, q29) \ + V(double, q30) \ + V(double, q31) + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + ip0 = x16, + ip1 = x17, + x29 = fp, + x30 = lr, + zr = 0x3f, +} RegisterID; + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} FPRegisterID; + +static constexpr bool isSp(RegisterID reg) { return reg == sp; } +static constexpr bool isZr(RegisterID reg) { return reg == zr; } + +} // namespace ARM64Registers + +class ARM64Assembler { +public: + typedef ARM64Registers::RegisterID RegisterID; + typedef ARM64Registers::FPRegisterID FPRegisterID; + + static constexpr RegisterID firstRegister() { return ARM64Registers::x0; } + static constexpr RegisterID lastRegister() { return ARM64Registers::sp; } + + static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; } + static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; } + +private: + static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); } + static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); } + +public: + ARM64Assembler() + : m_indexOfLastWatchpoint(INT_MIN) + , m_indexOfTailOfLastWatchpoint(INT_MIN) + { + } + + AssemblerBuffer& buffer() { return m_buffer; } + + // (HS, LO, HI, LS) -> (AE, B, A, BE) + // (VS, VC) -> (O, NO) + typedef enum { + ConditionEQ, + ConditionNE, + ConditionHS, ConditionCS = ConditionHS, + ConditionLO, ConditionCC = ConditionLO, + ConditionMI, + ConditionPL, + 
ConditionVS, + ConditionVC, + ConditionHI, + ConditionLS, + ConditionGE, + ConditionLT, + ConditionGT, + ConditionLE, + ConditionAL, + ConditionInvalid + } Condition; + + static Condition invert(Condition cond) + { + return static_cast(cond ^ 1); + } + + typedef enum { + LSL, + LSR, + ASR, + ROR + } ShiftType; + + typedef enum { + UXTB, + UXTH, + UXTW, + UXTX, + SXTB, + SXTH, + SXTW, + SXTX + } ExtendType; + + enum SetFlags { + DontSetFlags, + S + }; + +#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 4) | (index)) +#define JUMP_ENUM_SIZE(jump) ((jump) >> 4) + enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0), + JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)), + JumpCondition = JUMP_ENUM_WITH_SIZE(2, 2 * sizeof(uint32_t)), + JumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)), + JumpTestBit = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)), + JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)), + JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)), + JumpCompareAndBranchFixedSize = JUMP_ENUM_WITH_SIZE(7, 2 * sizeof(uint32_t)), + JumpTestBitFixedSize = JUMP_ENUM_WITH_SIZE(8, 2 * sizeof(uint32_t)), + }; + enum JumpLinkType { + LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0), + LinkJumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 1 * sizeof(uint32_t)), + LinkJumpConditionDirect = JUMP_ENUM_WITH_SIZE(2, 1 * sizeof(uint32_t)), + LinkJumpCondition = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint32_t)), + LinkJumpCompareAndBranch = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint32_t)), + LinkJumpCompareAndBranchDirect = JUMP_ENUM_WITH_SIZE(5, 1 * sizeof(uint32_t)), + LinkJumpTestBit = JUMP_ENUM_WITH_SIZE(6, 2 * sizeof(uint32_t)), + LinkJumpTestBitDirect = JUMP_ENUM_WITH_SIZE(7, 1 * sizeof(uint32_t)), + }; + + class LinkRecord { + public: + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition) + { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; + } + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) + { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; + data.realTypes.m_is64Bit = is64Bit; + data.realTypes.m_compareRegister = compareRegister; + } + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) + { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; + data.realTypes.m_bitNumber = bitNumber; + data.realTypes.m_compareRegister = compareRegister; + } + void operator=(const LinkRecord& other) + { + data.copyTypes.content[0] = other.data.copyTypes.content[0]; + data.copyTypes.content[1] = other.data.copyTypes.content[1]; + data.copyTypes.content[2] = other.data.copyTypes.content[2]; + } + intptr_t from() const { return data.realTypes.m_from; } + void setFrom(intptr_t from) { data.realTypes.m_from = from; } + intptr_t to() const { return data.realTypes.m_to; } + JumpType type() const { return data.realTypes.m_type; } + JumpLinkType linkType() const { return data.realTypes.m_linkType; } + void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; } + 
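+        // Note (added comment): the accessors below read the bit-packed RealTypes
+        // view of the union, while operator=() above copies the record as three
+        // raw 64-bit words; the COMPILE_ASSERT in the union checks that both
+        // views have the same size.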
Condition condition() const { return data.realTypes.m_condition; } + bool is64Bit() const { return data.realTypes.m_is64Bit; } + unsigned bitNumber() const { return data.realTypes.m_bitNumber; } + RegisterID compareRegister() const { return data.realTypes.m_compareRegister; } + + private: + union { + struct RealTypes { + intptr_t m_from : 48; + intptr_t m_to : 48; + JumpType m_type : 8; + JumpLinkType m_linkType : 8; + Condition m_condition : 4; + unsigned m_bitNumber : 6; + RegisterID m_compareRegister : 6; + bool m_is64Bit : 1; + } realTypes; + struct CopyTypes { + uint64_t content[3]; + } copyTypes; + COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct); + } data; + }; + + // bits(N) VFPExpandImm(bits(8) imm8); + // + // Encoding of floating point immediates is a litte complicated. Here's a + // high level description: + // +/-m*2-n where m and n are integers, 16 <= m <= 31, 0 <= n <= 7 + // and the algirithm for expanding to a single precision float: + // return imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19); + // + // The trickiest bit is how the exponent is handled. The following table + // may help clarify things a little: + // 654 + // 100 01111100 124 -3 1020 01111111100 + // 101 01111101 125 -2 1021 01111111101 + // 110 01111110 126 -1 1022 01111111110 + // 111 01111111 127 0 1023 01111111111 + // 000 10000000 128 1 1024 10000000000 + // 001 10000001 129 2 1025 10000000001 + // 010 10000010 130 3 1026 10000000010 + // 011 10000011 131 4 1027 10000000011 + // The first column shows the bit pattern stored in bits 6-4 of the arm + // encoded immediate. The second column shows the 8-bit IEEE 754 single + // -precision exponent in binary, the third column shows the raw decimal + // value. IEEE 754 single-precision numbers are stored with a bias of 127 + // to the exponent, so the fourth column shows the resulting exponent. + // From this was can see that the exponent can be in the range -3..4, + // which agrees with the high level description given above. The fifth + // and sixth columns shows the value stored in a IEEE 754 double-precision + // number to represent these exponents in decimal and binary, given the + // bias of 1023. + // + // Ultimately, detecting doubles that can be encoded as immediates on arm + // and encoding doubles is actually not too bad. A floating point value can + // be encoded by retaining the sign bit, the low three bits of the exponent + // and the high 4 bits of the mantissa. To validly be able to encode an + // immediate the remainder of the mantissa must be zero, and the high part + // of the exponent must match the top bit retained, bar the highest bit + // which must be its inverse. + static bool canEncodeFPImm(double d) + { + // Discard the sign bit, the low two bits of the exponent & the highest + // four bits of the mantissa. 
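+        // Worked example (added comment, assuming IEEE 754 doubles): 1.5 is
+        // 0x3ff8000000000000; masking with 0x7fc0ffffffffffff yields
+        // 0x3fc0000000000000, so 1.5 is encodable. 0.1 (0x3fb999999999999a)
+        // keeps a non-zero mantissa tail after masking and is rejected.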
+ uint64_t masked = bitwise_cast(d) & 0x7fc0ffffffffffffull; + return (masked == 0x3fc0000000000000ull) || (masked == 0x4000000000000000ull); + } + + template + static bool canEncodePImmOffset(int32_t offset) + { + return isValidScaledUImm12(offset); + } + + static bool canEncodeSImmOffset(int32_t offset) + { + return isValidSignedImm9(offset); + } + +private: + int encodeFPImm(double d) + { + ASSERT(canEncodeFPImm(d)); + uint64_t u64 = bitwise_cast(d); + return (static_cast(u64 >> 56) & 0x80) | (static_cast(u64 >> 48) & 0x7f); + } + + template + int encodeShiftAmount(int amount) + { + ASSERT(!amount || datasize == (8 << amount)); + return amount; + } + + template + static int encodePositiveImmediate(unsigned pimm) + { + ASSERT(!(pimm & ((datasize / 8) - 1))); + return pimm / (datasize / 8); + } + + enum Datasize { + Datasize_32, + Datasize_64, + Datasize_64_top, + Datasize_16 + }; + + enum MemOpSize { + MemOpSize_8_or_128, + MemOpSize_16, + MemOpSize_32, + MemOpSize_64, + }; + + enum BranchType { + BranchType_JMP, + BranchType_CALL, + BranchType_RET + }; + + enum AddOp { + AddOp_ADD, + AddOp_SUB + }; + + enum BitfieldOp { + BitfieldOp_SBFM, + BitfieldOp_BFM, + BitfieldOp_UBFM + }; + + enum DataOp1Source { + DataOp_RBIT, + DataOp_REV16, + DataOp_REV32, + DataOp_REV64, + DataOp_CLZ, + DataOp_CLS + }; + + enum DataOp2Source { + DataOp_UDIV = 2, + DataOp_SDIV = 3, + DataOp_LSLV = 8, + DataOp_LSRV = 9, + DataOp_ASRV = 10, + DataOp_RORV = 11 + }; + + enum DataOp3Source { + DataOp_MADD = 0, + DataOp_MSUB = 1, + DataOp_SMADDL = 2, + DataOp_SMSUBL = 3, + DataOp_SMULH = 4, + DataOp_UMADDL = 10, + DataOp_UMSUBL = 11, + DataOp_UMULH = 12 + }; + + enum ExcepnOp { + ExcepnOp_EXCEPTION = 0, + ExcepnOp_BREAKPOINT = 1, + ExcepnOp_HALT = 2, + ExcepnOp_DCPS = 5 + }; + + enum FPCmpOp { + FPCmpOp_FCMP = 0x00, + FPCmpOp_FCMP0 = 0x08, + FPCmpOp_FCMPE = 0x10, + FPCmpOp_FCMPE0 = 0x18 + }; + + enum FPCondCmpOp { + FPCondCmpOp_FCMP, + FPCondCmpOp_FCMPE + }; + + enum FPDataOp1Source { + FPDataOp_FMOV = 0, + FPDataOp_FABS = 1, + FPDataOp_FNEG = 2, + FPDataOp_FSQRT = 3, + FPDataOp_FCVT_toSingle = 4, + FPDataOp_FCVT_toDouble = 5, + FPDataOp_FCVT_toHalf = 7, + FPDataOp_FRINTN = 8, + FPDataOp_FRINTP = 9, + FPDataOp_FRINTM = 10, + FPDataOp_FRINTZ = 11, + FPDataOp_FRINTA = 12, + FPDataOp_FRINTX = 14, + FPDataOp_FRINTI = 15 + }; + + enum FPDataOp2Source { + FPDataOp_FMUL, + FPDataOp_FDIV, + FPDataOp_FADD, + FPDataOp_FSUB, + FPDataOp_FMAX, + FPDataOp_FMIN, + FPDataOp_FMAXNM, + FPDataOp_FMINNM, + FPDataOp_FNMUL + }; + + enum SIMD3Same { + SIMD_LogicalOp_AND = 0x03 + }; + + enum FPIntConvOp { + FPIntConvOp_FCVTNS = 0x00, + FPIntConvOp_FCVTNU = 0x01, + FPIntConvOp_SCVTF = 0x02, + FPIntConvOp_UCVTF = 0x03, + FPIntConvOp_FCVTAS = 0x04, + FPIntConvOp_FCVTAU = 0x05, + FPIntConvOp_FMOV_QtoX = 0x06, + FPIntConvOp_FMOV_XtoQ = 0x07, + FPIntConvOp_FCVTPS = 0x08, + FPIntConvOp_FCVTPU = 0x09, + FPIntConvOp_FMOV_QtoX_top = 0x0e, + FPIntConvOp_FMOV_XtoQ_top = 0x0f, + FPIntConvOp_FCVTMS = 0x10, + FPIntConvOp_FCVTMU = 0x11, + FPIntConvOp_FCVTZS = 0x18, + FPIntConvOp_FCVTZU = 0x19, + }; + + enum LogicalOp { + LogicalOp_AND, + LogicalOp_ORR, + LogicalOp_EOR, + LogicalOp_ANDS + }; + + enum MemOp { + MemOp_STORE, + MemOp_LOAD, + MemOp_STORE_V128, + MemOp_LOAD_V128, + MemOp_PREFETCH = 2, // size must be 3 + MemOp_LOAD_signed64 = 2, // size may be 0, 1 or 2 + MemOp_LOAD_signed32 = 3 // size may be 0 or 1 + }; + + enum MemPairOpSize { + MemPairOp_32 = 0, + MemPairOp_LoadSigned_32 = 1, + MemPairOp_64 = 2, + + MemPairOp_V32 = MemPairOp_32, + 
MemPairOp_V64 = 1, + MemPairOp_V128 = 2 + }; + + enum MoveWideOp { + MoveWideOp_N = 0, + MoveWideOp_Z = 2, + MoveWideOp_K = 3 + }; + + enum LdrLiteralOp { + LdrLiteralOp_32BIT = 0, + LdrLiteralOp_64BIT = 1, + LdrLiteralOp_LDRSW = 2, + LdrLiteralOp_128BIT = 2 + }; + + static unsigned memPairOffsetShift(bool V, MemPairOpSize size) + { + // return the log2 of the size in bytes, e.g. 64 bit size returns 3 + if (V) + return size + 2; + return (size >> 1) + 2; + } + +public: + // Integer Instructions: + + template + ALWAYS_INLINE void adc(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(addSubtractWithCarry(DATASIZE, AddOp_ADD, setFlags, rm, rn, rd)); + } + + template + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!shift || shift == 12); + insn(addSubtractImmediate(DATASIZE, AddOp_ADD, setFlags, shift == 12, imm12, rn, rd)); + } + + template + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm) + { + add(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(addSubtractExtendedRegister(DATASIZE, AddOp_ADD, setFlags, rm, extend, amount, rn, rd)); + } + + template + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + if (isSp(rd) || isSp(rn)) { + ASSERT(shift == LSL); + ASSERT(!isSp(rm)); + add(rd, rn, rm, UXTX, amount); + } else + insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd)); + } + + ALWAYS_INLINE void adr(RegisterID rd, int offset) + { + insn(pcRelative(false, offset, rd)); + } + + ALWAYS_INLINE void adrp(RegisterID rd, int offset) + { + ASSERT(!(offset & 0xfff)); + insn(pcRelative(true, offset >> 12, rd)); + nopCortexA53Fix843419(); + } + + template + ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm) + { + and_(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, false, rm, amount, rn, rd)); + } + + template + ALWAYS_INLINE void and_(RegisterID rd, RegisterID rn, LogicalImmediate imm) + { + CHECK_DATASIZE(); + insn(logicalImmediate(DATASIZE, setFlags ? 
LogicalOp_ANDS : LogicalOp_AND, imm.value(), rn, rd)); + } + + template + ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, int shift) + { + ASSERT(shift < datasize); + sbfm(rd, rn, shift, datasize - 1); + } + + template + ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm) + { + asrv(rd, rn, rm); + } + + template + ALWAYS_INLINE void asrv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_ASRV, rn, rd)); + } + + ALWAYS_INLINE void b(int32_t offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + ASSERT(offset == (offset << 6) >> 6); + insn(unconditionalBranchImmediate(false, offset)); + } + + ALWAYS_INLINE void b_cond(Condition cond, int32_t offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + ASSERT(offset == (offset << 13) >> 13); + insn(conditionalBranchImmediate(offset, cond)); + } + + template + ALWAYS_INLINE void bfi(RegisterID rd, RegisterID rn, int lsb, int width) + { + bfm(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); + } + + template + ALWAYS_INLINE void bfm(RegisterID rd, RegisterID rn, int immr, int imms) + { + CHECK_DATASIZE(); + insn(bitfield(DATASIZE, BitfieldOp_BFM, immr, imms, rn, rd)); + } + + template + ALWAYS_INLINE void bfxil(RegisterID rd, RegisterID rn, int lsb, int width) + { + bfm(rd, rn, lsb, lsb + width - 1); + } + + template + ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm) + { + bic(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void bic(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, setFlags ? LogicalOp_ANDS : LogicalOp_AND, shift, true, rm, amount, rn, rd)); + } + + ALWAYS_INLINE void bl(int32_t offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + insn(unconditionalBranchImmediate(true, offset)); + } + + ALWAYS_INLINE void blr(RegisterID rn) + { + insn(unconditionalBranchRegister(BranchType_CALL, rn)); + } + + ALWAYS_INLINE void br(RegisterID rn) + { + insn(unconditionalBranchRegister(BranchType_JMP, rn)); + } + + ALWAYS_INLINE void brk(uint16_t imm) + { + insn(excepnGeneration(ExcepnOp_BREAKPOINT, imm, 0)); + } + + template + ALWAYS_INLINE void cbnz(RegisterID rt, int32_t offset = 0) + { + CHECK_DATASIZE(); + ASSERT(!(offset & 3)); + offset >>= 2; + insn(compareAndBranchImmediate(DATASIZE, true, offset, rt)); + } + + template + ALWAYS_INLINE void cbz(RegisterID rt, int32_t offset = 0) + { + CHECK_DATASIZE(); + ASSERT(!(offset & 3)); + offset >>= 2; + insn(compareAndBranchImmediate(DATASIZE, false, offset, rt)); + } + + template + ALWAYS_INLINE void ccmn(RegisterID rn, RegisterID rm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareRegister(DATASIZE, AddOp_ADD, rm, cond, rn, nzcv)); + } + + template + ALWAYS_INLINE void ccmn(RegisterID rn, UInt5 imm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareImmediate(DATASIZE, AddOp_ADD, imm, cond, rn, nzcv)); + } + + template + ALWAYS_INLINE void ccmp(RegisterID rn, RegisterID rm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareRegister(DATASIZE, AddOp_SUB, rm, cond, rn, nzcv)); + } + + template + ALWAYS_INLINE void ccmp(RegisterID rn, UInt5 imm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalCompareImmediate(DATASIZE, AddOp_SUB, imm, cond, rn, nzcv)); + } + + template + ALWAYS_INLINE void cinc(RegisterID rd, RegisterID rn, Condition cond) + { + csinc(rd, rn, rn, invert(cond)); + } + + 
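+    // Note (added comment): cinc above and cinv/cneg/cset/csetm below are the
+    // standard ARM64 aliases built on csinc/csinv/csneg with the condition
+    // inverted. E.g. "cset rd, EQ" emits "csinc rd, zr, zr, NE": when EQ holds
+    // the NE path is not taken, so rd = zr + 1 = 1; otherwise rd = zr = 0.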
template + ALWAYS_INLINE void cinv(RegisterID rd, RegisterID rn, Condition cond) + { + csinv(rd, rn, rn, invert(cond)); + } + + template + ALWAYS_INLINE void cls(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_CLS, rn, rd)); + } + + template + ALWAYS_INLINE void clz(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_CLZ, rn, rd)); + } + + template + ALWAYS_INLINE void cmn(RegisterID rn, UInt12 imm12, int shift = 0) + { + add(ARM64Registers::zr, rn, imm12, shift); + } + + template + ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm) + { + add(ARM64Registers::zr, rn, rm); + } + + template + ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + add(ARM64Registers::zr, rn, rm, extend, amount); + } + + template + ALWAYS_INLINE void cmn(RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + add(ARM64Registers::zr, rn, rm, shift, amount); + } + + template + ALWAYS_INLINE void cmp(RegisterID rn, UInt12 imm12, int shift = 0) + { + sub(ARM64Registers::zr, rn, imm12, shift); + } + + template + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm) + { + sub(ARM64Registers::zr, rn, rm); + } + + template + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + sub(ARM64Registers::zr, rn, rm, extend, amount); + } + + template + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + sub(ARM64Registers::zr, rn, rm, shift, amount); + } + + template + ALWAYS_INLINE void cneg(RegisterID rd, RegisterID rn, Condition cond) + { + csneg(rd, rn, rn, invert(cond)); + } + + template + ALWAYS_INLINE void csel(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, false, rm, cond, false, rn, rd)); + } + + template + ALWAYS_INLINE void cset(RegisterID rd, Condition cond) + { + csinc(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond)); + } + + template + ALWAYS_INLINE void csetm(RegisterID rd, Condition cond) + { + csinv(rd, ARM64Registers::zr, ARM64Registers::zr, invert(cond)); + } + + template + ALWAYS_INLINE void csinc(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, false, rm, cond, true, rn, rd)); + } + + template + ALWAYS_INLINE void csinv(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, true, rm, cond, false, rn, rd)); + } + + template + ALWAYS_INLINE void csneg(RegisterID rd, RegisterID rn, RegisterID rm, Condition cond) + { + CHECK_DATASIZE(); + insn(conditionalSelect(DATASIZE, true, rm, cond, true, rn, rd)); + } + + template + ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm) + { + eon(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void eon(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, true, rm, amount, rn, rd)); + } + + template + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm) + { + eor(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_EOR, shift, false, rm, amount, rn, rd)); + } + + template + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, LogicalImmediate imm) + { + 
CHECK_DATASIZE(); + insn(logicalImmediate(DATASIZE, LogicalOp_EOR, imm.value(), rn, rd)); + } + + template + ALWAYS_INLINE void extr(RegisterID rd, RegisterID rn, RegisterID rm, int lsb) + { + CHECK_DATASIZE(); + insn(extract(DATASIZE, rm, lsb, rn, rd)); + } + + ALWAYS_INLINE void hint(int imm) + { + insn(hintPseudo(imm)); + } + + ALWAYS_INLINE void hlt(uint16_t imm) + { + insn(excepnGeneration(ExcepnOp_HALT, imm, 0)); + } + + template + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2)); + } + + template + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2)); + } + + template + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldr(rt, rn, rm, UXTX, 0); + } + + template + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_LOAD, rm, extend, encodeShiftAmount(amount), rn, rt)); + } + + template + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_LOAD, encodePositiveImmediate(pimm), rn, rt)); + } + + template + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldr_literal(RegisterID rt, int offset = 0) + { + CHECK_DATASIZE(); + ASSERT(!(offset & 3)); + insn(loadRegisterLiteral(datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, false, offset >> 2, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm) + { + // Not calling the 5 argument form of ldrb, since is amount is ommitted S is false. 
+ insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, UXTX, false, rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT_UNUSED(amount, !amount); + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_LOAD, rm, extend, true, rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, encodePositiveImmediate<8>(pimm), rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldrh(rt, rn, rm, UXTX, 0); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT(!amount || amount == 1); + insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_LOAD, rm, extend, amount == 1, rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_LOAD, encodePositiveImmediate<16>(pimm), rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + // Not calling the 5 argument form of ldrsb, since is amount is ommitted S is false. + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, UXTX, false, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + ASSERT_UNUSED(amount, !amount); + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, true, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<8>(pimm), rn, rt)); + } + + template + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsb(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, (datasize == 64) ? 
MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldrsh(rt, rn, rm, UXTX, 0); + } + + template + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + ASSERT(!amount || amount == 1); + insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, rm, extend, amount == 1, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, encodePositiveImmediate<16>(pimm), rn, rt)); + } + + template + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldrsh(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPreIndex(MemOpSize_16, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm) + { + ldrsw(rt, rn, rm, UXTX, 0); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT(!amount || amount == 2); + insn(loadStoreRegisterRegisterOffset(MemOpSize_32, false, MemOp_LOAD_signed64, rm, extend, amount == 2, rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, encodePositiveImmediate<32>(pimm), rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrsw(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); + } + + ALWAYS_INLINE void ldrsw_literal(RegisterID rt, int offset = 0) + { + ASSERT(!(offset & 3)); + insn(loadRegisterLiteral(LdrLiteralOp_LDRSW, false, offset >> 2, rt)); + } + + template + ALWAYS_INLINE void ldur(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldurb(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_LOAD, simm, rn, rt)); + } + + ALWAYS_INLINE void ldurh(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldursb(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, (datasize == 64) ? MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldursh(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, (datasize == 64) ? 
MemOp_LOAD_signed64 : MemOp_LOAD_signed32, simm, rn, rt)); + } + + ALWAYS_INLINE void ldursw(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_32, false, MemOp_LOAD_signed64, simm, rn, rt)); + } + + template + ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, int shift) + { + ASSERT(shift < datasize); + ubfm(rd, rn, (datasize - shift) & (datasize - 1), datasize - 1 - shift); + } + + template + ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm) + { + lslv(rd, rn, rm); + } + + template + ALWAYS_INLINE void lslv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSLV, rn, rd)); + } + + template + ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, int shift) + { + ASSERT(shift < datasize); + ubfm(rd, rn, shift, datasize - 1); + } + + template + ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm) + { + lsrv(rd, rn, rm); + } + + template + ALWAYS_INLINE void lsrv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_LSRV, rn, rd)); + } + + template + ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + CHECK_DATASIZE(); + nopCortexA53Fix835769(); + insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd)); + } + + template + ALWAYS_INLINE void mneg(RegisterID rd, RegisterID rn, RegisterID rm) + { + msub(rd, rn, rm, ARM64Registers::zr); + } + + template + ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm) + { + if (isSp(rd) || isSp(rm)) + add(rd, rm, UInt12(0)); + else + orr(rd, ARM64Registers::zr, rm); + } + + template + ALWAYS_INLINE void movi(RegisterID rd, LogicalImmediate imm) + { + orr(rd, ARM64Registers::zr, imm); + } + + template + ALWAYS_INLINE void movk(RegisterID rd, uint16_t value, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!(shift & 0xf)); + insn(moveWideImediate(DATASIZE, MoveWideOp_K, shift >> 4, value, rd)); + } + + template + ALWAYS_INLINE void movn(RegisterID rd, uint16_t value, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!(shift & 0xf)); + insn(moveWideImediate(DATASIZE, MoveWideOp_N, shift >> 4, value, rd)); + } + + template + ALWAYS_INLINE void movz(RegisterID rd, uint16_t value, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!(shift & 0xf)); + insn(moveWideImediate(DATASIZE, MoveWideOp_Z, shift >> 4, value, rd)); + } + + template + ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + CHECK_DATASIZE(); + nopCortexA53Fix835769(); + insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd)); + } + + template + ALWAYS_INLINE void mul(RegisterID rd, RegisterID rn, RegisterID rm) + { + madd(rd, rn, rm, ARM64Registers::zr); + } + + template + ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm) + { + orn(rd, ARM64Registers::zr, rm); + } + + template + ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftType shift, int amount) + { + orn(rd, ARM64Registers::zr, rm, shift, amount); + } + + template + ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm) + { + sub(rd, ARM64Registers::zr, rm); + } + + template + ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm, ShiftType shift, int amount) + { + sub(rd, ARM64Registers::zr, rm, shift, amount); + } + + template + ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm) + { + sbc(rd, ARM64Registers::zr, rm); + } + + template + ALWAYS_INLINE void ngc(RegisterID rd, RegisterID rm, ShiftType shift, int amount) + { + sbc(rd, 
ARM64Registers::zr, rm, shift, amount); + } + + ALWAYS_INLINE void nop() + { + insn(nopPseudo()); + } + + static void fillNops(void* base, size_t size) + { + RELEASE_ASSERT(!(size % sizeof(int32_t))); + size_t n = size / sizeof(int32_t); + for (int32_t* ptr = static_cast(base); n--;) + *ptr++ = nopPseudo(); + } + + ALWAYS_INLINE void dmbSY() + { + insn(0xd5033fbf); + } + + template + ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm) + { + orn(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, true, rm, amount, rn, rd)); + } + + template + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm) + { + orr(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + insn(logicalShiftedRegister(DATASIZE, LogicalOp_ORR, shift, false, rm, amount, rn, rd)); + } + + template + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, LogicalImmediate imm) + { + CHECK_DATASIZE(); + insn(logicalImmediate(DATASIZE, LogicalOp_ORR, imm.value(), rn, rd)); + } + + template + ALWAYS_INLINE void rbit(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_RBIT, rn, rd)); + } + + ALWAYS_INLINE void ret(RegisterID rn = ARM64Registers::lr) + { + insn(unconditionalBranchRegister(BranchType_RET, rn)); + } + + template + ALWAYS_INLINE void rev(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + if (datasize == 32) // 'rev' mnemonic means REV32 or REV64 depending on the operand width. + insn(dataProcessing1Source(Datasize_32, DataOp_REV32, rn, rd)); + else + insn(dataProcessing1Source(Datasize_64, DataOp_REV64, rn, rd)); + } + + template + ALWAYS_INLINE void rev16(RegisterID rd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(dataProcessing1Source(DATASIZE, DataOp_REV16, rn, rd)); + } + + template + ALWAYS_INLINE void rev32(RegisterID rd, RegisterID rn) + { + ASSERT(datasize == 64); // 'rev32' only valid with 64-bit operands. 
+ insn(dataProcessing1Source(Datasize_64, DataOp_REV32, rn, rd)); + } + + template + ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm) + { + rorv(rd, rn, rm); + } + + template + ALWAYS_INLINE void ror(RegisterID rd, RegisterID rs, int shift) + { + extr(rd, rs, rs, shift); + } + + template + ALWAYS_INLINE void rorv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_RORV, rn, rd)); + } + + template + ALWAYS_INLINE void sbc(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(addSubtractWithCarry(DATASIZE, AddOp_SUB, setFlags, rm, rn, rd)); + } + + template + ALWAYS_INLINE void sbfiz(RegisterID rd, RegisterID rn, int lsb, int width) + { + sbfm(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); + } + + template + ALWAYS_INLINE void sbfm(RegisterID rd, RegisterID rn, int immr, int imms) + { + CHECK_DATASIZE(); + insn(bitfield(DATASIZE, BitfieldOp_SBFM, immr, imms, rn, rd)); + } + + template + ALWAYS_INLINE void sbfx(RegisterID rd, RegisterID rn, int lsb, int width) + { + sbfm(rd, rn, lsb, lsb + width - 1); + } + + template + ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_SDIV, rn, rd)); + } + + ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void smnegl(RegisterID rd, RegisterID rn, RegisterID rm) + { + smsubl(rd, rn, rm, ARM64Registers::zr); + } + + ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void smulh(RegisterID rd, RegisterID rn, RegisterID rm) + { + insn(dataProcessing3Source(Datasize_64, DataOp_SMULH, rm, ARM64Registers::zr, rn, rd)); + } + + ALWAYS_INLINE void smull(RegisterID rd, RegisterID rn, RegisterID rm) + { + smaddl(rd, rn, rm, ARM64Registers::zr); + } + + template + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2)); + } + + template + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2)); + } + + template + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm) + { + str(rt, rn, rm, UXTX, 0); + } + + template + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, false, MemOp_STORE, rm, extend, encodeShiftAmount(amount), rn, rt)); + } + + template + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, false, MemOp_STORE, encodePositiveImmediate(pimm), rn, rt)); + } + + template + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt)); + } + + template + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_DATASIZE(); + 
insn(loadStoreRegisterPreIndex(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm) + { + // Not calling the 5 argument form of strb, since is amount is ommitted S is false. + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, UXTX, false, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT_UNUSED(amount, !amount); + insn(loadStoreRegisterRegisterOffset(MemOpSize_8_or_128, false, MemOp_STORE, rm, extend, true, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_8_or_128, false, MemOp_STORE, encodePositiveImmediate<8>(pimm), rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm) + { + strh(rt, rn, rm, UXTX, 0); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + ASSERT(!amount || amount == 1); + insn(loadStoreRegisterRegisterOffset(MemOpSize_16, false, MemOp_STORE, rm, extend, amount == 1, rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, unsigned pimm) + { + insn(loadStoreRegisterUnsignedImmediate(MemOpSize_16, false, MemOp_STORE, encodePositiveImmediate<16>(pimm), rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PostIndex simm) + { + insn(loadStoreRegisterPostIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, PreIndex simm) + { + insn(loadStoreRegisterPreIndex(MemOpSize_16, false, MemOp_STORE, simm, rn, rt)); + } + + template + ALWAYS_INLINE void stur(RegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void sturb(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_8_or_128, false, MemOp_STORE, simm, rn, rt)); + } + + ALWAYS_INLINE void sturh(RegisterID rt, RegisterID rn, int simm) + { + insn(loadStoreRegisterUnscaledImmediate(MemOpSize_16, false, MemOp_STORE, simm, rn, rt)); + } + + template + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, UInt12 imm12, int shift = 0) + { + CHECK_DATASIZE(); + ASSERT(!shift || shift == 12); + insn(addSubtractImmediate(DATASIZE, AddOp_SUB, setFlags, shift == 12, imm12, rn, rd)); + } + + template + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm) + { + ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd, it uses XZR for the register 31. 
SUBS with extended register support SP for Xd, but only if SetFlag is not used, otherwise register 31 is Xd."); + ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand."); + + if (isSp(rd) || isSp(rn)) + sub(rd, rn, rm, UXTX, 0); + else + sub(rd, rn, rm, LSL, 0); + } + + template + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_DATASIZE(); + insn(addSubtractExtendedRegister(DATASIZE, AddOp_SUB, setFlags, rm, extend, amount, rn, rd)); + } + + template + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + CHECK_DATASIZE(); + ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm)); + insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd)); + } + + template + ALWAYS_INLINE void sxtb(RegisterID rd, RegisterID rn) + { + sbfm(rd, rn, 0, 7); + } + + template + ALWAYS_INLINE void sxth(RegisterID rd, RegisterID rn) + { + sbfm(rd, rn, 0, 15); + } + + ALWAYS_INLINE void sxtw(RegisterID rd, RegisterID rn) + { + sbfm<64>(rd, rn, 0, 31); + } + + ALWAYS_INLINE void tbz(RegisterID rt, int imm, int offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + insn(testAndBranchImmediate(false, imm, offset, rt)); + } + + ALWAYS_INLINE void tbnz(RegisterID rt, int imm, int offset = 0) + { + ASSERT(!(offset & 3)); + offset >>= 2; + insn(testAndBranchImmediate(true, imm, offset, rt)); + } + + template + ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm) + { + and_(ARM64Registers::zr, rn, rm); + } + + template + ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftType shift, int amount) + { + and_(ARM64Registers::zr, rn, rm, shift, amount); + } + + template + ALWAYS_INLINE void tst(RegisterID rn, LogicalImmediate imm) + { + and_(ARM64Registers::zr, rn, imm); + } + + template + ALWAYS_INLINE void ubfiz(RegisterID rd, RegisterID rn, int lsb, int width) + { + ubfm(rd, rn, (datasize - lsb) & (datasize - 1), width - 1); + } + + template + ALWAYS_INLINE void ubfm(RegisterID rd, RegisterID rn, int immr, int imms) + { + CHECK_DATASIZE(); + insn(bitfield(DATASIZE, BitfieldOp_UBFM, immr, imms, rn, rd)); + } + + template + ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, int lsb, int width) + { + ubfm(rd, rn, lsb, lsb + width - 1); + } + + template + ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm) + { + CHECK_DATASIZE(); + insn(dataProcessing2Source(DATASIZE, rm, DataOp_UDIV, rn, rd)); + } + + ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void umnegl(RegisterID rd, RegisterID rn, RegisterID rm) + { + umsubl(rd, rn, rm, ARM64Registers::zr); + } + + ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) + { + nopCortexA53Fix835769<64>(); + insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd)); + } + + ALWAYS_INLINE void umulh(RegisterID rd, RegisterID rn, RegisterID rm) + { + insn(dataProcessing3Source(Datasize_64, DataOp_UMULH, rm, ARM64Registers::zr, rn, rd)); + } + + ALWAYS_INLINE void umull(RegisterID rd, RegisterID rn, RegisterID rm) + { + umaddl(rd, rn, rm, ARM64Registers::zr); + } + + template + ALWAYS_INLINE void uxtb(RegisterID rd, RegisterID rn) + { + ubfm(rd, rn, 0, 7); + } + + template + ALWAYS_INLINE void uxth(RegisterID rd, RegisterID rn) + { + ubfm(rd, rn, 0, 15); + } + + ALWAYS_INLINE void 
uxtw(RegisterID rd, RegisterID rn) + { + ubfm<64>(rd, rn, 0, 31); + } + + // Floating Point Instructions: + + template + ALWAYS_INLINE void fabs(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FABS, vn, vd)); + } + + template + ALWAYS_INLINE void fadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FADD, vn, vd)); + } + + template + ALWAYS_INLINE void fccmp(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMP, nzcv)); + } + + template + ALWAYS_INLINE void fccmpe(FPRegisterID vn, FPRegisterID vm, int nzcv, Condition cond) + { + CHECK_DATASIZE(); + insn(floatingPointConditionalCompare(DATASIZE, vm, cond, vn, FPCondCmpOp_FCMPE, nzcv)); + } + + template + ALWAYS_INLINE void fcmp(FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMP)); + } + + template + ALWAYS_INLINE void fcmp_0(FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, static_cast(0), vn, FPCmpOp_FCMP0)); + } + + template + ALWAYS_INLINE void fcmpe(FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, vm, vn, FPCmpOp_FCMPE)); + } + + template + ALWAYS_INLINE void fcmpe_0(FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointCompare(DATASIZE, static_cast(0), vn, FPCmpOp_FCMPE0)); + } + + template + ALWAYS_INLINE void fcsel(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, Condition cond) + { + CHECK_DATASIZE(); + insn(floatingPointConditionalSelect(DATASIZE, vm, cond, vn, vd)); + } + + template + ALWAYS_INLINE void fcvt(FPRegisterID vd, FPRegisterID vn) + { + ASSERT(dstsize == 16 || dstsize == 32 || dstsize == 64); + ASSERT(srcsize == 16 || srcsize == 32 || srcsize == 64); + ASSERT(dstsize != srcsize); + Datasize type = (srcsize == 64) ? Datasize_64 : (srcsize == 32) ? Datasize_32 : Datasize_16; + FPDataOp1Source opcode = (dstsize == 64) ? FPDataOp_FCVT_toDouble : (dstsize == 32) ? 
FPDataOp_FCVT_toSingle : FPDataOp_FCVT_toHalf; + insn(floatingPointDataProcessing1Source(type, opcode, vn, vd)); + } + + template + ALWAYS_INLINE void fcvtas(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAS, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtau(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTAU, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtms(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMS, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtmu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTMU, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtns(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNS, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtnu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTNU, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtps(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPS, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtpu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTPU, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtzs(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZS, vn, rd)); + } + + template + ALWAYS_INLINE void fcvtzu(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(dstsize), DATASIZE_OF(srcsize), FPIntConvOp_FCVTZU, vn, rd)); + } + + template + ALWAYS_INLINE void fdiv(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FDIV, vn, vd)); + } + + template + ALWAYS_INLINE void fmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_ADD, va, vn, vd)); + } + + template + ALWAYS_INLINE void fmax(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAX, vn, vd)); + } + + template + ALWAYS_INLINE void fmaxnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMAXNM, vn, vd)); + } + + template + ALWAYS_INLINE void fmin(FPRegisterID vd, FPRegisterID vn, 
FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMIN, vn, vd)); + } + + template + ALWAYS_INLINE void fminnm(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMINNM, vn, vd)); + } + + template + ALWAYS_INLINE void fmov(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FMOV, vn, vd)); + } + + template + ALWAYS_INLINE void fmov(FPRegisterID vd, RegisterID rn) + { + CHECK_DATASIZE(); + insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_XtoQ, rn, vd)); + } + + template + ALWAYS_INLINE void fmov(RegisterID rd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointIntegerConversions(DATASIZE, DATASIZE, FPIntConvOp_FMOV_QtoX, vn, rd)); + } + + template + ALWAYS_INLINE void fmov(FPRegisterID vd, double imm) + { + CHECK_DATASIZE(); + insn(floatingPointImmediate(DATASIZE, encodeFPImm(imm), vd)); + } + + ALWAYS_INLINE void fmov_top(FPRegisterID vd, RegisterID rn) + { + insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_XtoQ_top, rn, vd)); + } + + ALWAYS_INLINE void fmov_top(RegisterID rd, FPRegisterID vn) + { + insn(floatingPointIntegerConversions(Datasize_64, Datasize_64, FPIntConvOp_FMOV_QtoX_top, vn, rd)); + } + + template + ALWAYS_INLINE void fmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, false, vm, AddOp_SUB, va, vn, vd)); + } + + template + ALWAYS_INLINE void fmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FMUL, vn, vd)); + } + + template + ALWAYS_INLINE void fneg(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FNEG, vn, vd)); + } + + template + ALWAYS_INLINE void fnmadd(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_ADD, va, vn, vd)); + } + + template + ALWAYS_INLINE void fnmsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm, FPRegisterID va) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing3Source(DATASIZE, true, vm, AddOp_SUB, va, vn, vd)); + } + + template + ALWAYS_INLINE void fnmul(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FNMUL, vn, vd)); + } + + template + ALWAYS_INLINE void vand(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_VECTOR_DATASIZE(); + insn(vectorDataProcessing2Source(SIMD_LogicalOp_AND, vm, vn, vd)); + } + + template + ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTA, vn, vd)); + } + + template + ALWAYS_INLINE void frinti(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTI, vn, vd)); + } + + template + ALWAYS_INLINE void frintm(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTM, vn, vd)); + } + + template + ALWAYS_INLINE void frintn(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTN, 
vn, vd)); + } + + template + ALWAYS_INLINE void frintp(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTP, vn, vd)); + } + + template + ALWAYS_INLINE void frintx(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTX, vn, vd)); + } + + template + ALWAYS_INLINE void frintz(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FRINTZ, vn, vd)); + } + + template + ALWAYS_INLINE void fsqrt(FPRegisterID vd, FPRegisterID vn) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing1Source(DATASIZE, FPDataOp_FSQRT, vn, vd)); + } + + template + ALWAYS_INLINE void fsub(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_DATASIZE(); + insn(floatingPointDataProcessing2Source(DATASIZE, vm, FPDataOp_FSUB, vn, vd)); + } + + template + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm) + { + ldr(rt, rn, rm, UXTX, 0); + } + + template + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, rm, extend, encodeShiftAmount(amount), rn, rt)); + } + + template + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, encodePositiveImmediate(pimm), rn, rt)); + } + + template + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldr(FPRegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ldr_literal(FPRegisterID rt, int offset = 0) + { + CHECK_FP_MEMOP_DATASIZE(); + ASSERT(datasize >= 32); + ASSERT(!(offset & 3)); + insn(loadRegisterLiteral(datasize == 128 ? LdrLiteralOp_128BIT : datasize == 64 ? LdrLiteralOp_64BIT : LdrLiteralOp_32BIT, true, offset >> 2, rt)); + } + + template + ALWAYS_INLINE void ldur(FPRegisterID rt, RegisterID rn, int simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_LOAD_V128 : MemOp_LOAD, simm, rn, rt)); + } + + template + ALWAYS_INLINE void scvtf(FPRegisterID vd, RegisterID rn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_SCVTF, rn, vd)); + } + + template + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm) + { + str(rt, rn, rm, UXTX, 0); + } + + template + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, RegisterID rm, ExtendType extend, int amount) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterRegisterOffset(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, rm, extend, encodeShiftAmount(amount), rn, rt)); + } + + template + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, unsigned pimm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterUnsignedImmediate(MEMOPSIZE, true, datasize == 128 ? 
MemOp_STORE_V128 : MemOp_STORE, encodePositiveImmediate(pimm), rn, rt)); + } + + template + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PostIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPostIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); + } + + template + ALWAYS_INLINE void str(FPRegisterID rt, RegisterID rn, PreIndex simm) + { + CHECK_FP_MEMOP_DATASIZE(); + insn(loadStoreRegisterPreIndex(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); + } + + template + ALWAYS_INLINE void stur(FPRegisterID rt, RegisterID rn, int simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterUnscaledImmediate(MEMOPSIZE, true, datasize == 128 ? MemOp_STORE_V128 : MemOp_STORE, simm, rn, rt)); + } + + template + ALWAYS_INLINE void ucvtf(FPRegisterID vd, RegisterID rn) + { + CHECK_DATASIZE_OF(dstsize); + CHECK_DATASIZE_OF(srcsize); + insn(floatingPointIntegerConversions(DATASIZE_OF(srcsize), DATASIZE_OF(dstsize), FPIntConvOp_UCVTF, rn, vd)); + } + + // Admin methods: + + AssemblerLabel labelIgnoringWatchpoints() + { + return m_buffer.label(); + } + + AssemblerLabel labelForWatchpoint() + { + AssemblerLabel result = m_buffer.label(); + if (static_cast(result.m_offset) != m_indexOfLastWatchpoint) + result = label(); + m_indexOfLastWatchpoint = result.m_offset; + m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); + return result; + } + + AssemblerLabel label() + { + AssemblerLabel result = m_buffer.label(); + while (UNLIKELY(static_cast(result.m_offset) < m_indexOfTailOfLastWatchpoint)) { + nop(); + result = m_buffer.label(); + } + return result; + } + + AssemblerLabel align(int alignment) + { + ASSERT(!(alignment & 3)); + while (!m_buffer.isAligned(alignment)) + brk(0); + return label(); + } + + static void* getRelocatedAddress(void* code, AssemblerLabel label) + { + ASSERT(label.isSet()); + return reinterpret_cast(reinterpret_cast(code) + label.m_offset); + } + + static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) + { + return b.m_offset - a.m_offset; + } + + void* unlinkedCode() { return m_buffer.data(); } + size_t codeSize() const { return m_buffer.codeSize(); } + + static unsigned getCallReturnOffset(AssemblerLabel call) + { + ASSERT(call.isSet()); + return call.m_offset; + } + + // Linking & patching: + // + // 'link' and 'patch' methods are for use on unprotected code - such as the code + // within the AssemblerBuffer, and code being patched by the patch buffer. Once + // code has been finalized it is (platform support permitting) within a non- + // writable region of memory; to modify the code in an execute-only execuable + // pool the 'repatch' and 'relink' methods should be used. 
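+    // A rough sketch of the intended flow (the names below are placeholders, not members of
+    // this class): while code still lives in the AssemblerBuffer, jumps are recorded with
+    // linkJump(from, to, ...) and resolved when the buffer is copied out; once the code has
+    // been copied into its executable allocation, only the static relink/repatch entry points
+    // are used, e.g.
+    //
+    //     ARM64Assembler::relinkJump(jumpInExecutableCopy, newTarget);
+    //     ARM64Assembler::repatchPointer(pointerLoadInExecutableCopy, newValue);
+    //
+    // Both rewrite the instruction words in place and then cacheFlush() the affected range.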
+ + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition) + { + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition)); + } + + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, bool is64Bit, RegisterID compareRegister) + { + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, is64Bit, compareRegister)); + } + + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition, unsigned bitNumber, RegisterID compareRegister) + { + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister)); + } + + void linkJump(AssemblerLabel from, AssemblerLabel to) + { + ASSERT(from.isSet()); + ASSERT(to.isSet()); + relinkJumpOrCall(addressOf(from), addressOf(to)); + } + + static void linkJump(void* code, AssemblerLabel from, void* to) + { + ASSERT(from.isSet()); + relinkJumpOrCall(addressOf(code, from), to); + } + + static void linkCall(void* code, AssemblerLabel from, void* to) + { + ASSERT(from.isSet()); + linkJumpOrCall(addressOf(code, from) - 1, to); + } + + static void linkPointer(void* code, AssemblerLabel where, void* valuePtr) + { + linkPointer(addressOf(code, where), valuePtr); + } + + static void replaceWithJump(void* where, void* to) + { + intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(where)) >> 2; + ASSERT(static_cast(offset) == offset); + *static_cast(where) = unconditionalBranchImmediate(false, static_cast(offset)); + cacheFlush(where, sizeof(int)); + } + + static ptrdiff_t maxJumpReplacementSize() + { + return 4; + } + + static void replaceWithLoad(void* where) + { + Datasize sf; + AddOp op; + SetFlags S; + int shift; + int imm12; + RegisterID rn; + RegisterID rd; + if (disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)) { + ASSERT(sf == Datasize_64); + ASSERT(op == AddOp_ADD); + ASSERT(!S); + ASSERT(!shift); + ASSERT(!(imm12 & ~0xff8)); + *static_cast(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd); + cacheFlush(where, sizeof(int)); + } +#if !ASSERT_DISABLED + else { + MemOpSize size; + bool V; + MemOp opc; + int imm12; + RegisterID rn; + RegisterID rt; + ASSERT(disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)); + ASSERT(size == MemOpSize_64); + ASSERT(!V); + ASSERT(opc == MemOp_LOAD); + ASSERT(!(imm12 & ~0x1ff)); + } +#endif + } + + static void replaceWithAddressComputation(void* where) + { + MemOpSize size; + bool V; + MemOp opc; + int imm12; + RegisterID rn; + RegisterID rt; + if (disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt)) { + ASSERT(size == MemOpSize_64); + ASSERT(!V); + ASSERT(opc == MemOp_LOAD); + ASSERT(!(imm12 & ~0x1ff)); + *static_cast(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt); + cacheFlush(where, sizeof(int)); + } +#if !ASSERT_DISABLED + else { + Datasize sf; + AddOp op; + SetFlags S; + int shift; + int imm12; + RegisterID rn; + RegisterID rd; + ASSERT(disassembleAddSubtractImmediate(where, sf, op, S, shift, imm12, rn, rd)); + ASSERT(sf == Datasize_64); + ASSERT(op == AddOp_ADD); + ASSERT(!S); + ASSERT(!shift); + ASSERT(!(imm12 & ~0xff8)); + } +#endif + } + + static void repatchPointer(void* where, void* valuePtr) + 
{ + linkPointer(static_cast(where), valuePtr, true); + } + + static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush) + { + uintptr_t value = reinterpret_cast(valuePtr); + address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd); + address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd); + address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd); + + if (flush) + cacheFlush(address, sizeof(int) * 3); + } + + static void repatchInt32(void* where, int32_t value) + { + int* address = static_cast(where); + + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rd; + bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw); + ASSERT(checkMovk(address[1], 1, rd)); + + if (value >= 0) { + address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd); + address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); + } else { + address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd); + address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); + } + + cacheFlush(where, sizeof(int) * 2); + } + + static void* readPointer(void* where) + { + int* address = static_cast(where); + + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rdFirst, rd; + + bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rdFirst); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw); + uintptr_t result = imm16; + + expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst); + result |= static_cast(imm16) << 16; + + expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst); + result |= static_cast(imm16) << 32; + + return reinterpret_cast(result); + } + + static void* readCallTarget(void* from) + { + return readPointer(reinterpret_cast(from) - 4); + } + + static void relinkJump(void* from, void* to) + { + relinkJumpOrCall(reinterpret_cast(from), to); + cacheFlush(from, sizeof(int)); + } + + static void relinkCall(void* from, void* to) + { + relinkJumpOrCall(reinterpret_cast(from) - 1, to); + cacheFlush(reinterpret_cast(from) - 1, sizeof(int)); + } + + static void repatchCompact(void* where, int32_t value) + { + ASSERT(!(value & ~0x3ff8)); + + MemOpSize size; + bool V; + MemOp opc; + int imm12; + RegisterID rn; + RegisterID rt; + bool expected = disassembleLoadStoreRegisterUnsignedImmediate(where, size, V, opc, imm12, rn, rt); + ASSERT_UNUSED(expected, expected && size >= MemOpSize_32 && !V && opc == MemOp_LOAD); // expect 32/64 bit load to GPR. 
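+
+        // The unsigned-immediate offset field is scaled by the access width, so the same byte
+        // offset re-encodes differently per size. For example, a byte offset of 0x18 becomes
+        // imm12 = 6 for a 32-bit load (0x18 / 4) and imm12 = 3 for a 64-bit load (0x18 / 8).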
+ + if (size == MemOpSize_32) + imm12 = encodePositiveImmediate<32>(value); + else + imm12 = encodePositiveImmediate<64>(value); + *static_cast(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt); + + cacheFlush(where, sizeof(int)); + } + + unsigned debugOffset() { return m_buffer.debugOffset(); } + + void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) + { + int32_t ptr = regionStart / sizeof(int32_t); + const int32_t end = regionEnd / sizeof(int32_t); + int32_t* offsets = static_cast(m_buffer.data()); + while (ptr < end) + offsets[ptr++] = offset; + } + + int executableOffsetFor(int location) + { + if (!location) + return 0; + return static_cast(m_buffer.data())[location / sizeof(int32_t) - 1]; + } + +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) + static inline void linuxPageFlush(uintptr_t begin, uintptr_t end) + { + __builtin___clear_cache(reinterpret_cast(begin), reinterpret_cast(end)); + } +#endif + + static void cacheFlush(void* code, size_t size) + { +#if OS(IOS) + sys_cache_control(kCacheFunctionPrepareForExecution, code, size); +#elif OS(LINUX) + size_t page = pageSize(); + uintptr_t current = reinterpret_cast(code); + uintptr_t end = current + size; + uintptr_t firstPageEnd = (current & ~(page - 1)) + page; + + if (end <= firstPageEnd) { + linuxPageFlush(current, end); + return; + } + + linuxPageFlush(current, firstPageEnd); + + for (current = firstPageEnd; current + page < end; current += page) + linuxPageFlush(current, current + page); + + linuxPageFlush(current, end); +#else +#error "The cacheFlush support is missing on this platform." +#endif + } + + // Assembler admin methods: + + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } + + static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) + { + return a.from() < b.from(); + } + + static bool canCompact(JumpType jumpType) + { + // Fixed jumps cannot be compacted + return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit); + } + + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + { + switch (jumpType) { + case JumpFixed: + return LinkInvalid; + case JumpNoConditionFixedSize: + return LinkJumpNoCondition; + case JumpConditionFixedSize: + return LinkJumpCondition; + case JumpCompareAndBranchFixedSize: + return LinkJumpCompareAndBranch; + case JumpTestBitFixedSize: + return LinkJumpTestBit; + case JumpNoCondition: + return LinkJumpNoCondition; + case JumpCondition: { + ASSERT(!(reinterpret_cast(from) & 0x3)); + ASSERT(!(reinterpret_cast(to) & 0x3)); + intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); + + if (((relative << 43) >> 43) == relative) + return LinkJumpConditionDirect; + + return LinkJumpCondition; + } + case JumpCompareAndBranch: { + ASSERT(!(reinterpret_cast(from) & 0x3)); + ASSERT(!(reinterpret_cast(to) & 0x3)); + intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); + + if (((relative << 43) >> 43) == relative) + return LinkJumpCompareAndBranchDirect; + + return LinkJumpCompareAndBranch; + } + case JumpTestBit: { + ASSERT(!(reinterpret_cast(from) & 0x3)); + ASSERT(!(reinterpret_cast(to) & 0x3)); + intptr_t relative = reinterpret_cast(to) - (reinterpret_cast(from)); + + if (((relative << 50) >> 50) == relative) + return LinkJumpTestBitDirect; + + return LinkJumpTestBit; + } + default: + 
ASSERT_NOT_REACHED(); + } + + return LinkJumpNoCondition; + } + + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + { + JumpLinkType linkType = computeJumpType(record.type(), from, to); + record.setLinkType(linkType); + return linkType; + } + + Vector& jumpsToLink() + { + std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); + return m_jumpsToLink; + } + + static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) + { + switch (record.linkType()) { + case LinkJumpNoCondition: + linkJumpOrCall(reinterpret_cast(from), to); + break; + case LinkJumpConditionDirect: + linkConditionalBranch(record.condition(), reinterpret_cast(from), to); + break; + case LinkJumpCondition: + linkConditionalBranch(record.condition(), reinterpret_cast(from) - 1, to); + break; + case LinkJumpCompareAndBranchDirect: + linkCompareAndBranch(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast(from), to); + break; + case LinkJumpCompareAndBranch: + linkCompareAndBranch(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast(from) - 1, to); + break; + case LinkJumpTestBitDirect: + linkTestAndBranch(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast(from), to); + break; + case LinkJumpTestBit: + linkTestAndBranch(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast(from) - 1, to); + break; + default: + ASSERT_NOT_REACHED(); + break; + } + } + +private: + template + static bool checkMovk(int insn, int _hw, RegisterID _rd) + { + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rd; + bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd); + + return expected + && sf == size + && opc == MoveWideOp_K + && hw == _hw + && rd == _rd; + } + + static void linkPointer(int* address, void* valuePtr, bool flush = false) + { + Datasize sf; + MoveWideOp opc; + int hw; + uint16_t imm16; + RegisterID rd; + bool expected = disassembleMoveWideImediate(address, sf, opc, hw, imm16, rd); + ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_Z && !hw); + ASSERT(checkMovk(address[1], 1, rd)); + ASSERT(checkMovk(address[2], 2, rd)); + + setPointer(address, valuePtr, rd, flush); + } + + template + static void linkJumpOrCall(int* from, void* to) + { + bool link; + int imm26; + bool isUnconditionalBranchImmediateOrNop = disassembleUnconditionalBranchImmediate(from, link, imm26) || disassembleNop(from); + + ASSERT_UNUSED(isUnconditionalBranchImmediateOrNop, isUnconditionalBranchImmediateOrNop); + ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from)); + ASSERT(!(reinterpret_cast(from) & 3)); + ASSERT(!(reinterpret_cast(to) & 3)); + intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(from)) >> 2; + ASSERT(static_cast(offset) == offset); + + *from = unconditionalBranchImmediate(isCall, static_cast(offset)); + } + + template + static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to) + { + ASSERT(!(reinterpret_cast(from) & 3)); + ASSERT(!(reinterpret_cast(to) & 3)); + intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(from)) >> 2; + ASSERT(((offset << 38) >> 38) == offset); + + bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits + ASSERT(!isDirect || useDirect); + + if (useDirect || isDirect) { + *from = compareAndBranchImmediate(is64Bit ? 
Datasize_64 : Datasize_32, condition == ConditionNE, static_cast(offset), rt); + if (!isDirect) + *(from + 1) = nopPseudo(); + } else { + *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt); + linkJumpOrCall(from + 1, to); + } + } + + template + static void linkConditionalBranch(Condition condition, int* from, void* to) + { + ASSERT(!(reinterpret_cast(from) & 3)); + ASSERT(!(reinterpret_cast(to) & 3)); + intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(from)) >> 2; + ASSERT(((offset << 38) >> 38) == offset); + + bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits + ASSERT(!isDirect || useDirect); + + if (useDirect || isDirect) { + *from = conditionalBranchImmediate(static_cast(offset), condition); + if (!isDirect) + *(from + 1) = nopPseudo(); + } else { + *from = conditionalBranchImmediate(2, invert(condition)); + linkJumpOrCall(from + 1, to); + } + } + + template + static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to) + { + ASSERT(!(reinterpret_cast(from) & 3)); + ASSERT(!(reinterpret_cast(to) & 3)); + intptr_t offset = (reinterpret_cast(to) - reinterpret_cast(from)) >> 2; + ASSERT(static_cast(offset) == offset); + ASSERT(((offset << 38) >> 38) == offset); + + bool useDirect = ((offset << 50) >> 50) == offset; // Fits in 14 bits + ASSERT(!isDirect || useDirect); + + if (useDirect || isDirect) { + *from = testAndBranchImmediate(condition == ConditionNE, static_cast(bitNumber), static_cast(offset), rt); + if (!isDirect) + *(from + 1) = nopPseudo(); + } else { + *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast(bitNumber), 2, rt); + linkJumpOrCall(from + 1, to); + } + } + + template + static void relinkJumpOrCall(int* from, void* to) + { + if (!isCall && disassembleNop(from)) { + unsigned op01; + int imm19; + Condition condition; + bool isConditionalBranchImmediate = disassembleConditionalBranchImmediate(from - 1, op01, imm19, condition); + + if (isConditionalBranchImmediate) { + ASSERT_UNUSED(op01, !op01); + ASSERT_UNUSED(isCall, !isCall); + + if (imm19 == 8) + condition = invert(condition); + + linkConditionalBranch(condition, from - 1, to); + return; + } + + Datasize opSize; + bool op; + RegisterID rt; + bool isCompareAndBranchImmediate = disassembleCompareAndBranchImmediate(from - 1, opSize, op, imm19, rt); + + if (isCompareAndBranchImmediate) { + if (imm19 == 8) + op = !op; + + linkCompareAndBranch(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to); + return; + } + + int imm14; + unsigned bitNumber; + bool isTestAndBranchImmediate = disassembleTestAndBranchImmediate(from - 1, op, bitNumber, imm14, rt); + + if (isTestAndBranchImmediate) { + if (imm14 == 8) + op = !op; + + linkTestAndBranch(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to); + return; + } + } + + linkJumpOrCall(from, to); + } + + static int* addressOf(void* code, AssemblerLabel label) + { + return reinterpret_cast(static_cast(code) + label.m_offset); + } + + int* addressOf(AssemblerLabel label) + { + return addressOf(m_buffer.data(), label); + } + + static RegisterID disassembleXOrSp(int reg) { return reg == 31 ? ARM64Registers::sp : static_cast(reg); } + static RegisterID disassembleXOrZr(int reg) { return reg == 31 ? ARM64Registers::zr : static_cast(reg); } + static RegisterID disassembleXOrZrOrSp(bool useZr, int reg) { return reg == 31 ? (useZr ? 
ARM64Registers::zr : ARM64Registers::sp) : static_cast(reg); } + + static bool disassembleAddSubtractImmediate(void* address, Datasize& sf, AddOp& op, SetFlags& S, int& shift, int& imm12, RegisterID& rn, RegisterID& rd) + { + int insn = *static_cast(address); + sf = static_cast((insn >> 31) & 1); + op = static_cast((insn >> 30) & 1); + S = static_cast((insn >> 29) & 1); + shift = (insn >> 22) & 3; + imm12 = (insn >> 10) & 0x3ff; + rn = disassembleXOrSp((insn >> 5) & 0x1f); + rd = disassembleXOrZrOrSp(S, insn & 0x1f); + return (insn & 0x1f000000) == 0x11000000; + } + + static bool disassembleLoadStoreRegisterUnsignedImmediate(void* address, MemOpSize& size, bool& V, MemOp& opc, int& imm12, RegisterID& rn, RegisterID& rt) + { + int insn = *static_cast(address); + size = static_cast((insn >> 30) & 3); + V = (insn >> 26) & 1; + opc = static_cast((insn >> 22) & 3); + imm12 = (insn >> 10) & 0xfff; + rn = disassembleXOrSp((insn >> 5) & 0x1f); + rt = disassembleXOrZr(insn & 0x1f); + return (insn & 0x3b000000) == 0x39000000; + } + + static bool disassembleMoveWideImediate(void* address, Datasize& sf, MoveWideOp& opc, int& hw, uint16_t& imm16, RegisterID& rd) + { + int insn = *static_cast(address); + sf = static_cast((insn >> 31) & 1); + opc = static_cast((insn >> 29) & 3); + hw = (insn >> 21) & 3; + imm16 = insn >> 5; + rd = disassembleXOrZr(insn & 0x1f); + return (insn & 0x1f800000) == 0x12800000; + } + + static bool disassembleNop(void* address) + { + unsigned insn = *static_cast(address); + return insn == 0xd503201f; + } + + static bool disassembleCompareAndBranchImmediate(void* address, Datasize& sf, bool& op, int& imm19, RegisterID& rt) + { + int insn = *static_cast(address); + sf = static_cast((insn >> 31) & 1); + op = (insn >> 24) & 0x1; + imm19 = (insn << 8) >> 13; + rt = static_cast(insn & 0x1f); + return (insn & 0x7e000000) == 0x34000000; + + } + + static bool disassembleConditionalBranchImmediate(void* address, unsigned& op01, int& imm19, Condition &condition) + { + int insn = *static_cast(address); + op01 = ((insn >> 23) & 0x2) | ((insn >> 4) & 0x1); + imm19 = (insn << 8) >> 13; + condition = static_cast(insn & 0xf); + return (insn & 0xfe000000) == 0x54000000; + } + + static bool disassembleTestAndBranchImmediate(void* address, bool& op, unsigned& bitNumber, int& imm14, RegisterID& rt) + { + int insn = *static_cast(address); + op = (insn >> 24) & 0x1; + imm14 = (insn << 13) >> 18; + bitNumber = static_cast((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f)); + rt = static_cast(insn & 0x1f); + return (insn & 0x7e000000) == 0x36000000; + + } + + static bool disassembleUnconditionalBranchImmediate(void* address, bool& op, int& imm26) + { + int insn = *static_cast(address); + op = (insn >> 31) & 1; + imm26 = (insn << 6) >> 6; + return (insn & 0x7c000000) == 0x14000000; + } + + static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; } + static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; } + static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast(xOrZr(reg)); } + static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); } + + ALWAYS_INLINE void insn(int instruction) + { + m_buffer.putInt(instruction); + } + + ALWAYS_INLINE static int addSubtractExtendedRegister(Datasize sf, AddOp op, SetFlags S, RegisterID rm, ExtendType option, int imm3, RegisterID rn, RegisterID rd) + { + ASSERT(imm3 < 5); + // The only allocated values for opt is 0. 
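+        // A64 add/subtract (extended register) layout:
+        //     sf[31] op[30] S[29] 0 1 0 1 1 opt[23:22] 1 Rm[20:16] option[15:13] imm3[12:10] Rn[9:5] Rd[4:0]
+        // which is why the fixed bits below are 0x0b200000; imm3 is the left shift applied to
+        // the extended Rm and may be at most 4.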
+ const int opt = 0; + return (0x0b200000 | sf << 31 | op << 30 | S << 29 | opt << 22 | xOrZr(rm) << 16 | option << 13 | (imm3 & 0x7) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd)); + } + + ALWAYS_INLINE static int addSubtractImmediate(Datasize sf, AddOp op, SetFlags S, int shift, int imm12, RegisterID rn, RegisterID rd) + { + ASSERT(shift < 2); + ASSERT(isUInt12(imm12)); + return (0x11000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | xOrZrOrSp(S, rd)); + } + + ALWAYS_INLINE static int addSubtractShiftedRegister(Datasize sf, AddOp op, SetFlags S, ShiftType shift, RegisterID rm, int imm6, RegisterID rn, RegisterID rd) + { + ASSERT(shift < 3); + ASSERT(!(imm6 & (sf ? ~63 : ~31))); + return (0x0b000000 | sf << 31 | op << 30 | S << 29 | shift << 22 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int addSubtractWithCarry(Datasize sf, AddOp op, SetFlags S, RegisterID rm, RegisterID rn, RegisterID rd) + { + const int opcode2 = 0; + return (0x1a000000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | opcode2 << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int bitfield(Datasize sf, BitfieldOp opc, int immr, int imms, RegisterID rn, RegisterID rd) + { + ASSERT(immr < (sf ? 64 : 32)); + ASSERT(imms < (sf ? 64 : 32)); + const int N = sf; + return (0x13000000 | sf << 31 | opc << 29 | N << 22 | immr << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + // 'op' means negate + ALWAYS_INLINE static int compareAndBranchImmediate(Datasize sf, bool op, int32_t imm19, RegisterID rt) + { + ASSERT(imm19 == (imm19 << 13) >> 13); + return (0x34000000 | sf << 31 | op << 24 | (imm19 & 0x7ffff) << 5 | xOrZr(rt)); + } + + ALWAYS_INLINE static int conditionalBranchImmediate(int32_t imm19, Condition cond) + { + ASSERT(imm19 == (imm19 << 13) >> 13); + ASSERT(!(cond & ~15)); + // The only allocated values for o1 & o0 are 0. 
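+        // B.cond layout: 0 1 0 1 0 1 0 o1[24] imm19[23:5] o0[4] cond[3:0]. imm19 is counted in
+        // 4-byte instructions, so a direct conditional branch reaches roughly +/-1MiB; for
+        // longer distances computeJumpType() falls back to an inverted B.cond over an
+        // unconditional B, linked by linkConditionalBranch() above.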
+ const int o1 = 0; + const int o0 = 0; + return (0x54000000 | o1 << 24 | (imm19 & 0x7ffff) << 5 | o0 << 4 | cond); + } + + ALWAYS_INLINE static int conditionalCompareImmediate(Datasize sf, AddOp op, int imm5, Condition cond, RegisterID rn, int nzcv) + { + ASSERT(!(imm5 & ~0x1f)); + ASSERT(nzcv < 16); + const int S = 1; + const int o2 = 0; + const int o3 = 0; + return (0x1a400800 | sf << 31 | op << 30 | S << 29 | (imm5 & 0x1f) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv); + } + + ALWAYS_INLINE static int conditionalCompareRegister(Datasize sf, AddOp op, RegisterID rm, Condition cond, RegisterID rn, int nzcv) + { + ASSERT(nzcv < 16); + const int S = 1; + const int o2 = 0; + const int o3 = 0; + return (0x1a400000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | o2 << 10 | xOrZr(rn) << 5 | o3 << 4 | nzcv); + } + + // 'op' means negate + // 'op2' means increment + ALWAYS_INLINE static int conditionalSelect(Datasize sf, bool op, RegisterID rm, Condition cond, bool op2, RegisterID rn, RegisterID rd) + { + const int S = 0; + return (0x1a800000 | sf << 31 | op << 30 | S << 29 | xOrZr(rm) << 16 | cond << 12 | op2 << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int dataProcessing1Source(Datasize sf, DataOp1Source opcode, RegisterID rn, RegisterID rd) + { + const int S = 0; + const int opcode2 = 0; + return (0x5ac00000 | sf << 31 | S << 29 | opcode2 << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int dataProcessing2Source(Datasize sf, RegisterID rm, DataOp2Source opcode, RegisterID rn, RegisterID rd) + { + const int S = 0; + return (0x1ac00000 | sf << 31 | S << 29 | xOrZr(rm) << 16 | opcode << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int dataProcessing3Source(Datasize sf, DataOp3Source opcode, RegisterID rm, RegisterID ra, RegisterID rn, RegisterID rd) + { + int op54 = opcode >> 4; + int op31 = (opcode >> 1) & 7; + int op0 = opcode & 1; + return (0x1b000000 | sf << 31 | op54 << 29 | op31 << 21 | xOrZr(rm) << 16 | op0 << 15 | xOrZr(ra) << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int excepnGeneration(ExcepnOp opc, uint16_t imm16, int LL) + { + ASSERT((opc == ExcepnOp_BREAKPOINT || opc == ExcepnOp_HALT) ? !LL : (LL && (LL < 4))); + const int op2 = 0; + return (0xd4000000 | opc << 21 | imm16 << 5 | op2 << 2 | LL); + } + + ALWAYS_INLINE static int extract(Datasize sf, RegisterID rm, int imms, RegisterID rn, RegisterID rd) + { + ASSERT(imms < (sf ? 
64 : 32)); + const int op21 = 0; + const int N = sf; + const int o0 = 0; + return (0x13800000 | sf << 31 | op21 << 29 | N << 22 | o0 << 21 | xOrZr(rm) << 16 | imms << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int floatingPointCompare(Datasize type, FPRegisterID rm, FPRegisterID rn, FPCmpOp opcode2) + { + const int M = 0; + const int S = 0; + const int op = 0; + return (0x1e202000 | M << 31 | S << 29 | type << 22 | rm << 16 | op << 14 | rn << 5 | opcode2); + } + + ALWAYS_INLINE static int floatingPointConditionalCompare(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPCondCmpOp op, int nzcv) + { + ASSERT(nzcv < 16); + const int M = 0; + const int S = 0; + return (0x1e200400 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | op << 4 | nzcv); + } + + ALWAYS_INLINE static int floatingPointConditionalSelect(Datasize type, FPRegisterID rm, Condition cond, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1e200c00 | M << 31 | S << 29 | type << 22 | rm << 16 | cond << 12 | rn << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointImmediate(Datasize type, int imm8, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + const int imm5 = 0; + return (0x1e201000 | M << 31 | S << 29 | type << 22 | (imm8 & 0xff) << 13 | imm5 << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, FPRegisterID rd) + { + const int S = 0; + return (0x1e200000 | sf << 31 | S << 29 | type << 22 | rmodeOpcode << 16 | rn << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, FPRegisterID rn, RegisterID rd) + { + return floatingPointIntegerConversions(sf, type, rmodeOpcode, rn, xOrZrAsFPR(rd)); + } + + ALWAYS_INLINE static int floatingPointIntegerConversions(Datasize sf, Datasize type, FPIntConvOp rmodeOpcode, RegisterID rn, FPRegisterID rd) + { + return floatingPointIntegerConversions(sf, type, rmodeOpcode, xOrZrAsFPR(rn), rd); + } + + ALWAYS_INLINE static int floatingPointDataProcessing1Source(Datasize type, FPDataOp1Source opcode, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1e204000 | M << 31 | S << 29 | type << 22 | opcode << 15 | rn << 5 | rd); + } + + ALWAYS_INLINE static int floatingPointDataProcessing2Source(Datasize type, FPRegisterID rm, FPDataOp2Source opcode, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd); + } + + ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, unsigned size, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd) + { + const int Q = 0; + return (0xe201c00 | Q << 30 | size << 22 | vm << 16 | opcode << 11 | vn << 5 | vd); + } + + ALWAYS_INLINE static int vectorDataProcessing2Source(SIMD3Same opcode, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd) + { + return vectorDataProcessing2Source(opcode, 0, vm, vn, vd); + } + + + // 'o1' means negate + ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd) + { + const int M = 0; + const int S = 0; + return (0x1f000000 | M << 31 | S << 29 | type << 22 | o1 << 21 | rm << 16 | o2 << 15 | ra << 10 | rn << 5 | rd); + } + + // 'V' means vector + ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, 
bool V, int imm19, FPRegisterID rt) + { + ASSERT(((imm19 << 13) >> 13) == imm19); + return (0x18000000 | opc << 30 | V << 26 | (imm19 & 0x7ffff) << 5 | rt); + } + + ALWAYS_INLINE static int loadRegisterLiteral(LdrLiteralOp opc, bool V, int imm19, RegisterID rt) + { + return loadRegisterLiteral(opc, V, imm19, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isInt9(imm9)); + return (0x38000400 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPostIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. + unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isInt9(imm9)); + return (0x38000c00 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. 
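+        // The pair offset is a signed 7-bit immediate scaled by the access size, so for 64-bit
+        // register pairs the byte offset must be a multiple of 8 in [-512, 504], and for 32-bit
+        // pairs a multiple of 4 in [-256, 252]; the shift/fit checks below enforce exactly that.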
+ unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector + // 'S' means shift rm + ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(option & 2); // The ExtendType for the address must be 32/64 bit, signed or unsigned - not 8/16bit. + return (0x38200800 | size << 30 | V << 26 | opc << 22 | xOrZr(rm) << 16 | option << 13 | S << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterRegisterOffset(size, V, opc, rm, option, S, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isInt9(imm9)); + return (0x38000000 | size << 30 | V << 26 | opc << 22 | (imm9 & 0x1ff) << 12 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterUnscaledImmediate(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, RegisterID rt) + { + ASSERT(isInt9(imm9)); + return loadStoreRegisterUnscaledImmediate(size, V, opc, imm9, rn, xOrZrAsFPR(rt)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, FPRegisterID rt) + { + ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. + ASSERT(!((size & 2) && !V && (opc == 3))); // signed 32-bit load must be extending from 8/16 bits. + ASSERT(isUInt12(imm12)); + return (0x39000000 | size << 30 | V << 26 | opc << 22 | (imm12 & 0xfff) << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterUnsignedImmediate(MemOpSize size, bool V, MemOp opc, int imm12, RegisterID rn, RegisterID rt) + { + return loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, xOrZrAsFPR(rt)); + } + + ALWAYS_INLINE static int logicalImmediate(Datasize sf, LogicalOp opc, int N_immr_imms, RegisterID rn, RegisterID rd) + { + ASSERT(!(N_immr_imms & (sf ? ~0x1fff : ~0xfff))); + return (0x12000000 | sf << 31 | opc << 29 | N_immr_imms << 10 | xOrZr(rn) << 5 | xOrZrOrSp(opc == LogicalOp_ANDS, rd)); + } + + // 'N' means negate rm + ALWAYS_INLINE static int logicalShiftedRegister(Datasize sf, LogicalOp opc, ShiftType shift, bool N, RegisterID rm, int imm6, RegisterID rn, RegisterID rd) + { + ASSERT(!(imm6 & (sf ? 
~63 : ~31))); + return (0x0a000000 | sf << 31 | opc << 29 | shift << 22 | N << 21 | xOrZr(rm) << 16 | (imm6 & 0x3f) << 10 | xOrZr(rn) << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int moveWideImediate(Datasize sf, MoveWideOp opc, int hw, uint16_t imm16, RegisterID rd) + { + ASSERT(hw < (sf ? 4 : 2)); + return (0x12800000 | sf << 31 | opc << 29 | hw << 21 | (int)imm16 << 5 | xOrZr(rd)); + } + + // 'op' means link + ALWAYS_INLINE static int unconditionalBranchImmediate(bool op, int32_t imm26) + { + ASSERT(imm26 == (imm26 << 6) >> 6); + return (0x14000000 | op << 31 | (imm26 & 0x3ffffff)); + } + + // 'op' means page + ALWAYS_INLINE static int pcRelative(bool op, int32_t imm21, RegisterID rd) + { + ASSERT(imm21 == (imm21 << 11) >> 11); + int32_t immlo = imm21 & 3; + int32_t immhi = (imm21 >> 2) & 0x7ffff; + return (0x10000000 | op << 31 | immlo << 29 | immhi << 5 | xOrZr(rd)); + } + + ALWAYS_INLINE static int system(bool L, int op0, int op1, int crn, int crm, int op2, RegisterID rt) + { + return (0xd5000000 | L << 21 | op0 << 19 | op1 << 16 | crn << 12 | crm << 8 | op2 << 5 | xOrZr(rt)); + } + + ALWAYS_INLINE static int hintPseudo(int imm) + { + ASSERT(!(imm & ~0x7f)); + return system(0, 0, 3, 2, (imm >> 3) & 0xf, imm & 0x7, ARM64Registers::zr); + } + + ALWAYS_INLINE static int nopPseudo() + { + return hintPseudo(0); + } + + // 'op' means negate + ALWAYS_INLINE static int testAndBranchImmediate(bool op, int b50, int imm14, RegisterID rt) + { + ASSERT(!(b50 & ~0x3f)); + ASSERT(imm14 == (imm14 << 18) >> 18); + int b5 = b50 >> 5; + int b40 = b50 & 0x1f; + return (0x36000000 | b5 << 31 | op << 24 | b40 << 19 | (imm14 & 0x3fff) << 5 | xOrZr(rt)); + } + + ALWAYS_INLINE static int unconditionalBranchRegister(BranchType opc, RegisterID rn) + { + // The only allocated values for op2 is 0x1f, for op3 & op4 are 0. + const int op2 = 0x1f; + const int op3 = 0; + const int op4 = 0; + return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4); + } + + // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the + // last instruction in the buffer is a load, store or prefetch. Needed + // before 64-bit multiply-accumulate instructions. + template + ALWAYS_INLINE void nopCortexA53Fix835769() + { +#if CPU(ARM64_CORTEXA53) + CHECK_DATASIZE(); + if (datasize == 64) { + if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) { + // From ARMv8 Reference Manual, Section C4.1: the encoding of the + // instructions in the Loads and stores instruction group is: + // ---- 1-0- ---- ---- ---- ---- ---- ---- + if (UNLIKELY((*reinterpret_cast_ptr(reinterpret_cast_ptr(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000)) + nop(); + } + } +#endif + } + + // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid + // wrong address access after ADRP instruction. 
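+    // The fix used here is deliberately coarse: when building for CPU(ARM64_CORTEXA53) it
+    // simply pads with three NOPs ahead of ADRP-based sequences instead of tracking the exact
+    // page offsets described by the erratum.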
+ ALWAYS_INLINE void nopCortexA53Fix843419() + { +#if CPU(ARM64_CORTEXA53) + nop(); + nop(); + nop(); +#endif + } + + AssemblerBuffer m_buffer; + Vector m_jumpsToLink; + int m_indexOfLastWatchpoint; + int m_indexOfTailOfLastWatchpoint; +}; + +} // namespace JSC + +#undef CHECK_DATASIZE_OF +#undef DATASIZE_OF +#undef MEMOPSIZE_OF +#undef CHECK_DATASIZE +#undef DATASIZE +#undef MEMOPSIZE +#undef CHECK_FP_MEMOP_DATASIZE + +#endif // ENABLE(ASSEMBLER) && CPU(ARM64) + +#endif // ARM64Assembler_h diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.h b/src/3rdparty/masm/assembler/ARMv7Assembler.h index 236d55883d..f0fa07a1bf 100644 --- a/src/3rdparty/masm/assembler/ARMv7Assembler.h +++ b/src/3rdparty/masm/assembler/ARMv7Assembler.h @@ -423,6 +423,7 @@ public: typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID; typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID; typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID; + typedef ARMRegisters::FPDoubleRegisterID FPRegisterID; // (HS, LO, HI, LS) -> (AE, B, A, BE) // (VS, VC) -> (O, NO) diff --git a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h index 95eaf7d99d..e90dd235c6 100644 --- a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h +++ b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h @@ -66,6 +66,7 @@ public: class Jump; typedef typename AssemblerType::RegisterID RegisterID; + typedef typename AssemblerType::FPRegisterID FPRegisterID; // Section 1: MacroAssembler operand types // @@ -275,7 +276,7 @@ public: { } -#if CPU(X86_64) +#if CPU(X86_64) || CPU(ARM64) explicit TrustedImm64(TrustedImmPtr ptr) : m_value(ptr.asIntptr()) { @@ -296,7 +297,7 @@ public: : TrustedImm64(value) { } -#if CPU(X86_64) +#if CPU(X86_64) || CPU(ARM64) explicit Imm64(TrustedImmPtr ptr) : TrustedImm64(ptr) { @@ -516,6 +517,33 @@ public: , m_condition(condition) { } +#elif CPU(ARM64) + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + { + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + , m_is64Bit(is64Bit) + , m_compareRegister(compareRegister) + { + ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize)); + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + , m_bitNumber(bitNumber) + , m_compareRegister(compareRegister) + { + ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize)); + } #elif CPU(SH4) Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar) : m_label(jmp) @@ -544,6 +572,13 @@ public: #if CPU(ARM_THUMB2) masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition); +#elif CPU(ARM64) + if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + 
masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition); #elif CPU(SH4) masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type); #else @@ -559,6 +594,13 @@ public: #if CPU(ARM_THUMB2) masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition); +#elif CPU(ARM64) + if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition); #else masm->m_assembler.linkJump(m_label, label.m_label); #endif @@ -571,6 +613,12 @@ public: #if CPU(ARM_THUMB2) ARMv7Assembler::JumpType m_type; ARMv7Assembler::Condition m_condition; +#elif CPU(ARM64) + ARM64Assembler::JumpType m_type; + ARM64Assembler::Condition m_condition; + bool m_is64Bit; + unsigned m_bitNumber; + ARM64Assembler::RegisterID m_compareRegister; #endif #if CPU(SH4) SH4Assembler::JumpType m_type; diff --git a/src/3rdparty/masm/assembler/MacroAssembler.h b/src/3rdparty/masm/assembler/MacroAssembler.h index 0c95bc7ca1..e122e2f3ae 100644 --- a/src/3rdparty/masm/assembler/MacroAssembler.h +++ b/src/3rdparty/masm/assembler/MacroAssembler.h @@ -34,6 +34,10 @@ #include "MacroAssemblerARMv7.h" namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; +#elif CPU(ARM64) +#include "MacroAssemblerARM64.h" +namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; }; + #elif CPU(ARM_TRADITIONAL) #include "MacroAssemblerARM.h" namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }; @@ -183,7 +187,7 @@ public: storePtr(imm, addressForPoke(index)); } -#if CPU(X86_64) +#if CPU(X86_64) || CPU(ARM64) void peek64(RegisterID dest, int index = 0) { load64(Address(stackPointerRegister, (index * sizeof(void*))), dest); @@ -253,7 +257,7 @@ public: branchTestPtr(cond, reg).linkTo(target, this); } -#if !CPU(ARM_THUMB2) +#if !CPU(ARM_THUMB2) && !CPU(ARM64) PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) { return PatchableJump(branchPtr(cond, left, right)); @@ -273,7 +277,7 @@ public: { return PatchableJump(branchTest32(cond, reg, mask)); } -#endif // !CPU(ARM_THUMB2) +#endif // !CPU(ARM_THUMB2) && !CPU(ARM64) #if !CPU(ARM) PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) @@ -325,7 +329,7 @@ public: // Ptr methods // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents. // FIXME: should this use a test for 32-bitness instead of this specific exception? -#if !CPU(X86_64) +#if !CPU(X86_64) && !CPU(ARM64) void addPtr(Address src, RegisterID dest) { add32(src, dest); diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h new file mode 100644 index 0000000000..bd85b6b2c1 --- /dev/null +++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h @@ -0,0 +1,3455 @@ +/* + * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerARM64_h +#define MacroAssemblerARM64_h + +#if ENABLE(ASSEMBLER) + +#include "ARM64Assembler.h" +#include "AbstractMacroAssembler.h" +#include + +namespace JSC { + +class MacroAssemblerARM64 : public AbstractMacroAssembler { +#define COPIED_FROM_AbstractAssembler_H 1 +#ifdef COPIED_FROM_AbstractAssembler_H + typedef MacroAssemblerARM64 AbstractMacroAssemblerType; + class CachedTempRegister { + friend class DataLabelPtr; + friend class DataLabel32; + friend class DataLabelCompact; + friend class Jump; + friend class Label; + + public: + CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID) + : m_masm(masm) + , m_registerID(registerID) + , m_value(0) + , m_validBit(1 << static_cast(registerID)) + { + ASSERT(static_cast(registerID) < (sizeof(unsigned) * 8)); + } + + ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; } + + ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; } + + bool value(intptr_t& value) + { + value = m_value; + return m_masm->isTempRegisterValid(m_validBit); + } + + void setValue(intptr_t value) + { + m_value = value; + m_masm->setTempRegisterValid(m_validBit); + } + + ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); } + + private: + AbstractMacroAssemblerType* m_masm; + RegisterID m_registerID; + intptr_t m_value; + unsigned m_validBit; + }; + + ALWAYS_INLINE void invalidateAllTempRegisters() + { + m_tempRegistersValidBits = 0; + } + + ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask) + { + return (m_tempRegistersValidBits & registerMask); + } + + ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask) + { + m_tempRegistersValidBits &= ~registerMask; + } + + ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask) + { + m_tempRegistersValidBits |= registerMask; + } + + friend class AllowMacroScratchRegisterUsage; + friend class DisallowMacroScratchRegisterUsage; + unsigned m_tempRegistersValidBits; +#endif // COPIED_FROM_AbstractAssembler_H +public: + static const RegisterID dataTempRegister = ARM64Registers::ip0; + static const RegisterID memoryTempRegister = ARM64Registers::ip1; + +#if 0 + RegisterID scratchRegister() + { + 
RELEASE_ASSERT(m_allowScratchRegister); + return getCachedDataTempRegisterIDAndInvalidate(); + } +#endif + +private: + static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31; + static const ARM64Assembler::SetFlags S = ARM64Assembler::S; + static const intptr_t maskHalfWord0 = 0xffffl; + static const intptr_t maskHalfWord1 = 0xffff0000l; + static const intptr_t maskUpperWord = 0xffffffff00000000l; + + // 4 instructions - 3 to load the function pointer, + blr. + static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16; + +public: + MacroAssemblerARM64() + : m_dataMemoryTempRegister(this, dataTempRegister) + , m_cachedMemoryTempRegister(this, memoryTempRegister) + , m_makeJumpPatchable(false) + { + } + + typedef ARM64Assembler::LinkRecord LinkRecord; + typedef ARM64Assembler::JumpType JumpType; + typedef ARM64Assembler::JumpLinkType JumpLinkType; + typedef ARM64Assembler::Condition Condition; + + static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid; + static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize; + + Vector& jumpsToLink() { return m_assembler.jumpsToLink(); } + void* unlinkedCode() { return m_assembler.unlinkedCode(); } + static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); } + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); } + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); } + static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); } + + static const Scale ScalePtr = TimesEight; + + static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) + { + // This is the largest 32-bit access allowed, aligned to 64-bit boundary. + return !(value & ~0x3ff8); + } + + enum RelationalCondition { + Equal = ARM64Assembler::ConditionEQ, + NotEqual = ARM64Assembler::ConditionNE, + Above = ARM64Assembler::ConditionHI, + AboveOrEqual = ARM64Assembler::ConditionHS, + Below = ARM64Assembler::ConditionLO, + BelowOrEqual = ARM64Assembler::ConditionLS, + GreaterThan = ARM64Assembler::ConditionGT, + GreaterThanOrEqual = ARM64Assembler::ConditionGE, + LessThan = ARM64Assembler::ConditionLT, + LessThanOrEqual = ARM64Assembler::ConditionLE + }; + + enum ResultCondition { + Overflow = ARM64Assembler::ConditionVS, + Signed = ARM64Assembler::ConditionMI, + PositiveOrZero = ARM64Assembler::ConditionPL, + Zero = ARM64Assembler::ConditionEQ, + NonZero = ARM64Assembler::ConditionNE + }; + + enum ZeroCondition { + IsZero = ARM64Assembler::ConditionEQ, + IsNonZero = ARM64Assembler::ConditionNE + }; + + enum DoubleCondition { + // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. + DoubleEqual = ARM64Assembler::ConditionEQ, + DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently. + DoubleGreaterThan = ARM64Assembler::ConditionGT, + DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE, + DoubleLessThan = ARM64Assembler::ConditionLO, + DoubleLessThanOrEqual = ARM64Assembler::ConditionLS, + // If either operand is NaN, these conditions always evaluate to true. 
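+        // (DoubleNotEqual and DoubleEqualOrUnordered have no single ARM64
+        // condition encoding; the "Not the right flag!" markers call this out
+        // so the branch/select helpers in this file can emit an extra
+        // unordered (VS) check for those two cases.)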
+ DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently. + DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE, + DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI, + DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS, + DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT, + DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE, + }; + + static const RegisterID stackPointerRegister = ARM64Registers::sp; + static const RegisterID framePointerRegister = ARM64Registers::fp; + static const RegisterID linkRegister = ARM64Registers::lr; + + // FIXME: Get reasonable implementations for these + static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; } + static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; } + + // Integer operations: + + void add32(RegisterID a, RegisterID b, RegisterID dest) + { + ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp); + m_assembler.add<32>(dest, a, b); + } + + void add32(RegisterID src, RegisterID dest) + { + m_assembler.add<32>(dest, dest, src); + } + + void add32(TrustedImm32 imm, RegisterID dest) + { + if (!imm.m_value) + return; + + add32(imm, dest, dest); + } + + void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (!imm.m_value) { + move(src, dest); + return; + } + + if (isUInt12(imm.m_value)) + m_assembler.add<32>(dest, src, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.sub<32>(dest, src, UInt12(-imm.m_value)); + else { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<32>(dest, src, dataTempRegister); + } + } + + void add32(TrustedImm32 imm, Address address) + { + if (!imm.m_value) + return; + + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + else { + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + } + + store32(dataTempRegister, address); + } + + void add32(TrustedImm32 imm, AbsoluteAddress address) + { + if (!imm.m_value) + return; + + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + if (isUInt12(-imm.m_value)) { + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + store32(dataTempRegister, address.m_ptr); + } + + void add32(Address src, RegisterID dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + add32(dataTempRegister, dest); + } + + void add64(RegisterID a, RegisterID b, RegisterID dest) + { + ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp); + if (b == ARM64Registers::sp) + std::swap(a, b); + m_assembler.add<64>(dest, a, b); + } + + void add64(RegisterID src, RegisterID dest) + { + if (src == ARM64Registers::sp) + m_assembler.add<64>(dest, src, dest); + else + m_assembler.add<64>(dest, dest, src); + } + + void add64(TrustedImm32 imm, RegisterID dest) 
+ { + if (!imm.m_value) + return; + + if (isUInt12(imm.m_value)) { + m_assembler.add<64>(dest, dest, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value)); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void add64(TrustedImm64 imm, RegisterID dest) + { + intptr_t immediate = imm.m_value; + if (!immediate) + return; + + if (isUInt12(immediate)) { + m_assembler.add<64>(dest, dest, UInt12(static_cast(immediate))); + return; + } + if (isUInt12(-immediate)) { + m_assembler.sub<64>(dest, dest, UInt12(static_cast(-immediate))); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void add64(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (!imm.m_value) { + move(src, dest); + return; + } + + if (isUInt12(imm.m_value)) { + m_assembler.add<64>(dest, src, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64>(dest, src, UInt12(-imm.m_value)); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, src, dataTempRegister); + } + + void add64(TrustedImm32 imm, Address address) + { + if (!imm.m_value) + return; + + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) + m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + else { + signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister); + } + + store64(dataTempRegister, address); + } + + void add64(TrustedImm32 imm, AbsoluteAddress address) + { + if (!imm.m_value) + return; + + load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store64(dataTempRegister, address.m_ptr); + return; + } + + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store64(dataTempRegister, address.m_ptr); + return; + } + + signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister); + store64(dataTempRegister, address.m_ptr); + } + + void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) + { + add64(imm, srcDest); + } + + void add64(Address src, RegisterID dest) + { + load64(src, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void add64(AbsoluteAddress src, RegisterID dest) + { + load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.add<64>(dest, dest, dataTempRegister); + } + + void and32(RegisterID src, RegisterID dest) + { + and32(dest, src, dest); + } + + void and32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.and_<32>(dest, op1, op2); + } + + void and32(TrustedImm32 imm, RegisterID dest) + { + and32(imm, dest, dest); + } + + void and32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.and_<32>(dest, src, logicalImm); + return; + } + + move(imm, 
getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<32>(dest, src, dataTempRegister); + } + + void and32(Address src, RegisterID dest) + { + load32(src, dataTempRegister); + and32(dataTempRegister, dest); + } + + void and64(RegisterID src, RegisterID dest) + { + m_assembler.and_<64>(dest, dest, src); + } + + void and64(TrustedImm32 imm, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast(static_cast(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.and_<64>(dest, dest, logicalImm); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<64>(dest, dest, dataTempRegister); + } + + void and64(TrustedImmPtr imm, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast(imm.m_value)); + + if (logicalImm.isValid()) { + m_assembler.and_<64>(dest, dest, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<64>(dest, dest, dataTempRegister); + } + + void countLeadingZeros32(RegisterID src, RegisterID dest) + { + m_assembler.clz<32>(dest, src); + } + + void countLeadingZeros64(RegisterID src, RegisterID dest) + { + m_assembler.clz<64>(dest, src); + } + + void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsl<32>(dest, src, shiftAmount); + } + + void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f); + } + + void lshift32(RegisterID shiftAmount, RegisterID dest) + { + lshift32(dest, shiftAmount, dest); + } + + void lshift32(TrustedImm32 imm, RegisterID dest) + { + lshift32(dest, imm, dest); + } + + void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsl<64>(dest, src, shiftAmount); + } + + void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f); + } + + void lshift64(RegisterID shiftAmount, RegisterID dest) + { + lshift64(dest, shiftAmount, dest); + } + + void lshift64(TrustedImm32 imm, RegisterID dest) + { + lshift64(dest, imm, dest); + } + + void mul32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.mul<32>(dest, left, right); + } + + void mul32(RegisterID src, RegisterID dest) + { + m_assembler.mul<32>(dest, dest, src); + } + + void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.mul<32>(dest, src, dataTempRegister); + } + + void mul32(Address src, RegisterID dest) + { + load32(src, dataTempRegister); + mul32(dataTempRegister, dest); + } + + void mul64(RegisterID src, RegisterID dest) + { + m_assembler.mul<64>(dest, dest, src); + } + + void mul64(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.mul<64>(dest, left, right); + } + + void div32(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.sdiv<32>(dest, dividend, divisor); + } + + void div64(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.sdiv<64>(dest, dividend, divisor); + } + + void neg32(RegisterID dest) + { + m_assembler.neg<32>(dest, dest); + } + + void neg64(RegisterID dest) + { + m_assembler.neg<64>(dest, dest); + } + + void or32(RegisterID src, RegisterID dest) + { + or32(dest, src, dest); + } + + void or32(Address src, RegisterID dest) + { + load32(src, dataTempRegister); + or32(dataTempRegister, dest); + } + + void or32(RegisterID op1, RegisterID op2, 
RegisterID dest) + { + m_assembler.orr<32>(dest, op1, op2); + } + + void or32(TrustedImm32 imm, RegisterID dest) + { + or32(imm, dest, dest); + } + + void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.orr<32>(dest, src, logicalImm); + return; + } + + ASSERT(src != dataTempRegister); + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dest, src, dataTempRegister); + } + + void or32(RegisterID src, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dataTempRegister, dataTempRegister, src); + store32(dataTempRegister, address.m_ptr); + } + + void or32(TrustedImm32 imm, AbsoluteAddress address) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + if (logicalImm.isValid()) { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm); + store32(dataTempRegister, address.m_ptr); + } else { + load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate()); + store32(dataTempRegister, address.m_ptr); + } + } + + void or32(TrustedImm32 imm, Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + or32(imm, dataTempRegister, dataTempRegister); + store32(dataTempRegister, address); + } + + void or64(RegisterID src, RegisterID dest) + { + or64(dest, src, dest); + } + + void or64(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.orr<64>(dest, op1, op2); + } + + void or64(TrustedImm32 imm, RegisterID dest) + { + or64(imm, dest, dest); + } + + void or64(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast(static_cast(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.orr<64>(dest, src, logicalImm); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<64>(dest, src, dataTempRegister); + } + + void or64(TrustedImm64 imm, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast(static_cast(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.orr<64>(dest, dest, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<64>(dest, dest, dataTempRegister); + } + + void rotateRight64(TrustedImm32 imm, RegisterID srcDst) + { + m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63); + } + + void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.asr<32>(dest, src, shiftAmount); + } + + void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.asr<32>(dest, src, imm.m_value & 0x1f); + } + + void rshift32(RegisterID shiftAmount, RegisterID dest) + { + rshift32(dest, shiftAmount, dest); + } + + void rshift32(TrustedImm32 imm, RegisterID dest) + { + rshift32(dest, imm, dest); + } + + void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.asr<64>(dest, src, shiftAmount); + } + + void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.asr<64>(dest, src, imm.m_value & 0x3f); + } + + void rshift64(RegisterID shiftAmount, RegisterID dest) + { + rshift64(dest, shiftAmount, dest); + } + + void rshift64(TrustedImm32 imm, RegisterID dest) 
+ { + rshift64(dest, imm, dest); + } + + void sub32(RegisterID src, RegisterID dest) + { + m_assembler.sub<32>(dest, dest, src); + } + + void sub32(TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<32>(dest, dest, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<32>(dest, dest, UInt12(-imm.m_value)); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.sub<32>(dest, dest, dataTempRegister); + } + + void sub32(TrustedImm32 imm, Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + else if (isUInt12(-imm.m_value)) + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + else { + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + } + + store32(dataTempRegister, address); + } + + void sub32(TrustedImm32 imm, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + if (isUInt12(-imm.m_value)) { + m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store32(dataTempRegister, address.m_ptr); + return; + } + + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister); + store32(dataTempRegister, address.m_ptr); + } + + void sub32(Address src, RegisterID dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + sub32(dataTempRegister, dest); + } + + void sub64(RegisterID src, RegisterID dest) + { + m_assembler.sub<64>(dest, dest, src); + } + + void sub64(TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<64>(dest, dest, UInt12(imm.m_value)); + return; + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<64>(dest, dest, UInt12(-imm.m_value)); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.sub<64>(dest, dest, dataTempRegister); + } + + void sub64(TrustedImm64 imm, RegisterID dest) + { + intptr_t immediate = imm.m_value; + + if (isUInt12(immediate)) { + m_assembler.sub<64>(dest, dest, UInt12(static_cast(immediate))); + return; + } + if (isUInt12(-immediate)) { + m_assembler.add<64>(dest, dest, UInt12(static_cast(-immediate))); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.sub<64>(dest, dest, dataTempRegister); + } + + void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsr<32>(dest, src, shiftAmount); + } + + void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f); + } + + void urshift32(RegisterID shiftAmount, RegisterID dest) + { + urshift32(dest, shiftAmount, dest); + } + + void urshift32(TrustedImm32 imm, RegisterID dest) + { + urshift32(dest, imm, dest); + } + + void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsr<64>(dest, src, shiftAmount); + } + + void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f); + } + + void urshift64(RegisterID shiftAmount, RegisterID dest) + { 
+ urshift64(dest, shiftAmount, dest); + } + + void urshift64(TrustedImm32 imm, RegisterID dest) + { + urshift64(dest, imm, dest); + } + + void xor32(Address src, RegisterID dest) + { + load32(src, dataTempRegister); + xor32(dataTempRegister, dest); + } + + void xor32(RegisterID src, RegisterID dest) + { + xor32(dest, src, dest); + } + + void xor32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.eor<32>(dest, op1, op2); + } + + void xor32(TrustedImm32 imm, RegisterID dest) + { + xor32(imm, dest, dest); + } + + void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (imm.m_value == -1) + m_assembler.mvn<32>(dest, src); + else { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.eor<32>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<32>(dest, src, dataTempRegister); + } + } + + void xor64(RegisterID src, Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<64>(dataTempRegister, dataTempRegister, src); + store64(dataTempRegister, address); + } + + void xor64(RegisterID src, RegisterID dest) + { + xor64(dest, src, dest); + } + + void xor64(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.eor<64>(dest, op1, op2); + } + + void xor64(TrustedImm32 imm, RegisterID dest) + { + xor64(imm, dest, dest); + } + + void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + if (imm.m_value == -1) + m_assembler.mvn<64>(dest, src); + else { + LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast(static_cast(imm.m_value))); + + if (logicalImm.isValid()) { + m_assembler.eor<64>(dest, src, logicalImm); + return; + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<64>(dest, src, dataTempRegister); + } + } + + void not32(RegisterID src, RegisterID dest) + { + m_assembler.mvn<32>(dest, src); + } + + void not64(RegisterID src, RegisterID dest) + { + m_assembler.mvn<64>(dest, src); + } + + // Memory access operations: + + void load64(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<64>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void load64(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void load64(const void* address, RegisterID dest) + { + load<64>(address, dest); + } + + DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest) + { + ASSERT(isCompactPtrAlignedAddressOffset(address.offset)); + DataLabelCompact label(this); + 
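+        // The assert above guarantees the offset is non-negative, 8-byte
+        // aligned and small enough to be encoded directly as a scaled
+        // unsigned immediate, so no temporary register is needed here.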
m_assembler.ldr<64>(dest, address.base, address.offset); + return label; + } + + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) + { + ConvertibleLoadLabel result(this); + ASSERT(!(address.offset & ~0xff8)); + m_assembler.ldr<64>(dest, address.base, address.offset); + return result; + } + + void load32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<32>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void load32(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void load32(const void* address, RegisterID dest) + { + load<32>(address, dest); + } + + DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest) + { + ASSERT(isCompactPtrAlignedAddressOffset(address.offset)); + DataLabelCompact label(this); + m_assembler.ldr<32>(dest, address.base, address.offset); + return label; + } + + void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) + { + load32(address, dest); + } + + void load16(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<16>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrh(dest, address.base, memoryTempRegister); + } + + void load16(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 1)) { + m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrh(dest, address.base, memoryTempRegister); + } + + void load16Unaligned(BaseIndex address, RegisterID dest) + { + load16(address, dest); + } + + void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister); + } + + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 1)) { + m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, 
address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister); + } + + void zeroExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.uxth<64>(dest, src); + } + + void signExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.sxth<64>(dest, src); + } + + void load8(ImplicitAddress address, RegisterID dest) + { + if (tryLoadWithOffset<8>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrb(dest, address.base, memoryTempRegister); + } + + void load8(BaseIndex address, RegisterID dest) + { + if (!address.offset && !address.scale) { + m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrb(dest, address.base, memoryTempRegister); + } + + void load8(const void* address, RegisterID dest) + { + moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister); + m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr); + if (dest == memoryTempRegister) + m_cachedMemoryTempRegister.invalidate(); + } + + void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister); + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) + { + if (!address.offset && !address.scale) { + m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister); + } + + void zeroExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.uxtb<64>(dest, src); + } + + void signExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.sxtb<64>(dest, src); + } + + void store64(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<64>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void store64(RegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void store64(RegisterID src, const void* address) + { + store<64>(src, address); + } + + void store64(TrustedImm32 imm, ImplicitAddress address) + { + store64(TrustedImm64(imm.m_value), address); + } + + void store64(TrustedImm64 imm, ImplicitAddress address) + { + if (!imm.m_value) { + store64(ARM64Registers::zr, 
address); + return; + } + + moveToCachedReg(imm, m_dataMemoryTempRegister); + store64(dataTempRegister, address); + } + + void store64(TrustedImm64 imm, BaseIndex address) + { + if (!imm.m_value) { + store64(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, m_dataMemoryTempRegister); + store64(dataTempRegister, address); + } + + DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + void store32(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<32>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void store32(RegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void store32(RegisterID src, const void* address) + { + store<32>(src, address); + } + + void store32(TrustedImm32 imm, ImplicitAddress address) + { + if (!imm.m_value) { + store32(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, m_dataMemoryTempRegister); + store32(dataTempRegister, address); + } + + void store32(TrustedImm32 imm, BaseIndex address) + { + if (!imm.m_value) { + store32(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, m_dataMemoryTempRegister); + store32(dataTempRegister, address); + } + + void store32(TrustedImm32 imm, const void* address) + { + if (!imm.m_value) { + store32(ARM64Registers::zr, address); + return; + } + + moveToCachedReg(imm, m_dataMemoryTempRegister); + store32(dataTempRegister, address); + } + + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) + { + DataLabel32 label(this); + signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0); + return label; + } + + void store16(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<16>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<16>(src, address.base, memoryTempRegister); + } + + void store16(RegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 1)) { + m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.strh(src, address.base, memoryTempRegister); + } + + void store8(RegisterID src, BaseIndex address) + { + if (!address.offset && !address.scale) { + m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, 
address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.strb(src, address.base, memoryTempRegister); + } + + void store8(RegisterID src, void* address) + { + move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.strb(src, memoryTempRegister, 0); + } + + void store8(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<8>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<8>(src, address.base, memoryTempRegister); + } + + void store8(TrustedImm32 imm, void* address) + { + if (!imm.m_value) { + store8(ARM64Registers::zr, address); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + store8(dataTempRegister, address); + } + + void store8(TrustedImm32 imm, ImplicitAddress address) + { + if (!imm.m_value) { + store8(ARM64Registers::zr, address); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + store8(dataTempRegister, address); + } + + // Floating-point operations: + + static bool supportsFloatingPoint() { return true; } + static bool supportsFloatingPointTruncate() { return true; } + static bool supportsFloatingPointSqrt() { return true; } + static bool supportsFloatingPointAbs() { return true; } + static bool supportsFloatingPointCeil() { return true; } + + enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful }; + + void absDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fabs<64>(dest, src); + } + + void absFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fabs<32>(dest, src); + } + + void addDouble(FPRegisterID src, FPRegisterID dest) + { + addDouble(dest, src, dest); + } + + void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fadd<64>(dest, op1, op2); + } + + void addDouble(Address src, FPRegisterID dest) + { + loadDouble(src, fpTempRegister); + addDouble(fpTempRegister, dest); + } + + void addDouble(AbsoluteAddress address, FPRegisterID dest) + { + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); + addDouble(fpTempRegister, dest); + } + + void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fadd<32>(dest, op1, op2); + } + + void ceilDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintp<64>(dest, src); + } + + void ceilFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintp<32>(dest, src); + } + + void floorDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintm<64>(dest, src); + } + + // Convert 'src' to an integer, and places the resulting 'dest'. + // If the result is not representable as a 32 bit value, branch. + // May also branch for some values that are representable in 32 bits + // (specifically, in this case, 0). + void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true) + { + m_assembler.fcvtns<32, 64>(dest, src); + + // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. + m_assembler.scvtf<64, 32>(fpTempRegister, dest); + failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister)); + + // Test for negative zero. 
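+        // -0.0 converts to the integer 0, which converts back to +0.0 and so
+        // passes the round-trip check above. When the caller asks for it, the
+        // raw bits of 'src' are therefore moved to a scratch GPR and bit 63
+        // (the sign bit) is tested; a set sign bit on a zero result means the
+        // input was negative zero.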
+ if (negZeroCheck) { + Jump valueIsNonZero = branchTest32(NonZero, dest); + RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate(); + m_assembler.fmov<64>(scratch, src); + failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero)); + valueIsNonZero.link(this); + } + } + + Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + m_assembler.fcmp<64>(left, right); + return jumpAfterFloatingPointCompare(cond); + } + + Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + m_assembler.fcmp<32>(left, right); + return jumpAfterFloatingPointCompare(cond); + } + + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID) + { + m_assembler.fcmp_0<64>(reg); + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump result = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + return result; + } + + Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID) + { + m_assembler.fcmp_0<64>(reg); + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump notEqual = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + // We get here if either unordered or equal. + Jump result = jump(); + notEqual.link(this); + return result; + } + + Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) + { + // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest. + m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src); + zeroExtend32ToPtr(dataTempRegister, dest); + // Check that the low 32 bits sign-extend to be equal to the full value. + m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0); + return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual)); + } + + Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) + { + // Truncate to a 64-bit unsigned integer in dataTempRegister, copy the low 32 bits to dest. + m_assembler.fcvtzu<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src); + zeroExtend32ToPtr(dataTempRegister, dest); + // Check that the low 32 bits sign-extend to be equal to the full value. + m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0); + return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ?
Equal : NotEqual)); + } + + void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fcvt<32, 64>(dest, src); + } + + void convertFloatToDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fcvt<64, 32>(dest, src); + } + + void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + convertInt32ToDouble(dataTempRegister, dest); + } + + void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID /*scratch*/) + { + m_assembler.ucvtf<64, 32>(dest, src); + } + + void convertInt32ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.scvtf<64, 32>(dest, src); + } + + void convertInt32ToDouble(Address address, FPRegisterID dest) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + convertInt32ToDouble(dataTempRegister, dest); + } + + void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + convertInt32ToDouble(dataTempRegister, dest); + } + + void convertInt64ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.scvtf<64, 64>(dest, src); + } + + void divDouble(FPRegisterID src, FPRegisterID dest) + { + divDouble(dest, src, dest); + } + + void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fdiv<64>(dest, op1, op2); + } + + void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fdiv<32>(dest, op1, op2); + } + + void loadDouble(ImplicitAddress address, FPRegisterID dest) + { + if (tryLoadWithOffset<64>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void loadDouble(BaseIndex address, FPRegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<64>(dest, address.base, memoryTempRegister); + } + + void loadDouble(TrustedImmPtr address, FPRegisterID dest) + { + moveToCachedReg(address, m_cachedMemoryTempRegister); + m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr); + } + + void loadFloat(ImplicitAddress address, FPRegisterID dest) + { + if (tryLoadWithOffset<32>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void loadFloat(BaseIndex address, FPRegisterID dest) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + + void moveDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fmov<64>(dest, src); + } + + void moveZeroToDouble(FPRegisterID reg) + { + m_assembler.fmov<64>(reg, 
ARM64Registers::zr); + } + + void moveDoubleTo64(FPRegisterID src, RegisterID dest) + { + m_assembler.fmov<64>(dest, src); + } + + void moveFloatTo32(FPRegisterID src, RegisterID dest) + { + m_assembler.fmov<32>(dest, src); + } + + void move64ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.fmov<64>(dest, src); + } + + void move32ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.fmov<32>(dest, src); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest); + } + + template + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.csel(dest, src, dest, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, src is copied to dest and the + // next csel has all arguments equal to src. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. + m_assembler.csel(dest, src, dest, ARM64Assembler::ConditionVS); + m_assembler.csel(dest, src, dest, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.csel(dest, src, dest, ARM64Condition(cond)); + } + + void mulDouble(FPRegisterID src, FPRegisterID dest) + { + mulDouble(dest, src, dest); + } + + void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fmul<64>(dest, op1, op2); + } + + void mulDouble(Address src, FPRegisterID dest) + { + loadDouble(src, fpTempRegister); + mulDouble(fpTempRegister, dest); + } + + void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fmul<32>(dest, op1, op2); + } + + void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vand<64>(dest, op1, op2); + } + + void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + andDouble(op1, op2, dest); + } + + void negateDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fneg<64>(dest, src); + } + + void sqrtDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fsqrt<64>(dest, src); + } + + void sqrtFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fsqrt<32>(dest, src); + } + + void storeDouble(FPRegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<64>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void storeDouble(FPRegisterID src, TrustedImmPtr address) + { + moveToCachedReg(address, m_cachedMemoryTempRegister); + m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr); + } + + void storeDouble(FPRegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 3)) { + m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + 
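+        // Common fallback pattern in this file for BaseIndex addresses that
+        // cannot be encoded directly: sign-extend the 32-bit offset into
+        // memoryTempRegister, add the scaled index to it, then perform the
+        // actual access with register-register addressing.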
m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<64>(src, address.base, memoryTempRegister); + } + + void storeFloat(FPRegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<32>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void storeFloat(FPRegisterID src, BaseIndex address) + { + if (!address.offset && (!address.scale || address.scale == 2)) { + m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale); + return; + } + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } + + void subDouble(FPRegisterID src, FPRegisterID dest) + { + subDouble(dest, src, dest); + } + + void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fsub<64>(dest, op1, op2); + } + + void subDouble(Address src, FPRegisterID dest) + { + loadDouble(src, fpTempRegister); + subDouble(fpTempRegister, dest); + } + + void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fsub<32>(dest, op1, op2); + } + + // Result is undefined if the value is outside of the integer range. + void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzs<32, 64>(dest, src); + } + + void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzu<32, 64>(dest, src); + } + + + // Stack manipulation operations: + // + // The ABI is assumed to provide a stack abstraction to memory, + // containing machine word sized units of data. Push and pop + // operations add and remove a single register sized unit of data + // to or from the stack. These operations are not supported on + // ARM64. Peek and poke operations read or write values on the + // stack, without moving the current stack position. Additionally, + // there are popToRestore and pushToSave operations, which are + // designed just for quick-and-dirty saving and restoring of + // temporary values. These operations don't claim to have any + // ABI compatibility. 
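+    // On AArch64 the stack pointer must stay 16-byte aligned whenever it is used as the base
+    // register of a load or store (and the AAPCS64 ABI requires that alignment at call
+    // boundaries), which is why the single-register push()/pop() variants below simply CRASH()
+    // and callers use the 16-byte pair / pushToSave forms instead. A minimal usage sketch
+    // (the register choice is illustrative only):
+    //
+    //     masm.pushPair(ARM64Registers::x19, ARM64Registers::x20); // stp x19, x20, [sp, #-16]!
+    //     ... code that clobbers x19 / x20 ...
+    //     masm.popPair(ARM64Registers::x19, ARM64Registers::x20);  // ldp x19, x20, [sp], #16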
+ + void pop(RegisterID) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void push(RegisterID) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void push(Address) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH + { + CRASH(); + } + + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16)); + } + + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16)); + } + + void popToRestore(RegisterID dest) + { + m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16)); + } + + void pushToSave(RegisterID src) + { + m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16)); + } + + void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm) + { + RegisterID reg = dataTempRegister; + pushPair(reg, reg); + move(imm, reg); + store64(reg, stackPointerRegister); + load64(Address(stackPointerRegister, 8), reg); + } + + void pushToSave(Address address) + { + load32(address, getCachedDataTempRegisterIDAndInvalidate()); + pushToSave(dataTempRegister); + } + + void pushToSave(TrustedImm32 imm) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + pushToSave(dataTempRegister); + } + + void popToRestore(FPRegisterID dest) + { + loadDouble(stackPointerRegister, dest); + add64(TrustedImm32(16), stackPointerRegister); + } + + void pushToSave(FPRegisterID src) + { + sub64(TrustedImm32(16), stackPointerRegister); + storeDouble(src, stackPointerRegister); + } + + static ptrdiff_t pushToSaveByteOffset() { return 16; } + + // Register move operations: + + void move(RegisterID src, RegisterID dest) + { + if (src != dest) + m_assembler.mov<64>(dest, src); + } + + void move(TrustedImm32 imm, RegisterID dest) + { + moveInternal(imm, dest); + } + + void move(TrustedImmPtr imm, RegisterID dest) + { + moveInternal(imm, dest); + } + + void move(TrustedImm64 imm, RegisterID dest) + { + moveInternal(imm, dest); + } + + void swap(RegisterID reg1, RegisterID reg2) + { + move(reg1, getCachedDataTempRegisterIDAndInvalidate()); + move(reg2, reg1); + move(dataTempRegister, reg2); + } + + void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest) + { + move(TrustedImmPtr(reinterpret_cast(static_cast(imm.m_value))), dest); + } + + void signExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.sxtw(dest, src); + } + + void zeroExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.uxtw(dest, src); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.tst<32>(testReg, mask); + m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.tst<64>(testReg, mask); + m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond)); + } + + // Forwards / external control flow operations: + // + // This set of jump and conditional branch operations 
return a Jump
+    // object which may be linked at a later point, allowing forward jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+    // treated as a signed 32-bit value, is less than or equal to 5.
+    //
+    // jz and jnz test whether the first operand is equal to zero, and take
+    // an optional second operand of a mask under which to perform the test.
+
+    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        m_assembler.cmp<32>(left, right);
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+    {
+        if (isUInt12(right.m_value))
+            m_assembler.cmp<32>(left, UInt12(right.m_value));
+        else if (isUInt12(-right.m_value))
+            m_assembler.cmn<32>(left, UInt12(-right.m_value));
+        else {
+            moveToCachedReg(right, m_dataMemoryTempRegister);
+            m_assembler.cmp<32>(left, dataTempRegister);
+        }
+        return Jump(makeBranch(cond));
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+    {
+        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, left, memoryTempRegister);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+        return branch32(cond, dataTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+        return branch32(cond, memoryTempRegister, right);
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        if (right == ARM64Registers::sp) {
+            if (cond == Equal && left != ARM64Registers::sp) {
+                // CMP can only use SP for the left argument. Since we are testing for equality, the order
+                // does not matter here.
+ std::swap(left, right); + } else { + move(right, getCachedDataTempRegisterIDAndInvalidate()); + right = dataTempRegister; + } + } + m_assembler.cmp<64>(left, right); + return Jump(makeBranch(cond)); + } + + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right) + { + intptr_t immediate = right.m_value; + if (isUInt12(immediate)) + m_assembler.cmp<64>(left, UInt12(static_cast(immediate))); + else if (isUInt12(-immediate)) + m_assembler.cmn<64>(left, UInt12(static_cast(-immediate))); + else { + moveToCachedReg(right, m_dataMemoryTempRegister); + m_assembler.cmp<64>(left, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branch64(RelationalCondition cond, RegisterID left, Address right) + { + load64(right, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, left, memoryTempRegister); + } + + Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right) + { + load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branch64(cond, dataTempRegister, right); + } + + Jump branch64(RelationalCondition cond, Address left, RegisterID right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + + Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + + Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) + { + ASSERT(!(0xffffff00 & right.m_value)); + load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) + { + ASSERT(!(0xffffff00 & right.m_value)); + load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) + { + ASSERT(!(0xffffff00 & right.m_value)); + load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right); + } + + Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) + { + m_assembler.tst<32>(reg, mask); + return Jump(makeBranch(cond)); + } + + void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) + m_assembler.tst<32>(reg, reg); + else { + bool testedWithImmediate = false; + if ((cond == Zero) || (cond == NonZero)) { + LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); + + if (logicalImm.isValid()) { + m_assembler.tst<32>(reg, logicalImm); + testedWithImmediate = true; + } + } + if (!testedWithImmediate) { + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<32>(reg, dataTempRegister); + } + } + } + + Jump branch(ResultCondition cond) + { + return Jump(makeBranch(cond)); + } + + Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) { + if ((cond == Zero) || (cond == NonZero)) + return Jump(makeCompareAndBranch<32>(static_cast(cond), reg)); + m_assembler.tst<32>(reg, reg); + } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == 
NonZero))) + return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast(cond))); + else { + if ((cond == Zero) || (cond == NonZero)) { + LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); + + if (logicalImm.isValid()) { + m_assembler.tst<32>(reg, logicalImm); + return Jump(makeBranch(cond)); + } + } + + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<32>(reg, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + return branchTest32(cond, memoryTempRegister, mask); + } + + Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + return branchTest32(cond, memoryTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) + { + m_assembler.tst<64>(reg, mask); + return Jump(makeBranch(cond)); + } + + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) { + if ((cond == Zero) || (cond == NonZero)) + return Jump(makeCompareAndBranch<64>(static_cast(cond), reg)); + m_assembler.tst<64>(reg, reg); + } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero))) + return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast(cond))); + else { + if ((cond == Zero) || (cond == NonZero)) { + LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value); + + if (logicalImm.isValid()) { + m_assembler.tst<64>(reg, logicalImm); + return Jump(makeBranch(cond)); + } + } + + signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<64>(reg, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) + { + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, reg, dataTempRegister); + } + + Jump branchTest64(ResultCondition cond, Address address, RegisterID mask) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) + { + load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest64(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 
mask = TrustedImm32(-1)) + { + move(TrustedImmPtr(reinterpret_cast(address.offset)), getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) + { + load8(address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask); + } + + Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right) + { + return branch32(cond, left, right); + } + + + // Arithmetic control flow operations: + // + // This set of conditional branch operations branch based + // on the result of an arithmetic operation. The operation + // is performed as normal, storing the result. + // + // * jz operations branch if the result is zero. + // * jo operations branch if the (signed) arithmetic + // operation caused an overflow to occur. + + Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.add<32, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchAdd32(cond, op1, dataTempRegister, dest); + } + + Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest) + { + load32(src, getCachedDataTempRegisterIDAndInvalidate()); + return branchAdd32(cond, dest, dataTempRegister, dest); + } + + Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchAdd32(cond, dest, src, dest); + } + + Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchAdd32(cond, dest, imm, dest); + } + + Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address) + { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + + if (isUInt12(imm.m_value)) { + m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value)); + store32(dataTempRegister, address.m_ptr); + } else if (isUInt12(-imm.m_value)) { + m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value)); + store32(dataTempRegister, address.m_ptr); + } else { + move(imm, getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister); + store32(dataTempRegister, address.m_ptr); + } + + return Jump(makeBranch(cond)); + } + + Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.add<64, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchAdd64(cond, op1, dataTempRegister, dest); + } + + Jump branchAdd64(ResultCondition cond, RegisterID src, 
RegisterID dest) + { + return branchAdd64(cond, dest, src, dest); + } + + Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchAdd64(cond, dest, imm, dest); + } + + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest) + { + ASSERT(cond != Signed); + + if (cond != Overflow) { + m_assembler.mul<32>(dest, src1, src2); + return branchTest32(cond, dest); + } + + // This is a signed multiple of two 32-bit values, producing a 64-bit result. + m_assembler.smull(dest, src1, src2); + // Copy bits 63..32 of the result to bits 31..0 of scratch1. + m_assembler.asr<64>(scratch1, dest, 32); + // Splat bit 31 of the result to bits 31..0 of scratch2. + m_assembler.asr<32>(scratch2, dest, 31); + // After a mul32 the top 32 bits of the register should be clear. + zeroExtend32ToPtr(dest, dest); + // Check that bits 31..63 of the original result were all equal. + return branch32(NotEqual, scratch2, scratch1); + } + + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest); + } + + Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchMul32(cond, dest, src, dest); + } + + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchMul32(cond, dataTempRegister, src, dest); + } + + Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + { + move(imm, dataTempRegister); + return branchMul32(cond, dataTempRegister, src, dest); + } + + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest) + { + ASSERT(cond != Signed); + + // This is a signed multiple of two 64-bit values, producing a 64-bit result. + m_assembler.mul<64>(dest, src1, src2); + + if (cond != Overflow) + return branchTest64(cond, dest); + + // Compute bits 127..64 of the result into scratch1. + m_assembler.smulh(scratch1, src1, src2); + // Splat bit 63 of the result to bits 63..0 of scratch2. + m_assembler.asr<64>(scratch2, dest, 63); + // Check that bits 31..63 of the original result were all equal. 
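+        // (In this 64-bit case the bits of interest are really 63..127 of the full product:
+        // smulh left them in scratch1, and when the multiply did not overflow they all equal
+        // the sign bit of the low half, which the asr above replicated into scratch2.)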
+ return branch64(NotEqual, scratch2, scratch1); + } + + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest); + } + + Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchMul64(cond, dest, src, dest); + } + + Jump branchNeg32(ResultCondition cond, RegisterID dest) + { + m_assembler.neg<32, S>(dest, dest); + return Jump(makeBranch(cond)); + } + + Jump branchNeg64(ResultCondition cond, RegisterID srcDest) + { + m_assembler.neg<64, S>(srcDest, srcDest); + return Jump(makeBranch(cond)); + } + + Jump branchSub32(ResultCondition cond, RegisterID dest) + { + m_assembler.neg<32, S>(dest, dest); + return Jump(makeBranch(cond)); + } + + Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.sub<32, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchSub32(cond, op1, dataTempRegister, dest); + } + + Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchSub32(cond, dest, src, dest); + } + + Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchSub32(cond, dest, imm, dest); + } + + Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.sub<64, S>(dest, op1, op2); + return Jump(makeBranch(cond)); + } + + Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest) + { + if (isUInt12(imm.m_value)) { + m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + if (isUInt12(-imm.m_value)) { + m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value)); + return Jump(makeBranch(cond)); + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + return branchSub64(cond, op1, dataTempRegister, dest); + } + + Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchSub64(cond, dest, src, dest); + } + + Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) + { + return branchSub64(cond, dest, imm, dest); + } + + + // Jumps, calls, returns + + ALWAYS_INLINE Call call() + { + AssemblerLabel pointerLabel = m_assembler.label(); + moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate()); + invalidateAllTempRegisters(); + m_assembler.blr(dataTempRegister); + AssemblerLabel callLabel = m_assembler.label(); + ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER); + return Call(callLabel, Call::Linkable); + } + + ALWAYS_INLINE Call call(RegisterID target) + { + invalidateAllTempRegisters(); + m_assembler.blr(target); + return Call(m_assembler.label(), Call::None); + } + + ALWAYS_INLINE Call call(Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + return call(dataTempRegister); + } + + ALWAYS_INLINE Jump jump() + { + AssemblerLabel label = 
m_assembler.label(); + m_assembler.b(); + return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition); + } + + void jump(RegisterID target) + { + m_assembler.br(target); + } + + void jump(Address address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.br(dataTempRegister); + } + + void jump(AbsoluteAddress address) + { + move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate()); + load64(Address(dataTempRegister), dataTempRegister); + m_assembler.br(dataTempRegister); + } + + ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump) + { + oldJump.link(this); + return tailRecursiveCall(); + } + + ALWAYS_INLINE Call nearCall() + { + m_assembler.bl(); + return Call(m_assembler.label(), Call::LinkableNear); + } + +#if 0 + ALWAYS_INLINE Call nearTailCall() + { + AssemblerLabel label = m_assembler.label(); + m_assembler.b(); + return Call(label, Call::LinkableNearTail); + } +#endif + + ALWAYS_INLINE void ret() + { + m_assembler.ret(); + } + + ALWAYS_INLINE Call tailRecursiveCall() + { + // Like a normal call, but don't link. + AssemblerLabel pointerLabel = m_assembler.label(); + moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.br(dataTempRegister); + AssemblerLabel callLabel = m_assembler.label(); + ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER); + return Call(callLabel, Call::Linkable); + } + + + // Comparisons operations + + void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest) + { + load32(left, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<32>(dataTempRegister, right); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) + { + move(right, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<32>(left, dataTempRegister); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) + { + signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<64>(left, dataTempRegister); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) + { + load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); + move(right, getCachedDataTempRegisterIDAndInvalidate()); + compare32(cond, memoryTempRegister, dataTempRegister, dest); + } + + void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest) + { + m_assembler.tst<32>(src, mask); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest) + { + if (mask.m_value == -1) + m_assembler.tst<32>(src, src); + else { + signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<32>(src, dataTempRegister); + } + m_assembler.cset<32>(dest, 
ARM64Condition(cond)); + } + + void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) + { + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + test32(cond, memoryTempRegister, mask, dest); + } + + void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) + { + load8(address, getCachedMemoryTempRegisterIDAndInvalidate()); + test32(cond, memoryTempRegister, mask, dest); + } + + void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.tst<64>(op1, op2); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest) + { + if (mask.m_value == -1) + m_assembler.tst<64>(src, src); + else { + signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<64>(src, dataTempRegister); + } + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + + void setCarry(RegisterID dest) + { + m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS); + } + + // Patchable operations + + ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest) + { + DataLabel32 label(this); + moveWithFixedWidth(imm, dest); + return label; + } + + ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest) + { + DataLabelPtr label(this); + moveWithFixedWidth(imm, dest); + return label; + } + + ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) + { + dataLabel = DataLabelPtr(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch64(cond, left, dataTempRegister); + } + + ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) + { + dataLabel = DataLabelPtr(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch64(cond, left, dataTempRegister); + } + + ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + dataLabel = DataLabel32(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch32(cond, left, dataTempRegister); + } + + PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, left, TrustedImm64(right)); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + m_makeJumpPatchable = true; + Jump result = branchTest32(cond, reg, mask); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) + { + m_makeJumpPatchable = true; + Jump result = branch32(cond, reg, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, reg, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) + { + m_makeJumpPatchable = true; + Jump 
result = branch64(cond, left, right); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) + { + m_makeJumpPatchable = true; + Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_makeJumpPatchable = true; + Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableJump() + { + m_makeJumpPatchable = true; + Jump result = jump(); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) + { + DataLabelPtr label(this); + moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate()); + store64(dataTempRegister, address); + return label; + } + + ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) + { + return storePtrWithPatch(TrustedImmPtr(0), address); + } + + static void reemitInitialMoveWithPatch(void* address, void* value) + { + ARM64Assembler::setPointer(static_cast(address), value, dataTempRegister, true); + } + + // Miscellaneous operations: + + void breakpoint(uint16_t imm = 0) + { + m_assembler.brk(imm); + } + + void nop() + { + m_assembler.nop(); + } + + void memoryFence() + { + m_assembler.dmbSY(); + } + + + // Misc helper functions. + + // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc. + static RelationalCondition invert(RelationalCondition cond) + { + return static_cast(ARM64Assembler::invert(static_cast(cond))); + } + + static FunctionPtr readCallTarget(CodeLocationCall call) + { + return FunctionPtr(reinterpret_cast(ARM64Assembler::readCallTarget(call.dataLocation()))); + } + + static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) + { + ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); + } + + static ptrdiff_t maxJumpReplacementSize() + { + return ARM64Assembler::maxJumpReplacementSize(); + } + + RegisterID scratchRegisterForBlinding() + { + // We *do not* have a scratch register for blinding. 
+ RELEASE_ASSERT_NOT_REACHED(); + return getCachedDataTempRegisterIDAndInvalidate(); + } + + static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + + static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) + { + return label.labelAtOffset(0); + } + + static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) + { + reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue); + } + + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + } + + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + +protected: + ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond) + { + m_assembler.b_cond(cond); + AssemblerLabel label = m_assembler.label(); + m_assembler.nop(); + return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond); + } + ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); } + ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); } + ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); } + + template + ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg) + { + if (cond == IsZero) + m_assembler.cbz(reg); + else + m_assembler.cbnz(reg); + AssemblerLabel label = m_assembler.label(); + m_assembler.nop(); + return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast(cond), dataSize == 64, reg); + } + + ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond) + { + ASSERT(bit < 64); + bit &= 0x3f; + if (cond == IsZero) + m_assembler.tbz(reg, bit); + else + m_assembler.tbnz(reg, bit); + AssemblerLabel label = m_assembler.label(); + m_assembler.nop(); + return Jump(label, m_makeJumpPatchable ? 
ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast(cond), bit, reg); + } + + ARM64Assembler::Condition ARM64Condition(RelationalCondition cond) + { + return static_cast(cond); + } + + ARM64Assembler::Condition ARM64Condition(ResultCondition cond) + { + return static_cast(cond); + } + + ARM64Assembler::Condition ARM64Condition(DoubleCondition cond) + { + return static_cast(cond); + } + +private: + ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() + { + RELEASE_ASSERT(m_allowScratchRegister); + return m_dataMemoryTempRegister.registerIDInvalidate(); + } + ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() + { + RELEASE_ASSERT(m_allowScratchRegister); + return m_cachedMemoryTempRegister.registerIDInvalidate(); + } + + ALWAYS_INLINE bool isInIntRange(intptr_t value) + { + return value == ((value << 32) >> 32); + } + + template + void moveInternal(ImmediateType imm, RegisterID dest) + { + const int dataSize = sizeof(rawType) * 8; + const int numberHalfWords = dataSize / 16; + rawType value = bitwise_cast(imm.m_value); + uint16_t halfword[numberHalfWords]; + + // Handle 0 and ~0 here to simplify code below + if (!value) { + m_assembler.movz(dest, 0); + return; + } + if (!~value) { + m_assembler.movn(dest, 0); + return; + } + + LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast(value)) : LogicalImmediate::create32(static_cast(value)); + + if (logicalImm.isValid()) { + m_assembler.movi(dest, logicalImm); + return; + } + + // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly. + int zeroOrNegateVote = 0; + for (int i = 0; i < numberHalfWords; ++i) { + halfword[i] = getHalfword(value, i); + if (!halfword[i]) + zeroOrNegateVote++; + else if (halfword[i] == 0xffff) + zeroOrNegateVote--; + } + + bool needToClearRegister = true; + if (zeroOrNegateVote >= 0) { + for (int i = 0; i < numberHalfWords; i++) { + if (halfword[i]) { + if (needToClearRegister) { + m_assembler.movz(dest, halfword[i], 16*i); + needToClearRegister = false; + } else + m_assembler.movk(dest, halfword[i], 16*i); + } + } + } else { + for (int i = 0; i < numberHalfWords; i++) { + if (halfword[i] != 0xffff) { + if (needToClearRegister) { + m_assembler.movn(dest, ~halfword[i], 16*i); + needToClearRegister = false; + } else + m_assembler.movk(dest, halfword[i], 16*i); + } + } + } + } + + template + ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + m_assembler.ldr(rt, rn, pimm); + } + + template + ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + m_assembler.ldur(rt, rn, simm); + } + + template + ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + loadUnsignedImmediate(rt, rn, pimm); + } + + template + ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + loadUnscaledImmediate(rt, rn, simm); + } + + template + ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + m_assembler.str(rt, rn, pimm); + } + + template + ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + m_assembler.stur(rt, rn, simm); + } + + void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest) + { + int32_t value = imm.m_value; + m_assembler.movz<32>(dest, getHalfword(value, 0)); + m_assembler.movk<32>(dest, getHalfword(value, 1), 16); + } + + void moveWithFixedWidth(TrustedImmPtr imm, 
RegisterID dest) + { + intptr_t value = reinterpret_cast(imm.m_value); + m_assembler.movz<64>(dest, getHalfword(value, 0)); + m_assembler.movk<64>(dest, getHalfword(value, 1), 16); + m_assembler.movk<64>(dest, getHalfword(value, 2), 32); + } + + void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest) + { + if (value >= 0) { + m_assembler.movz<32>(dest, getHalfword(value, 0)); + m_assembler.movk<32>(dest, getHalfword(value, 1), 16); + } else { + m_assembler.movn<32>(dest, ~getHalfword(value, 0)); + m_assembler.movk<32>(dest, getHalfword(value, 1), 16); + } + } + + template + ALWAYS_INLINE void load(const void* address, RegisterID dest) + { + intptr_t currentRegisterContents; + if (m_cachedMemoryTempRegister.value(currentRegisterContents)) { + intptr_t addressAsInt = reinterpret_cast(address); + intptr_t addressDelta = addressAsInt - currentRegisterContents; + + if (dest == memoryTempRegister) + m_cachedMemoryTempRegister.invalidate(); + + if (isInIntRange(addressDelta)) { + if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) { + m_assembler.ldur(dest, memoryTempRegister, addressDelta); + return; + } + + if (ARM64Assembler::canEncodePImmOffset(addressDelta)) { + m_assembler.ldr(dest, memoryTempRegister, addressDelta); + return; + } + } + + if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) { + m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0); + m_cachedMemoryTempRegister.setValue(reinterpret_cast(address)); + m_assembler.ldr(dest, memoryTempRegister, ARM64Registers::zr); + return; + } + } + + move(TrustedImmPtr(address), memoryTempRegister); + if (dest == memoryTempRegister) + m_cachedMemoryTempRegister.invalidate(); + else + m_cachedMemoryTempRegister.setValue(reinterpret_cast(address)); + m_assembler.ldr(dest, memoryTempRegister, ARM64Registers::zr); + } + + template + ALWAYS_INLINE void store(RegisterID src, const void* address) + { + ASSERT(src != memoryTempRegister); + intptr_t currentRegisterContents; + if (m_cachedMemoryTempRegister.value(currentRegisterContents)) { + intptr_t addressAsInt = reinterpret_cast(address); + intptr_t addressDelta = addressAsInt - currentRegisterContents; + + if (isInIntRange(addressDelta)) { + if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) { + m_assembler.stur(src, memoryTempRegister, addressDelta); + return; + } + + if (ARM64Assembler::canEncodePImmOffset(addressDelta)) { + m_assembler.str(src, memoryTempRegister, addressDelta); + return; + } + } + + if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) { + m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0); + m_cachedMemoryTempRegister.setValue(reinterpret_cast(address)); + m_assembler.str(src, memoryTempRegister, ARM64Registers::zr); + return; + } + } + + move(TrustedImmPtr(address), memoryTempRegister); + m_cachedMemoryTempRegister.setValue(reinterpret_cast(address)); + m_assembler.str(src, memoryTempRegister, ARM64Registers::zr); + } + + template + ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest) + { +#if 1 + Q_UNUSED(immediate); + Q_UNUSED(dest) +#else + intptr_t currentRegisterContents; + if (dest.value(currentRegisterContents)) { + if (currentRegisterContents == immediate) + return true; + + LogicalImmediate logicalImm = dataSize == 64 ? 
LogicalImmediate::create64(static_cast(immediate)) : LogicalImmediate::create32(static_cast(immediate)); + + if (logicalImm.isValid()) { + m_assembler.movi(dest.registerIDNoInvalidate(), logicalImm); + dest.setValue(immediate); + return true; + } + + if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) { + if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1)) + m_assembler.movk(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16); + + if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0)) + m_assembler.movk(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0); + + dest.setValue(immediate); + return true; + } + } +#endif + + return false; + } + + void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest) + { + if (tryMoveUsingCacheRegisterContents<32>(static_cast(imm.m_value), dest)) + return; + + moveInternal(imm, dest.registerIDNoInvalidate()); + dest.setValue(imm.m_value); + } + + void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest) + { + if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest)) + return; + + moveInternal(imm, dest.registerIDNoInvalidate()); + dest.setValue(imm.asIntptr()); + } + + void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest) + { + if (tryMoveUsingCacheRegisterContents<64>(static_cast(imm.m_value), dest)) + return; + + moveInternal(imm, dest.registerIDNoInvalidate()); + dest.setValue(imm.m_value); + } + + template + ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadUnscaledImmediate(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset(offset)) { + loadUnsignedImmediate(rt, rn, static_cast(offset)); + return true; + } + return false; + } + + template + ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadSignedAddressedByUnscaledImmediate(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset(offset)) { + loadSignedAddressedByUnsignedImmediate(rt, rn, static_cast(offset)); + return true; + } + return false; + } + + template + ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + m_assembler.ldur(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset(offset)) { + m_assembler.ldr(rt, rn, static_cast(offset)); + return true; + } + return false; + } + + template + ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + storeUnscaledImmediate(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset(offset)) { + storeUnsignedImmediate(rt, rn, static_cast(offset)); + return true; + } + return false; + } + + template + ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + m_assembler.stur(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset(offset)) { + m_assembler.str(rt, rn, static_cast(offset)); + return true; + } + return false; + } + + Jump jumpAfterFloatingPointCompare(DoubleCondition cond) + { + if (cond == DoubleNotEqual) { + // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump. 
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump result = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + return result; + } + if (cond == DoubleEqualOrUnordered) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump notEqual = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + // We get here if either unordered or equal. + Jump result = jump(); + notEqual.link(this); + return result; + } + return makeBranch(cond); + } + + friend class LinkBuffer; + void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); } + int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); } + + static void linkCall(void* code, Call call, FunctionPtr function) + { + if (!call.isFlagSet(Call::Near)) + ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value()); +#if 0 + else if (call.isFlagSet(Call::Tail)) + ARM64Assembler::linkJump(code, call.m_label, function.value()); +#endif + else + ARM64Assembler::linkCall(code, call.m_label, function.value()); + } + + CachedTempRegister m_dataMemoryTempRegister; + CachedTempRegister m_cachedMemoryTempRegister; + bool m_makeJumpPatchable; + bool m_allowScratchRegister = true; +}; + +// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrb(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrh(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrsb<64>(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrsh<64>(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldurb(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldurh(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldursb<64>(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldursh<64>(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.strb(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.strh(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.sturb(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.sturh(rt, rn, simm); +} + +} // namespace JSC 
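+
+// An illustrative, self-contained sketch of the halfword-vote strategy that
+// MacroAssemblerARM64::moveInternal() above uses to materialize a 64-bit immediate:
+// count how many 16-bit chunks are 0x0000 versus 0xffff, start from movz (build up
+// from zero) or movn (build down from all-ones) accordingly, and patch the remaining
+// chunks with movk. This is a host-side model only, compiled out here; planMove64 and
+// MovOp are hypothetical names, not part of the assembler's API.
+#if 0
+#include <cstdint>
+#include <vector>
+
+struct MovOp {
+    const char* mnemonic; // "movz", "movn" or "movk"
+    uint16_t imm;         // 16-bit payload
+    int shift;            // 0, 16, 32 or 48
+};
+
+static std::vector<MovOp> planMove64(uint64_t value)
+{
+    uint16_t half[4];
+    int vote = 0; // >= 0 favours movz/movk, < 0 favours movn/movk
+    for (int i = 0; i < 4; ++i) {
+        half[i] = static_cast<uint16_t>(value >> (16 * i));
+        if (!half[i])
+            ++vote;
+        else if (half[i] == 0xffff)
+            --vote;
+    }
+
+    const bool useMovz = vote >= 0;
+    std::vector<MovOp> plan;
+    for (int i = 0; i < 4; ++i) {
+        // Chunks already produced by the first instruction's implicit fill can be skipped.
+        if (useMovz ? !half[i] : half[i] == 0xffff)
+            continue;
+        if (plan.empty())
+            plan.push_back({ useMovz ? "movz" : "movn",
+                             useMovz ? half[i] : static_cast<uint16_t>(~half[i]), 16 * i });
+        else
+            plan.push_back({ "movk", half[i], 16 * i });
+    }
+    if (plan.empty()) // value was 0 or ~0ull; a single instruction suffices.
+        plan.push_back({ useMovz ? "movz" : "movn", 0, 0 });
+    return plan;
+}
+#endif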
+ +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerARM64_h diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h index 85cd6c27b9..0938383513 100644 --- a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h +++ b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h @@ -27,7 +27,7 @@ #ifndef MacroAssemblerARMv7_h #define MacroAssemblerARMv7_h -#if ENABLE(ASSEMBLER) +#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) #include "ARMv7Assembler.h" #include "AbstractMacroAssembler.h" diff --git a/src/3rdparty/masm/disassembler/ARM64/A64DOpcode.cpp b/src/3rdparty/masm/disassembler/ARM64/A64DOpcode.cpp new file mode 100644 index 0000000000..52a92c669c --- /dev/null +++ b/src/3rdparty/masm/disassembler/ARM64/A64DOpcode.cpp @@ -0,0 +1,1202 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define __STDC_FORMAT_MACROS +#include "config.h" + +#if USE(ARM64_DISASSEMBLER) + +#include "A64DOpcode.h" + +#include +#include +#include + +namespace JSC { namespace ARM64Disassembler { + +A64DOpcode::OpcodeGroup* A64DOpcode::opcodeTable[32]; + +const char* const A64DOpcode::s_conditionNames[16] = { + "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "al", "ne" +}; + +const char* const A64DOpcode::s_optionName[8] = { + "uxtb", "uxth", "uxtw", "uxtx", "sxtb", "sxth", "sxtw", "sxtx" +}; + +const char* const A64DOpcode::s_shiftNames[4] = { + "lsl", "lsr", "asl", "ror" +}; + +const char A64DOpcode::s_FPRegisterPrefix[5] = { + 'b', 'h', 's', 'd', 'q' +}; + +struct OpcodeGroupInitializer { + unsigned m_opcodeGroupNumber; + uint32_t m_mask; + uint32_t m_pattern; + const char* (*m_format)(A64DOpcode*); +}; + +#define OPCODE_GROUP_ENTRY(groupIndex, groupClass) \ +{ groupIndex, groupClass::mask, groupClass::pattern, groupClass::format } + +static OpcodeGroupInitializer opcodeGroupList[] = { + OPCODE_GROUP_ENTRY(0x08, A64DOpcodeLoadStoreRegisterPair), + OPCODE_GROUP_ENTRY(0x09, A64DOpcodeLoadStoreRegisterPair), + OPCODE_GROUP_ENTRY(0x0a, A64DOpcodeLogicalShiftedRegister), + OPCODE_GROUP_ENTRY(0x0b, A64DOpcodeAddSubtractExtendedRegister), + OPCODE_GROUP_ENTRY(0x0b, A64DOpcodeAddSubtractShiftedRegister), + OPCODE_GROUP_ENTRY(0x11, A64DOpcodeAddSubtractImmediate), + OPCODE_GROUP_ENTRY(0x12, A64DOpcodeMoveWide), + OPCODE_GROUP_ENTRY(0x12, A64DOpcodeLogicalImmediate), + OPCODE_GROUP_ENTRY(0x13, A64DOpcodeBitfield), + OPCODE_GROUP_ENTRY(0x13, A64DOpcodeExtract), + OPCODE_GROUP_ENTRY(0x14, A64DOpcodeUnconditionalBranchImmediate), + OPCODE_GROUP_ENTRY(0x14, A64DOpcodeConditionalBranchImmediate), + OPCODE_GROUP_ENTRY(0x14, A64DOpcodeCompareAndBranchImmediate), + OPCODE_GROUP_ENTRY(0x14, A64OpcodeExceptionGeneration), + OPCODE_GROUP_ENTRY(0x15, A64DOpcodeUnconditionalBranchImmediate), + OPCODE_GROUP_ENTRY(0x15, A64DOpcodeConditionalBranchImmediate), + OPCODE_GROUP_ENTRY(0x15, A64DOpcodeCompareAndBranchImmediate), + OPCODE_GROUP_ENTRY(0x15, A64DOpcodeHint), + OPCODE_GROUP_ENTRY(0x16, A64DOpcodeUnconditionalBranchImmediate), + OPCODE_GROUP_ENTRY(0x16, A64DOpcodeUnconditionalBranchRegister), + OPCODE_GROUP_ENTRY(0x16, A64DOpcodeTestAndBranchImmediate), + OPCODE_GROUP_ENTRY(0x17, A64DOpcodeUnconditionalBranchImmediate), + OPCODE_GROUP_ENTRY(0x17, A64DOpcodeUnconditionalBranchRegister), + OPCODE_GROUP_ENTRY(0x17, A64DOpcodeTestAndBranchImmediate), + OPCODE_GROUP_ENTRY(0x18, A64DOpcodeLoadStoreImmediate), + OPCODE_GROUP_ENTRY(0x18, A64DOpcodeLoadStoreRegisterOffset), + OPCODE_GROUP_ENTRY(0x19, A64DOpcodeLoadStoreUnsignedImmediate), + OPCODE_GROUP_ENTRY(0x1a, A64DOpcodeConditionalSelect), + OPCODE_GROUP_ENTRY(0x1a, A64DOpcodeDataProcessing2Source), + OPCODE_GROUP_ENTRY(0x1b, A64DOpcodeDataProcessing3Source), + OPCODE_GROUP_ENTRY(0x1c, A64DOpcodeLoadStoreImmediate), + OPCODE_GROUP_ENTRY(0x1c, A64DOpcodeLoadStoreRegisterOffset), + OPCODE_GROUP_ENTRY(0x1d, A64DOpcodeLoadStoreUnsignedImmediate), + OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointCompare), + OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointDataProcessing2Source), + OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointDataProcessing1Source), + OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingFixedPointConversions), + OPCODE_GROUP_ENTRY(0x1e, A64DOpcodeFloatingPointIntegerConversions), +}; + +bool A64DOpcode::s_initialized = false; + +void A64DOpcode::init() +{ + if (s_initialized) + return; + + OpcodeGroup* lastGroups[32]; 
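+    // lastGroups tracks the tail of each per-index chain so the entries taken from
+    // opcodeGroupList are appended in declaration order under their top-level opcode bits.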
+ + for (unsigned i = 0; i < 32; i++) { + opcodeTable[i] = 0; + lastGroups[i] = 0; + } + + for (unsigned i = 0; i < sizeof(opcodeGroupList) / sizeof(struct OpcodeGroupInitializer); i++) { + OpcodeGroup* newOpcodeGroup = new OpcodeGroup(opcodeGroupList[i].m_mask, opcodeGroupList[i].m_pattern, opcodeGroupList[i].m_format); + uint32_t opcodeGroupNumber = opcodeGroupList[i].m_opcodeGroupNumber; + + if (!opcodeTable[opcodeGroupNumber]) + opcodeTable[opcodeGroupNumber] = newOpcodeGroup; + else + lastGroups[opcodeGroupNumber]->setNext(newOpcodeGroup); + lastGroups[opcodeGroupNumber] = newOpcodeGroup; + } + + s_initialized = true; +} + +void A64DOpcode::setPCAndOpcode(uint32_t* newPC, uint32_t newOpcode) +{ + m_currentPC = newPC; + m_opcode = newOpcode; + m_bufferOffset = 0; + m_formatBuffer[0] = '\0'; +} + +const char* A64DOpcode::disassemble(uint32_t* currentPC) +{ + setPCAndOpcode(currentPC, *currentPC); + + OpcodeGroup* opGroup = opcodeTable[opcodeGroupNumber(m_opcode)]; + + while (opGroup) { + if (opGroup->matches(m_opcode)) + return opGroup->format(this); + opGroup = opGroup->next(); + } + + return A64DOpcode::format(); +} + +void A64DOpcode::bufferPrintf(const char* format, ...) +{ + if (m_bufferOffset >= bufferSize) + return; + + va_list argList; + va_start(argList, format); + + m_bufferOffset += vsnprintf(m_formatBuffer + m_bufferOffset, bufferSize - m_bufferOffset, format, argList); + + va_end(argList); +} + +const char* A64DOpcode::format() +{ + bufferPrintf(" .long %08x", m_opcode); + return m_formatBuffer; +} + +void A64DOpcode::appendRegisterName(unsigned registerNumber, bool is64Bit) +{ + if (registerNumber == 29) { + bufferPrintf(is64Bit ? "fp" : "wfp"); + return; + } + + if (registerNumber == 30) { + bufferPrintf(is64Bit ? "lr" : "wlr"); + return; + } + + bufferPrintf("%c%u", is64Bit ? 'x' : 'w', registerNumber); +} + +void A64DOpcode::appendFPRegisterName(unsigned registerNumber, unsigned registerSize) +{ + bufferPrintf("%c%u", FPRegisterPrefix(registerSize), registerNumber); +} + +const char* const A64DOpcodeAddSubtract::s_opNames[4] = { "add", "adds", "sub", "subs" }; + +const char* A64DOpcodeAddSubtractImmediate::format() +{ + if (isCMP()) + appendInstructionName(cmpName()); + else { + if (isMovSP()) + appendInstructionName("mov"); + else + appendInstructionName(opName()); + appendSPOrRegisterName(rd(), is64Bit()); + appendSeparator(); + } + appendSPOrRegisterName(rn(), is64Bit()); + + if (!isMovSP()) { + appendSeparator(); + appendUnsignedImmediate(immed12()); + if (shift()) { + appendSeparator(); + appendString(shift() == 1 ? 
"lsl" : "reserved"); + } + } + return m_formatBuffer; +} + +const char* A64DOpcodeAddSubtractExtendedRegister::format() +{ + if (immediate3() > 4) + return A64DOpcode::format(); + + if (isCMP()) + appendInstructionName(cmpName()); + else { + appendInstructionName(opName()); + appendSPOrRegisterName(rd(), is64Bit()); + appendSeparator(); + } + appendSPOrRegisterName(rn(), is64Bit()); + appendSeparator(); + appendZROrRegisterName(rm(), is64Bit() && ((option() & 0x3) == 0x3)); + appendSeparator(); + if (option() == 0x2 && ((rd() == 31) || (rn() == 31))) + appendString("lsl"); + else + appendString(optionName()); + if (immediate3()) { + appendCharacter(' '); + appendUnsignedImmediate(immediate3()); + } + + return m_formatBuffer; +} + +const char* A64DOpcodeAddSubtractShiftedRegister::format() +{ + if (!is64Bit() && immediate6() & 0x20) + return A64DOpcode::format(); + + if (shift() == 0x3) + return A64DOpcode::format(); + + if (isCMP()) + appendInstructionName(cmpName()); + else { + if (isNeg()) + appendInstructionName(cmpName()); + else + appendInstructionName(opName()); + appendSPOrRegisterName(rd(), is64Bit()); + appendSeparator(); + } + if (!isNeg()) { + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + } + appendZROrRegisterName(rm(), is64Bit()); + if (immediate6()) { + appendSeparator(); + appendShiftType(shift()); + appendUnsignedImmediate(immediate6()); + } + + return m_formatBuffer; +} + +const char* const A64DOpcodeBitfield::s_opNames[3] = { "sbfm", "bfm", "ubfm" }; +const char* const A64DOpcodeBitfield::s_extendPseudoOpNames[3][3] = { + { "sxtb", "sxth", "sxtw" }, { 0, 0, 0} , { "uxtb", "uxth", "uxtw" } }; +const char* const A64DOpcodeBitfield::s_insertOpNames[3] = { "sbfiz", "bfi", "ubfiz" }; +const char* const A64DOpcodeBitfield::s_extractOpNames[3] = { "sbfx", "bf", "ubfx" }; + +const char* A64DOpcodeBitfield::format() +{ + if (opc() == 0x3) + return A64DOpcode::format(); + + if (is64Bit() != nBit()) + return A64DOpcode::format(); + + if (!is64Bit() && ((immediateR() & 0x20) || (immediateS() & 0x20))) + return A64DOpcode::format(); + + if (!(opc() & 0x1) && !immediateR()) { + // [un]signed {btye,half-word,word} extend + bool isSTXType = false; + if (immediateS() == 7) { + appendInstructionName(extendPseudoOpNames(0)); + isSTXType = true; + } else if (immediateS() == 15) { + appendInstructionName(extendPseudoOpNames(1)); + isSTXType = true; + } else if (immediateS() == 31 && is64Bit()) { + appendInstructionName(extendPseudoOpNames(2)); + isSTXType = true; + } + + if (isSTXType) { + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), false); + + return m_formatBuffer; + } + } + + if (opc() == 0x2 && immediateS() == (immediateR() + 1)) { + // lsl + appendInstructionName("lsl"); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate((is64Bit() ? 63u : 31u) - immediateR()); + + return m_formatBuffer; + } else if (!(opc() & 0x1) && ((immediateS() & 0x1f) == 0x1f) && (is64Bit() == (immediateS() >> 5))) { + // asr/lsr + appendInstructionName(!opc() ? 
"ars" : "lsr"); + + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate(immediateR()); + + return m_formatBuffer; + } else if (immediateS() < immediateR()) { + // bit field insert + appendInstructionName(insertOpNames()); + + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate((is64Bit() ? 64u : 32u) - immediateR()); + appendSeparator(); + appendUnsignedImmediate(immediateS() + 1); + + return m_formatBuffer; + } else { + // bit field extract + appendInstructionName(extractOpNames()); + + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate(immediateR()); + appendSeparator(); + appendUnsignedImmediate(immediateS() - immediateR() + 1); + + return m_formatBuffer; + } + + appendInstructionName(opName()); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate(immediateR()); + appendSeparator(); + appendUnsignedImmediate(immediateS()); + + return m_formatBuffer; +} + +const char* A64DOpcodeCompareAndBranchImmediate::format() +{ + appendInstructionName(opBit() ? "cbnz" : "cbz"); + appendRegisterName(rt(), is64Bit()); + appendSeparator(); + appendPCRelativeOffset(m_currentPC, static_cast(immediate19())); + return m_formatBuffer; +} + +const char* A64DOpcodeConditionalBranchImmediate::format() +{ + bufferPrintf(" b.%-5.5s", conditionName(condition())); + appendPCRelativeOffset(m_currentPC, static_cast(immediate19())); + return m_formatBuffer; +} + +const char* const A64DOpcodeConditionalSelect::s_opNames[4] = { + "csel", "csinc", "csinv", "csneg" +}; + +const char* A64DOpcodeConditionalSelect::format() +{ + if (sBit()) + return A64DOpcode::format(); + + if (op2() & 0x2) + return A64DOpcode::format(); + + if (rn() == rm() && (opNum() == 1 || opNum() == 2)) { + if (rn() == 31) { + appendInstructionName((opNum() == 1) ? "cset" : "csetm"); + appendRegisterName(rd(), is64Bit()); + } else { + appendInstructionName((opNum() == 1) ? 
"cinc" : "cinv"); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendZROrRegisterName(rn(), is64Bit()); + } + appendSeparator(); + appendString(conditionName(condition() ^ 0x1)); + + return m_formatBuffer; + } + + appendInstructionName(opName()); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendZROrRegisterName(rn(), is64Bit()); + appendSeparator(); + appendZROrRegisterName(rm(), is64Bit()); + appendSeparator(); + appendString(conditionName(condition())); + + return m_formatBuffer; + +} + +const char* const A64DOpcodeDataProcessing2Source::s_opNames[8] = { + 0, 0, "udiv", "sdiv", "lsl", "lsr", "asr", "ror" // We use the pseudo-op names for the shift/rotate instructions +}; + +const char* A64DOpcodeDataProcessing2Source::format() +{ + if (sBit()) + return A64DOpcode::format(); + + if (!(opCode() & 0x3e)) + return A64DOpcode::format(); + + if (opCode() & 0x30) + return A64DOpcode::format(); + + if ((opCode() & 0x34) == 0x4) + return A64DOpcode::format(); + + appendInstructionName(opName()); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendRegisterName(rm(), is64Bit()); + + return m_formatBuffer; +} + +const char* const A64DOpcodeDataProcessing3Source::s_opNames[16] = { + "madd", "msub", "smaddl", "smsubl", "smulh", 0, 0, 0, + 0, 0, "umaddl", "umsubl", "umulh", 0, 0, 0 +}; + +const char* const A64DOpcodeDataProcessing3Source::s_pseudoOpNames[16] = { + "mul", "mneg", "smull", "smnegl", "smulh", 0, 0, 0, + 0, 0, "umull", "umnegl", "umulh", 0, 0, 0 +}; + +const char* A64DOpcodeDataProcessing3Source::format() +{ + if (op54()) + return A64DOpcode::format(); + + if (opNum() > 12) + return A64DOpcode::format(); + + if (!is64Bit() && opNum() > 1) + return A64DOpcode::format(); + + if (!opName()) + return A64DOpcode::format(); + + appendInstructionName(opName()); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + bool srcOneAndTwoAre64Bit = is64Bit() & !(opNum() & 0x2); + appendRegisterName(rn(), srcOneAndTwoAre64Bit); + appendSeparator(); + appendRegisterName(rm(), srcOneAndTwoAre64Bit); + + if ((ra() != 31) || !(opNum() & 0x4)) { + appendSeparator(); + appendRegisterName(ra(), is64Bit()); + } + + return m_formatBuffer; +} + +const char* A64OpcodeExceptionGeneration::format() +{ + const char* opname = 0; + if (!op2()) { + switch (opc()) { + case 0x0: // SVC, HVC & SMC + switch (ll()) { + case 0x1: + opname = "svc"; + break; + case 0x2: + opname = "hvc"; + break; + case 0x3: + opname = "smc"; + break; + } + break; + case 0x1: // BRK + if (!ll()) + opname = "brk"; + break; + case 0x2: // HLT + if (!ll()) + opname = "hlt"; + break; + case 0x5: // DPCS1-3 + switch (ll()) { + case 0x1: + opname = "dpcs1"; + break; + case 0x2: + opname = "dpcs2"; + break; + case 0x3: + opname = "dpcs3"; + break; + } + break; + } + } + + if (!opname) + return A64DOpcode::format(); + + appendInstructionName(opname); + appendUnsignedImmediate(immediate16()); + return m_formatBuffer; +} + +const char* A64DOpcodeExtract::format() +{ + if (!op21() || !o0Bit()) + return A64DOpcode::format(); + + if (is64Bit() != nBit()) + return A64DOpcode::format(); + + if (is64Bit() && (immediateS() & 0x20)) + return A64DOpcode::format(); + + const char* opName = (rn() == rm()) ? 
"ror" : "extr"; + + appendInstructionName(opName); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + appendRegisterName(rm(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate(immediateS()); + + return m_formatBuffer; +} + +const char* A64DOpcodeFloatingPointCompare::format() +{ + if (mBit()) + return A64DOpcode::format(); + + if (sBit()) + return A64DOpcode::format(); + + if (type() & 0x2) + return A64DOpcode::format(); + + if (op()) + return A64DOpcode::format(); + + if (opCode2() & 0x7) + return A64DOpcode::format(); + + appendInstructionName(opName()); + unsigned registerSize = type() + 2; + appendFPRegisterName(rn(), registerSize); + appendSeparator(); + if (opCode2() & 0x8) + bufferPrintf("#0.0"); + else + appendFPRegisterName(rm(), registerSize); + + return m_formatBuffer; +} + +const char* const A64DOpcodeFloatingPointDataProcessing1Source::s_opNames[16] = { + "fmov", "fabs", "fneg", "fsqrt", "fcvt", "fcvt", 0, "fcvt", + "frintn", "frintp", "frintm", "frintz", "frinta", 0, "frintx", "frinti" +}; + +const char* A64DOpcodeFloatingPointDataProcessing1Source::format() +{ + if (mBit()) + return A64DOpcode::format(); + + if (sBit()) + return A64DOpcode::format(); + + if (opNum() > 16) + return A64DOpcode::format(); + + switch (type()) { + case 0: + if ((opNum() == 0x4) || (opNum() == 0x6) || (opNum() == 0xd)) + return A64DOpcode::format(); + break; + case 1: + if ((opNum() == 0x5) || (opNum() == 0x6) || (opNum() == 0xd)) + return A64DOpcode::format(); + break; + case 2: + return A64DOpcode::format(); + case 3: + if ((opNum() < 0x4) || (opNum() > 0x5)) + return A64DOpcode::format(); + break; + } + + appendInstructionName(opName()); + if ((opNum() >= 0x4) && (opNum() <= 0x7)) { + unsigned srcRegisterSize = type() ^ 0x2; // 0:s, 1:d & 3:h + unsigned destRegisterSize = (opNum() & 0x3) ^ 0x2; + appendFPRegisterName(rd(), destRegisterSize); + appendSeparator(); + appendFPRegisterName(rn(), srcRegisterSize); + } else { + unsigned registerSize = type() + 2; + appendFPRegisterName(rd(), registerSize); + appendSeparator(); + appendFPRegisterName(rn(), registerSize); + } + + return m_formatBuffer; +} + +const char* const A64DOpcodeFloatingPointDataProcessing2Source::s_opNames[16] = { + "fmul", "fdiv", "fadd", "fsub", "fmax", "fmin", "fmaxnm", "fminnm", "fnmul" +}; + +const char* A64DOpcodeFloatingPointDataProcessing2Source::format() +{ + if (mBit()) + return A64DOpcode::format(); + + if (sBit()) + return A64DOpcode::format(); + + if (type() & 0x2) + return A64DOpcode::format(); + + if (opNum() > 8) + return A64DOpcode::format(); + + appendInstructionName(opName()); + unsigned registerSize = type() + 2; + appendFPRegisterName(rd(), registerSize); + appendSeparator(); + appendFPRegisterName(rn(), registerSize); + appendSeparator(); + appendFPRegisterName(rm(), registerSize); + + return m_formatBuffer; +} + +const char* const A64DOpcodeFloatingFixedPointConversions::s_opNames[4] = { + "fcvtzs", "fcvtzu", "scvtf", "ucvtf" +}; + +const char* A64DOpcodeFloatingFixedPointConversions::format() +{ + if (sBit()) + return A64DOpcode::format(); + + if (type() & 0x2) + return A64DOpcode::format(); + + if (opcode() & 0x4) + return A64DOpcode::format(); + + if (!(rmode() & 0x1) && !(opcode() & 0x6)) + return A64DOpcode::format(); + + if ((rmode() & 0x1) && (opcode() & 0x6) == 0x2) + return A64DOpcode::format(); + + if (!(rmode() & 0x2) && !(opcode() & 0x6)) + return A64DOpcode::format(); + + if ((rmode() & 0x2) && (opcode() & 0x6) == 
0x2) + return A64DOpcode::format(); + + if (!is64Bit() && scale() >= 32) + return A64DOpcode::format(); + + appendInstructionName(opName()); + unsigned FPRegisterSize = type() + 2; + bool destIsFP = !rmode(); + + if (destIsFP) { + appendFPRegisterName(rd(), FPRegisterSize); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + } else { + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendFPRegisterName(rn(), FPRegisterSize); + } + appendSeparator(); + appendUnsignedImmediate(64 - scale()); + + return m_formatBuffer; +} + +const char* const A64DOpcodeFloatingPointIntegerConversions::s_opNames[32] = { + "fcvtns", "fcvtnu", "scvtf", "ucvtf", "fcvtas", "fcvtau", "fmov", "fmov", + "fcvtps", "fcvtpu", 0, 0, 0, 0, "fmov", "fmov", + "fcvtms", "fcvtmu", 0, 0, 0, 0, 0, 0, + "fcvtzs", "fcvtzu", 0, 0, 0, 0, 0, 0 +}; + +const char* A64DOpcodeFloatingPointIntegerConversions::format() +{ + if (sBit()) + return A64DOpcode::format(); + + if (type() == 0x3) + return A64DOpcode::format(); + + if (((rmode() & 0x1) || (rmode() & 0x2)) && (((opcode() & 0x6) == 0x2) || ((opcode() & 0x6) == 0x4))) + return A64DOpcode::format(); + + if ((type() == 0x2) && (!(opcode() & 0x4) || ((opcode() & 0x6) == 0x4))) + return A64DOpcode::format(); + + if (!type() && (rmode() & 0x1) && ((opcode() & 0x6) == 0x6)) + return A64DOpcode::format(); + + if (is64Bit() && type() == 0x2 && ((opNum() & 0xe) == 0x6)) + return A64DOpcode::format(); + + if (!opName()) + return A64DOpcode::format(); + + if ((opNum() & 0x1e) == 0xe) { + // Handle fmov to/from upper half of quad separately + if (!is64Bit() || (type() != 0x2)) + return A64DOpcode::format(); + + appendInstructionName(opName()); + if (opcode() & 0x1) { + // fmov Vd.D[1], Xn + bufferPrintf("V%u.D[1]", rd()); + appendSeparator(); + appendRegisterName(rn()); + } else { + // fmov Xd, Vn.D[1] + appendRegisterName(rd()); + appendSeparator(); + bufferPrintf("V%u.D[1]", rn()); + } + + return m_formatBuffer; + } + + appendInstructionName(opName()); + unsigned FPRegisterSize = type() + 2; + bool destIsFP = ((opNum() == 2) || (opNum() == 3) || (opNum() == 7)); + + if (destIsFP) { + appendFPRegisterName(rd(), FPRegisterSize); + appendSeparator(); + appendRegisterName(rn(), is64Bit()); + } else { + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendFPRegisterName(rn(), FPRegisterSize); + } + + return m_formatBuffer; +} + +const char* const A64DOpcodeHint::s_opNames[6] = { + "nop", "yield", "wfe", "wfi", "sev", "sevl" +}; + +const char* A64DOpcodeHint::format() +{ + appendInstructionName(opName()); + + if (immediate7() > 5) + appendUnsignedImmediate(immediate7()); + + return m_formatBuffer; +} + +// A zero in an entry of the table means the instruction is Unallocated +const char* const A64DOpcodeLoadStore::s_opNames[32] = { + "strb", "ldrb", "ldrsb", "ldrsb", "str", "ldr", "str", "ldr", + "strh", "ldrh", "ldrsh", "ldrsh", "str", "ldr", 0, 0, + "str", "ldr", "ldrsw", 0, "str", "ldr", 0, 0, + "str", "ldr", 0, 0, "str", "ldr", 0, 0 +}; + +// A zero in an entry of the table means the instruction is Unallocated +const char* const A64DOpcodeLoadStoreImmediate::s_unprivilegedOpNames[32] = { + "sttrb", "ldtrb", "ldtrsb", "ldtrsb", 0, 0, 0, 0, + "sttrh", "ldtrh", "ldtrsh", "ldtrsh", 0, 0, 0, 0, + "sttr", "ldtr", "ldtrsw", 0, 0, 0, 0, 0, + "sttr", "ldtr", 0, 0, 0, 0, 0, 0 +}; + +// A zero in an entry of the table means the instruction is Unallocated +const char* const A64DOpcodeLoadStoreImmediate::s_unscaledOpNames[32] = { + "sturb", "ldurb", "ldursb", "ldursb", 
"stur", "ldur", "stur", "ldur", + "sturh", "ldurh", "ldursh", "ldursh", "stur", "ldur", 0, 0, + "stur", "ldur", "ldursw", 0, "stur", "ldur", 0, 0, + "stur", "ldur", "prfum", 0, "stur", "ldur", 0, 0 +}; + +const char* A64DOpcodeLoadStoreImmediate::format() +{ + const char* thisOpName; + + if (type() & 0x1) + thisOpName = opName(); + else if (!type()) + thisOpName = unscaledOpName(); + else + thisOpName = unprivilegedOpName(); + + if (!thisOpName) + return A64DOpcode::format(); + + appendInstructionName(thisOpName); + if (vBit()) + appendFPRegisterName(rt(), size()); + else + appendRegisterName(rt(), is64BitRT()); + appendSeparator(); + appendCharacter('['); + appendSPOrRegisterName(rn()); + + switch (type()) { + case 0: // Unscaled Immediate + if (immediate9()) { + appendSeparator(); + appendSignedImmediate(immediate9()); + } + appendCharacter(']'); + break; + case 1: // Immediate Post-Indexed + appendCharacter(']'); + if (immediate9()) { + appendSeparator(); + appendSignedImmediate(immediate9()); + } + break; + case 2: // Unprivileged + if (immediate9()) { + appendSeparator(); + appendSignedImmediate(immediate9()); + } + appendCharacter(']'); + break; + case 3: // Immediate Pre-Indexed + if (immediate9()) { + appendSeparator(); + appendSignedImmediate(immediate9()); + } + appendCharacter(']'); + appendCharacter('!'); + break; + } + + return m_formatBuffer; +} + +const char* A64DOpcodeLoadStoreRegisterOffset::format() +{ + const char* thisOpName = opName(); + + if (!thisOpName) + return A64DOpcode::format(); + + if (!(option() & 0x2)) + return A64DOpcode::format(); + + appendInstructionName(thisOpName); + unsigned scale; + if (vBit()) { + appendFPRegisterName(rt(), size()); + scale = ((opc() & 2)<<1) | size(); + } else { + appendRegisterName(rt(), is64BitRT()); + scale = size(); + } + appendSeparator(); + appendCharacter('['); + appendSPOrRegisterName(rn()); + appendSeparator(); + appendZROrRegisterName(rm(), (option() & 0x3) == 0x3); + + unsigned shift = sBit() ? 
scale : 0; + + if (option() == 0x3) { + if (shift) { + appendSeparator(); + appendString("lsl "); + appendUnsignedImmediate(shift); + } + } else { + appendSeparator(); + appendString(optionName()); + if (shift) + appendUnsignedImmediate(shift); + } + + appendCharacter(']'); + + return m_formatBuffer; +} + +const char* A64DOpcodeLoadStoreRegisterPair::opName() +{ + if (!vBit() && lBit() && size() == 0x1) + return "ldpsw"; + if (lBit()) + return "ldp"; + return "stp"; +} + +const char* A64DOpcodeLoadStoreRegisterPair::format() +{ + const char* thisOpName = opName(); + + if (size() == 0x3) + return A64DOpcode::format(); + + if ((offsetMode() < 0x1) || (offsetMode() > 0x3)) + return A64DOpcode::format(); + + if ((offsetMode() == 0x1) && !vBit() && !lBit()) + return A64DOpcode::format(); + + appendInstructionName(thisOpName); + unsigned offsetShift; + if (vBit()) { + appendFPRegisterName(rt(), size()); + appendSeparator(); + appendFPRegisterName(rt2(), size()); + offsetShift = size() + 2; + } else { + appendRegisterName(rt(), is64Bit()); + appendSeparator(); + appendRegisterName(rt2(), is64Bit()); + offsetShift = (size() >> 1) + 2; + } + + appendSeparator(); + appendCharacter('['); + appendSPOrRegisterName(rn()); + + int offset = immediate7() << offsetShift; + + if (offsetMode() == 1) { + appendCharacter(']'); + appendSeparator(); + appendSignedImmediate(offset); + } else { + appendSeparator(); + appendSignedImmediate(offset); + appendCharacter(']'); + if (offsetMode() == 0x3) + appendCharacter('!'); + } + + return m_formatBuffer; +} + +const char* A64DOpcodeLoadStoreUnsignedImmediate::format() +{ + const char* thisOpName = opName(); + + if (!thisOpName) + return A64DOpcode::format(); + + appendInstructionName(thisOpName); + unsigned scale; + if (vBit()) { + appendFPRegisterName(rt(), size()); + scale = ((opc() & 2)<<1) | size(); + } else { + appendRegisterName(rt(), is64BitRT()); + scale = size(); + } + appendSeparator(); + appendCharacter('['); + appendSPOrRegisterName(rn()); + + if (immediate12()) { + appendSeparator(); + appendUnsignedImmediate(immediate12() << scale); + } + + appendCharacter(']'); + + return m_formatBuffer; +} + +// A zero in an entry of the table means the instruction is Unallocated +const char* const A64DOpcodeLogical::s_opNames[8] = { + "and", "bic", "orr", "orn", "eor", "eon", "ands", "bics" +}; + +const char* A64DOpcodeLogicalShiftedRegister::format() +{ + if (!is64Bit() && immediate6() & 0x20) + return A64DOpcode::format(); + + if (isTst()) + appendInstructionName("tst"); + else { + if (isMov()) + appendInstructionName("mov"); + else + appendInstructionName(opName(opNumber())); + appendSPOrRegisterName(rd(), is64Bit()); + appendSeparator(); + } + + if (!isMov()) { + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + } + + appendZROrRegisterName(rm(), is64Bit()); + if (immediate6()) { + appendSeparator(); + appendShiftType(shift()); + appendUnsignedImmediate(immediate6()); + } + + return m_formatBuffer; +} + +static unsigned highestBitSet(unsigned value) +{ + unsigned result = 0; + + while (value >>= 1) + result++; + + return result; +} + +static uint64_t rotateRight(uint64_t value, unsigned width, unsigned shift) +{ + uint64_t result = value; + + if (shift) + result = (value >> (shift % width)) | (value << (width - shift)); + + return result; +} + +static uint64_t replicate(uint64_t value, unsigned width) +{ + uint64_t result = 0; + + for (unsigned totalBits = 0; totalBits < 64; totalBits += width) + result = (result << width) | value; + + return result; +} 
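+// Decodes an ARM64 bitmask ("logical") immediate from the N:immr:imms fields
+// using the helpers above. Worked example, with field values chosen purely for
+// illustration:
+//   N = 0, immr = 0, imms = 0b110011
+//   len       = highestBitSet(0 << 6 | (0b110011 ^ 0x3f)) = highestBitSet(0b001100) = 3
+//   eSize     = 1 << len = 8-bit elements, levels = eSize - 1 = 7
+//   s = imms & levels = 3, r = immr & levels = 0
+//   pattern   = rotateRight((1ull << (s + 1)) - 1, eSize, r) = 0x0f
+//   immediate = replicate(pattern, eSize) = 0x0f0f0f0f0f0f0f0f
+// so a 32-bit orr with this encoding is printed with immediate #0xf0f0f0f.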
+ +const char* A64DOpcodeLogicalImmediate::format() +{ + if (!is64Bit() && nBit()) + return A64DOpcode::format(); + + unsigned len = highestBitSet(nBit() << 6 | (immediateS() ^ 0x3f)); + unsigned levels = (1 << len) - 1; // len number of 1 bits starting at LSB + + if ((immediateS() & levels) == levels) + return A64DOpcode::format(); + + unsigned r = immediateR() & levels; + unsigned s = immediateS() & levels; + unsigned eSize = 1 << len; + uint64_t pattern = rotateRight((1ull << (s + 1)) - 1, eSize, r); + + uint64_t immediate = replicate(pattern, eSize); + + if (!is64Bit()) + immediate &= 0xffffffffull; + + if (isTst()) + appendInstructionName("tst"); + else { + if (isMov()) + appendInstructionName("mov"); + else + appendInstructionName(opName(opNumber())); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + } + if (!isMov()) { + appendRegisterName(rn(), is64Bit()); + appendSeparator(); + } + appendUnsignedImmediate64(immediate); + + return m_formatBuffer; +} + +const char* const A64DOpcodeMoveWide::s_opNames[4] = { "movn", "", "movz", "movk" }; + +const char* A64DOpcodeMoveWide::format() +{ + if (opc() == 1) + return A64DOpcode::format(); + if (!size() && hw() >= 2) + return A64DOpcode::format(); + + appendInstructionName(opName()); + appendRegisterName(rd(), is64Bit()); + appendSeparator(); + appendUnsignedImmediate(immediate16()); + if (hw()) { + appendSeparator(); + appendShiftAmount(hw()); + } + + return m_formatBuffer; +} + +const char* A64DOpcodeTestAndBranchImmediate::format() +{ + appendInstructionName(opBit() ? "tbnz" : "tbz"); + appendRegisterName(rt()); + appendSeparator(); + appendUnsignedImmediate(bitNumber()); + appendSeparator(); + appendPCRelativeOffset(m_currentPC, static_cast(immediate14())); + return m_formatBuffer; +} + +const char* A64DOpcodeUnconditionalBranchImmediate::format() +{ + appendInstructionName(op() ? "bl" : "b"); + appendPCRelativeOffset(m_currentPC, static_cast(immediate26())); + return m_formatBuffer; +} + +const char* const A64DOpcodeUnconditionalBranchRegister::s_opNames[8] = { "br", "blr", "ret", "", "eret", "drps", "", "" }; + +const char* A64DOpcodeUnconditionalBranchRegister::format() +{ + unsigned opcValue = opc(); + if (opcValue == 3 || opcValue > 5) + return A64DOpcode::format(); + if (((opcValue & 0xe) == 0x4) && rn() != 0x1f) + return A64DOpcode::format(); + appendInstructionName(opName()); + if (opcValue <= 2) + appendRegisterName(rn()); + return m_formatBuffer; +} + +} } // namespace JSC::ARM64Disassembler + +#endif // USE(ARM64_DISASSEMBLER) diff --git a/src/3rdparty/masm/disassembler/ARM64/A64DOpcode.h b/src/3rdparty/masm/disassembler/ARM64/A64DOpcode.h new file mode 100644 index 0000000000..5bb7db9f12 --- /dev/null +++ b/src/3rdparty/masm/disassembler/ARM64/A64DOpcode.h @@ -0,0 +1,708 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef A64DOpcode_h +#define A64DOpcode_h + +#include +#include + +namespace JSC { namespace ARM64Disassembler { + +class A64DOpcode { +private: + class OpcodeGroup { + public: + OpcodeGroup(uint32_t opcodeMask, uint32_t opcodePattern, const char* (*format)(A64DOpcode*)) + : m_opcodeMask(opcodeMask) + , m_opcodePattern(opcodePattern) + , m_format(format) + , m_next(0) + { + } + + void setNext(OpcodeGroup* next) + { + m_next = next; + } + + OpcodeGroup* next() + { + return m_next; + } + + bool matches(uint32_t opcode) + { + return (opcode & m_opcodeMask) == m_opcodePattern; + } + + const char* format(A64DOpcode* thisObj) + { + return m_format(thisObj); + } + + private: + uint32_t m_opcodeMask; + uint32_t m_opcodePattern; + const char* (*m_format)(A64DOpcode*); + OpcodeGroup* m_next; + }; + +public: + static void init(); + + A64DOpcode() + : m_opcode(0) + , m_bufferOffset(0) + { + init(); + m_formatBuffer[0] = '\0'; + } + + const char* disassemble(uint32_t* currentPC); + +protected: + void setPCAndOpcode(uint32_t*, uint32_t); + const char* format(); + + static const char* const s_conditionNames[16]; + static const char* const s_shiftNames[4]; + static const char* const s_optionName[8]; + static const char s_FPRegisterPrefix[5]; + + static const char* conditionName(unsigned condition) { return s_conditionNames[condition & 0xf]; } + static const char* shiftName(unsigned shiftValue) { return s_shiftNames[shiftValue & 0x3]; } + const char* optionName() { return s_optionName[option()]; } + static char FPRegisterPrefix(unsigned FPRegisterSize) + { + if (FPRegisterSize > 4) + FPRegisterSize = 4; + return s_FPRegisterPrefix[FPRegisterSize]; + } + + unsigned opcodeGroupNumber(uint32_t opcode) { return (opcode >> 24) & 0x1f; } + + bool is64Bit() { return m_opcode & 0x80000000; } + unsigned size() { return m_opcode >> 30; } + unsigned option() { return (m_opcode >> 13) & 0x7; } + unsigned rd() { return m_opcode & 0x1f; } + unsigned rt() { return m_opcode & 0x1f; } + unsigned rn() { return (m_opcode >> 5) & 0x1f; } + unsigned rm() { return (m_opcode >> 16) & 0x1f; } + + void bufferPrintf(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3); + + void appendInstructionName(const char* instructionName) + { + bufferPrintf(" %-7.7s", instructionName); + } + + void appendRegisterName(unsigned registerNumber, bool is64Bit = true); + void appendSPOrRegisterName(unsigned registerNumber, bool is64Bit = true) + { + if (registerNumber == 31) { + bufferPrintf(is64Bit ? "sp" : "wsp"); + return; + } + appendRegisterName(registerNumber, is64Bit); + } + + void appendZROrRegisterName(unsigned registerNumber, bool is64Bit = true) + { + if (registerNumber == 31) { + bufferPrintf(is64Bit ? 
"xzr" : "wzr"); + return; + } + appendRegisterName(registerNumber, is64Bit); + } + + void appendFPRegisterName(unsigned registerNumber, unsigned registerSize); + + void appendSeparator() + { + bufferPrintf(", "); + } + + void appendCharacter(const char c) + { + bufferPrintf("%c", c); + } + + void appendString(const char* string) + { + bufferPrintf("%s", string); + } + + void appendShiftType(unsigned shiftValue) + { + bufferPrintf("%s ", shiftName(shiftValue)); + } + + void appendSignedImmediate(int immediate) + { + bufferPrintf("#%d", immediate); + } + + void appendUnsignedImmediate(unsigned immediate) + { + bufferPrintf("#%u", immediate); + } + + void appendUnsignedImmediate64(uint64_t immediate) + { + bufferPrintf("#0x%" PRIx64, immediate); + } + + void appendPCRelativeOffset(uint32_t* pc, int32_t immediate) + { + bufferPrintf("0x%" PRIx64, reinterpret_cast(pc + immediate)); + } + + void appendShiftAmount(unsigned amount) + { + bufferPrintf("lsl #%u", 16 * amount); + } + + static const int bufferSize = 81; + + char m_formatBuffer[bufferSize]; + uint32_t* m_currentPC; + uint32_t m_opcode; + int m_bufferOffset; + +private: + static OpcodeGroup* opcodeTable[32]; + + static bool s_initialized; +}; + +#define DEFINE_STATIC_FORMAT(klass, thisObj) \ + static const char* format(A64DOpcode* thisObj) { return reinterpret_cast< klass *>(thisObj)->format(); } + +class A64DOpcodeAddSubtract : public A64DOpcode { +private: + static const char* const s_opNames[4]; + +public: + const char* opName() { return s_opNames[opAndS()]; } + const char* cmpName() { return op() ? "cmp" : "cmn"; } + + bool isCMP() { return (sBit() && rd() == 31); } + unsigned op() { return (m_opcode >> 30) & 0x1; } + unsigned sBit() { return (m_opcode >> 29) & 0x1; } + unsigned opAndS() { return (m_opcode >> 29) & 0x3; } +}; + +class A64DOpcodeAddSubtractImmediate : public A64DOpcodeAddSubtract { +public: + static const uint32_t mask = 0x1f000000; + static const uint32_t pattern = 0x11000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeAddSubtractImmediate, thisObj); + + const char* format(); + + bool isMovSP() { return (!opAndS() && !immed12() && ((rd() == 31) || rn() == 31)); } + unsigned shift() { return (m_opcode >> 22) & 0x3; } + unsigned immed12() { return (m_opcode >> 10) & 0xfff; } +}; + +class A64DOpcodeAddSubtractExtendedRegister : public A64DOpcodeAddSubtract { +public: + static const uint32_t mask = 0x1fe00000; + static const uint32_t pattern = 0x0b200000; + + DEFINE_STATIC_FORMAT(A64DOpcodeAddSubtractExtendedRegister, thisObj); + + const char* format(); + + unsigned immediate3() { return (m_opcode >> 10) & 0x7; } +}; + +class A64DOpcodeAddSubtractShiftedRegister : public A64DOpcodeAddSubtract { +public: + static const uint32_t mask = 0x1f200000; + static const uint32_t pattern = 0x0b000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeAddSubtractShiftedRegister, thisObj); + + const char* format(); + + bool isNeg() { return (op() && rn() == 31); } + const char* negName() { return sBit() ? 
"negs" : "neg"; } + unsigned shift() { return (m_opcode >> 22) & 0x3; } + int immediate6() { return (static_cast((m_opcode >> 10) & 0x3f) << 26) >> 26; } +}; + +class A64DOpcodeBitfield : public A64DOpcode { +private: + static const char* const s_opNames[3]; + static const char* const s_extendPseudoOpNames[3][3]; + static const char* const s_insertOpNames[3]; + static const char* const s_extractOpNames[3]; + +public: + static const uint32_t mask = 0x1f800000; + static const uint32_t pattern = 0x13000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeBitfield, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opc()]; } + const char* extendPseudoOpNames(unsigned opSize) { return s_extendPseudoOpNames[opc()][opSize]; } + const char* insertOpNames() { return s_insertOpNames[opc()]; } + const char* extractOpNames() { return s_extractOpNames[opc()]; } + + unsigned opc() { return (m_opcode >> 29) & 0x3; } + unsigned nBit() { return (m_opcode >> 22) & 0x1; } + unsigned immediateR() { return (m_opcode >> 16) & 0x3f; } + unsigned immediateS() { return (m_opcode >> 10) & 0x3f; } +}; + +class A64DOpcodeCompareAndBranchImmediate : public A64DOpcode { +public: + static const uint32_t mask = 0x7e000000; + static const uint32_t pattern = 0x34000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeCompareAndBranchImmediate, thisObj); + + const char* format(); + + unsigned opBit() { return (m_opcode >> 24) & 0x1; } + int immediate19() { return (static_cast((m_opcode >> 5) & 0x7ffff) << 13) >> 13; } +}; + +class A64DOpcodeConditionalBranchImmediate : public A64DOpcode { +public: + static const uint32_t mask = 0xff000010; + static const uint32_t pattern = 0x54000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeConditionalBranchImmediate, thisObj); + + const char* format(); + + unsigned condition() { return m_opcode & 0xf; } + int immediate19() { return (static_cast((m_opcode >> 5) & 0x7ffff) << 13) >> 13; } +}; + +class A64DOpcodeConditionalSelect : public A64DOpcode { +private: + static const char* const s_opNames[4]; + +public: + static const uint32_t mask = 0x1fe00010; + static const uint32_t pattern = 0x1a800000; + + DEFINE_STATIC_FORMAT(A64DOpcodeConditionalSelect, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opNum()]; } + unsigned opNum() { return (op() << 1 | (op2() & 0x1)); } + unsigned op() { return (m_opcode >> 30) & 0x1; } + unsigned sBit() { return (m_opcode >> 29) & 0x1; } + unsigned condition() { return (m_opcode >> 12) & 0xf; } + unsigned op2() { return (m_opcode >> 10) & 0x3; } +}; + +class A64DOpcodeDataProcessing2Source : public A64DOpcode { +private: + static const char* const s_opNames[8]; + +public: + static const uint32_t mask = 0x5fe00000; + static const uint32_t pattern = 0x1ac00000; + + DEFINE_STATIC_FORMAT(A64DOpcodeDataProcessing2Source, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opNameIndex()]; } + unsigned sBit() { return (m_opcode >> 29) & 0x1; } + unsigned opCode() { return (m_opcode >> 10) & 0x3f; } + unsigned opNameIndex() { return ((m_opcode >> 11) & 0x4) | ((m_opcode >> 10) & 0x3); } +}; + +class A64DOpcodeDataProcessing3Source : public A64DOpcode { +private: + static const char* const s_opNames[16]; + static const char* const s_pseudoOpNames[16]; + +public: + static const uint32_t mask = 0x1f000000; + static const uint32_t pattern = 0x1b000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeDataProcessing3Source, thisObj); + + const char* format(); + + const char* opName() { return ra() == 31 ? 
s_opNames[opNum() & 0xf] : s_pseudoOpNames[opNum() & 0xf]; } + unsigned ra() { return (m_opcode >> 10) & 0x1f; } + unsigned op54() { return (m_opcode >> 29) & 0x3; } + unsigned op31() { return (m_opcode >> 21) & 0x7; } + unsigned op0() { return (m_opcode >> 15) & 0x1; } + unsigned opNum() { return ((m_opcode >> 25) & 0x30) | ((m_opcode >> 20) & 0xe) | ((m_opcode >> 15) & 0x1); } +}; + +class A64OpcodeExceptionGeneration : public A64DOpcode { +public: + static const uint32_t mask = 0xff000010; + static const uint32_t pattern = 0xd4000000; + + DEFINE_STATIC_FORMAT(A64OpcodeExceptionGeneration, thisObj); + + const char* format(); + + unsigned opc() { return (m_opcode>>21) & 0x7; } + unsigned op2() { return (m_opcode>>2) & 0x7; } + unsigned ll() { return m_opcode & 0x3; } + int immediate16() { return (static_cast((m_opcode >> 5) & 0xffff) << 16) >> 16; } +}; + +class A64DOpcodeExtract : public A64DOpcode { +public: + static const uint32_t mask = 0x1f800000; + static const uint32_t pattern = 0x13800000; + + DEFINE_STATIC_FORMAT(A64DOpcodeExtract, thisObj); + + const char* format(); + + unsigned op21() { return (m_opcode >> 29) & 0x3; } + unsigned nBit() { return (m_opcode >> 22) & 0x1; } + unsigned o0Bit() { return (m_opcode >> 21) & 0x1; } + unsigned immediateS() { return (m_opcode >> 10) & 0x3f; } +}; + +class A64DOpcodeFloatingPointOps : public A64DOpcode { +public: + unsigned mBit() { return (m_opcode >> 31) & 0x1; } + unsigned sBit() { return (m_opcode >> 29) & 0x1; } + unsigned type() { return (m_opcode >> 22) & 0x3; } +}; + +class A64DOpcodeFloatingPointCompare : public A64DOpcodeFloatingPointOps { +private: + static const char* const s_opNames[16]; + +public: + static const uint32_t mask = 0x5f203c00; + static const uint32_t pattern = 0x1e202000; + + DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointCompare, thisObj); + + const char* format(); + + const char* opName() { return (opNum() & 0x2) ? 
"fcmpe" : "fcmp"; } + + unsigned op() { return (m_opcode >> 14) & 0x3; } + unsigned opCode2() { return m_opcode & 0x1f; } + unsigned opNum() { return (m_opcode >> 3) & 0x3; } +}; + +class A64DOpcodeFloatingPointDataProcessing1Source : public A64DOpcodeFloatingPointOps { +private: + static const char* const s_opNames[16]; + +public: + static const uint32_t mask = 0x5f207c00; + static const uint32_t pattern = 0x1e204000; + + DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointDataProcessing1Source, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opNum()]; } + + unsigned opNum() { return (m_opcode >> 15) & 0x3f; } +}; + +class A64DOpcodeFloatingPointDataProcessing2Source : public A64DOpcodeFloatingPointOps { +private: + static const char* const s_opNames[16]; + +public: + static const uint32_t mask = 0x5f200800; + static const uint32_t pattern = 0x1e200800; + + DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointDataProcessing2Source, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opNum()]; } + + unsigned opNum() { return (m_opcode >> 12) & 0xf; } +}; + +class A64DOpcodeFloatingFixedPointConversions : public A64DOpcodeFloatingPointOps { +private: + static const char* const s_opNames[4]; + +public: + static const uint32_t mask = 0x5f200000; + static const uint32_t pattern = 0x1e000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeFloatingFixedPointConversions, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opNum()]; } + unsigned rmode() { return (m_opcode >> 19) & 0x3; } + unsigned opcode() { return (m_opcode >> 16) & 0x7; } + unsigned scale() { return (m_opcode >> 10) & 0x3f; } + unsigned opNum() { return (m_opcode >> 16) & 0x3; } +}; + +class A64DOpcodeFloatingPointIntegerConversions : public A64DOpcodeFloatingPointOps { +private: + static const char* const s_opNames[32]; + +public: + static const uint32_t mask = 0x5f20fc00; + static const uint32_t pattern = 0x1e200000; + + DEFINE_STATIC_FORMAT(A64DOpcodeFloatingPointIntegerConversions, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opNum()]; } + unsigned rmode() { return (m_opcode >> 19) & 0x3; } + unsigned opcode() { return (m_opcode >> 16) & 0x7; } + unsigned opNum() { return (m_opcode >> 16) & 0x1f; } +}; + +class A64DOpcodeHint : public A64DOpcode { +private: + static const char* const s_opNames[6]; + +public: + static const uint32_t mask = 0xfffff01f; + static const uint32_t pattern = 0xd503201f; + + DEFINE_STATIC_FORMAT(A64DOpcodeHint, thisObj); + + const char* format(); + + const char* opName() { return immediate7() <= 5 ? 
s_opNames[immediate7()] : "hint"; } + unsigned immediate7() { return (m_opcode >> 5) & 0x7f; } +}; + +class A64DOpcodeLoadStore : public A64DOpcode { +private: + static const char* const s_opNames[32]; + +protected: + const char* opName() + { + return s_opNames[opNumber()]; + } + + unsigned size() { return (m_opcode >> 30) & 0x3; } + unsigned vBit() { return (m_opcode >> 26) & 0x1; } + unsigned opc() { return (m_opcode >> 22) & 0x3; } + unsigned opNumber() { return (size() <<3 ) | (vBit() << 2) | opc(); } + bool is64BitRT() { return ((opNumber() & 0x17) == 0x02) || ((opNumber() & 0x1e) == 0x18); } +}; + +class A64DOpcodeLoadStoreImmediate : public A64DOpcodeLoadStore { +private: + static const char* const s_unprivilegedOpNames[32]; + static const char* const s_unscaledOpNames[32]; + +public: + static const uint32_t mask = 0x3b200000; + static const uint32_t pattern = 0x38000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreImmediate, thisObj); + + const char* format(); + + const char* unprivilegedOpName() + { + return s_unprivilegedOpNames[opNumber()]; + } + const char* unscaledOpName() + { + return s_unscaledOpNames[opNumber()]; + } + unsigned type() { return (m_opcode >> 10) & 0x3; } + int immediate9() { return (static_cast((m_opcode >> 12) & 0x1ff) << 23) >> 23; } +}; + +class A64DOpcodeLoadStoreRegisterOffset : public A64DOpcodeLoadStore { +public: + static const uint32_t mask = 0x3b200c00; + static const uint32_t pattern = 0x38200800; + + DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreRegisterOffset, thisObj); + + const char* format(); + + unsigned option() { return (m_opcode >> 13) & 0x7; } + int sBit() { return (m_opcode >> 12) & 0x1; } +}; + +class A64DOpcodeLoadStoreRegisterPair : public A64DOpcodeLoadStore { +public: + static const uint32_t mask = 0x3a000000; + static const uint32_t pattern = 0x28000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreRegisterPair, thisObj); + + const char* format(); + const char* opName(); + + unsigned rt2() { return (m_opcode >> 10) & 0x1f; } + int immediate7() { return (static_cast((m_opcode >> 15) & 0x7f) << 25) >> 25; } + unsigned offsetMode() { return (m_opcode >> 23) & 0x7; } + int lBit() { return (m_opcode >> 22) & 0x1; } +}; + +class A64DOpcodeLoadStoreUnsignedImmediate : public A64DOpcodeLoadStore { +public: + static const uint32_t mask = 0x3b000000; + static const uint32_t pattern = 0x39000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeLoadStoreUnsignedImmediate, thisObj); + + const char* format(); + + unsigned immediate12() { return (m_opcode >> 10) & 0xfff; } +}; + +class A64DOpcodeLogical : public A64DOpcode { +private: + static const char* const s_opNames[8]; + +public: + const char* opName(unsigned opNumber) + { + return s_opNames[opNumber & 0x7]; + } + + unsigned opc() { return (m_opcode >> 29) & 0x3; } + unsigned nBit() { return (m_opcode >> 21) & 0x1; } +}; + +class A64DOpcodeLogicalImmediate : public A64DOpcodeLogical { +public: + static const uint32_t mask = 0x1f800000; + static const uint32_t pattern = 0x12000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeLogicalImmediate, thisObj); + + const char* format(); + + bool isTst() { return ((opNumber() == 6) && (rd() == 31)); } + bool isMov() { return ((opNumber() == 2) && (rn() == 31)); } + unsigned opNumber() { return opc() << 1; } + unsigned nBit() { return (m_opcode >> 22) & 0x1; } + unsigned immediateR() { return (m_opcode >> 16) & 0x3f; } + unsigned immediateS() { return (m_opcode >> 10) & 0x3f; } +}; + +class A64DOpcodeLogicalShiftedRegister : public A64DOpcodeLogical { +public: + static const 
uint32_t mask = 0x1f000000; + static const uint32_t pattern = 0x0a000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeLogicalShiftedRegister, thisObj); + + const char* format(); + + bool isTst() { return ((opNumber() == 6) && (rd() == 31)); } + bool isMov() { return ((opNumber() == 2) && (rn() == 31)); } + unsigned opNumber() { return (opc() << 1) | nBit(); } + unsigned shift() { return (m_opcode >> 22) & 0x3; } + int immediate6() { return (static_cast((m_opcode >> 10) & 0x3f) << 26) >> 26; } +}; + +class A64DOpcodeMoveWide : public A64DOpcode { +private: + static const char* const s_opNames[4]; + +public: + static const uint32_t mask = 0x1f800000; + static const uint32_t pattern = 0x12800000; + + DEFINE_STATIC_FORMAT(A64DOpcodeMoveWide, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opc()]; } + unsigned opc() { return (m_opcode >> 29) & 0x3; } + unsigned hw() { return (m_opcode >> 21) & 0x3; } + unsigned immediate16() { return (m_opcode >> 5) & 0xffff; } +}; + +class A64DOpcodeTestAndBranchImmediate : public A64DOpcode { +public: + static const uint32_t mask = 0x7e000000; + static const uint32_t pattern = 0x36000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeTestAndBranchImmediate, thisObj); + + const char* format(); + + unsigned bitNumber() { return ((m_opcode >> 26) & 0x20) | ((m_opcode >> 19) & 0x1f); } + unsigned opBit() { return (m_opcode >> 24) & 0x1; } + int immediate14() { return (static_cast((m_opcode >> 5) & 0x3fff) << 18) >> 18; } +}; + +class A64DOpcodeUnconditionalBranchImmediate : public A64DOpcode { +public: + static const uint32_t mask = 0x7c000000; + static const uint32_t pattern = 0x14000000; + + DEFINE_STATIC_FORMAT(A64DOpcodeUnconditionalBranchImmediate, thisObj); + + const char* format(); + + unsigned op() { return (m_opcode >> 31) & 0x1; } + int immediate26() { return (static_cast(m_opcode & 0x3ffffff) << 6) >> 6; } +}; + +class A64DOpcodeUnconditionalBranchRegister : public A64DOpcode { +private: + static const char* const s_opNames[8]; + +public: + static const uint32_t mask = 0xfe1ffc1f; + static const uint32_t pattern = 0xd61f0000; + + DEFINE_STATIC_FORMAT(A64DOpcodeUnconditionalBranchRegister, thisObj); + + const char* format(); + + const char* opName() { return s_opNames[opc()]; } + unsigned opc() { return (m_opcode >> 21) & 0xf; } +}; + +} } // namespace JSC::ARM64Disassembler + +using JSC::ARM64Disassembler::A64DOpcode; + +#endif // A64DOpcode_h diff --git a/src/3rdparty/masm/disassembler/ARM64Disassembler.cpp b/src/3rdparty/masm/disassembler/ARM64Disassembler.cpp new file mode 100644 index 0000000000..27ae4b96d0 --- /dev/null +++ b/src/3rdparty/masm/disassembler/ARM64Disassembler.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define __STDC_FORMAT_MACROS +#include "config.h" +#include "Disassembler.h" + +#if USE(ARM64_DISASSEMBLER) && CPU(ARM64) + +#include "ARM64/A64DOpcode.h" +#include "MacroAssemblerCodeRef.h" + +namespace JSC { + +bool tryToDisassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out) +{ + A64DOpcode arm64Opcode; + + uint32_t* currentPC = reinterpret_cast(codePtr.executableAddress()); + size_t byteCount = size; + + while (byteCount) { + char pcString[20]; + snprintf(pcString, sizeof(pcString), "0x%lx", reinterpret_cast(currentPC)); + out.printf("%s%16s: %s\n", prefix, pcString, arm64Opcode.disassemble(currentPC)); + currentPC++; + byteCount -= sizeof(uint32_t); + } + + return true; +} + +} // namespace JSC + +#endif // USE(ARM64_DISASSEMBLER) + +#if USE(LLVM_DISASSEMBLER) && CPU(ARM64) + +#include "LLVMDisassembler.h" + +namespace JSC { + +bool tryToDisassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out, InstructionSubsetHint hint) +{ + return tryToDisassembleWithLLVM(codePtr, size, prefix, out, hint); +} + +} // namespace JSC + +#endif // USE(LLVM_DISASSEMBLER) && CPU(ARM64) diff --git a/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.cpp b/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.cpp index d9afdd447a..6af71e523b 100644 --- a/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.cpp +++ b/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.cpp @@ -1623,6 +1623,8 @@ const char* ARMv7DOpcodeVCVT::format() default: n1 = "vcvt.?"; n2 = ".?"; + dregPrefix = '?'; + mregPrefix = '?'; break; } diff --git a/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.h b/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.h index 051d05525a..03e0ba21c4 100644 --- a/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.h +++ b/src/3rdparty/masm/disassembler/ARMv7/ARMv7DOpcode.h @@ -127,7 +127,7 @@ protected: void appendPCRelativeOffset(int32_t immediate) { - bufferPrintf("0x%x", reinterpret_cast(m_currentPC + immediate)); + bufferPrintf("%p", m_currentPC + immediate); } void appendShiftAmount(unsigned amount) diff --git a/src/3rdparty/masm/disassembler/ARMv7Disassembler.cpp b/src/3rdparty/masm/disassembler/ARMv7Disassembler.cpp index bfb48953c0..dee017525d 100644 --- a/src/3rdparty/masm/disassembler/ARMv7Disassembler.cpp +++ b/src/3rdparty/masm/disassembler/ARMv7Disassembler.cpp @@ -26,7 +26,7 @@ #include "config.h" #include "Disassembler.h" -#if USE(ARMV7_DISASSEMBLER) +#if USE(ARMV7_DISASSEMBLER) && CPU(ARM_THUMB2) #include "ARMv7/ARMv7DOpcode.h" #include "MacroAssemblerCodeRef.h" diff --git a/src/3rdparty/masm/masm-defs.pri b/src/3rdparty/masm/masm-defs.pri index c76eeec724..bfcccfeeaa 100644 --- a/src/3rdparty/masm/masm-defs.pri +++ b/src/3rdparty/masm/masm-defs.pri @@ -24,7 +24,7 @@ INCLUDEPATH += $$PWD disassembler { if(isEqual(QT_ARCH, "i386")|isEqual(QT_ARCH, "x86_64")): DEFINES += WTF_USE_UDIS86=1 - if(isEqual(QT_ARCH, "arm")): DEFINES += WTF_USE_ARMV7_DISASSEMBLER=1 + if(isEqual(QT_ARCH, "arm")): DEFINES += 
WTF_USE_ARMV7_DISASSEMBLER=1 WTF_USE_ARM64_DISASSEMBLER=1 if(isEqual(QT_ARCH, "mips")): DEFINES += WTF_USE_MIPS32_DISASSEMBLER=1 } else { DEFINES += WTF_USE_UDIS86=0 diff --git a/src/3rdparty/masm/masm.pri b/src/3rdparty/masm/masm.pri index 04548fe8a3..afa1438974 100644 --- a/src/3rdparty/masm/masm.pri +++ b/src/3rdparty/masm/masm.pri @@ -62,6 +62,9 @@ HEADERS += $$PWD/disassembler/ARMv7/ARMv7DOpcode.h SOURCES += $$PWD/disassembler/Mips32Disassembler.cpp SOURCES += $$PWD/disassembler/mips32/Mips32Opcode.cpp HEADERS += $$PWD/disassembler/mips32/Mips32Opcode.h +SOURCES += $$PWD/disassembler/ARM64Disassembler.cpp +SOURCES += $$PWD/disassembler/ARM64/A64DOpcode.cpp +HEADERS += $$PWD/disassembler/ARM64/A64DOpcode.h SOURCES += $$PWD/yarr/*.cpp HEADERS += $$PWD/yarr/*.h diff --git a/src/3rdparty/masm/stubs/WTFStubs.cpp b/src/3rdparty/masm/stubs/WTFStubs.cpp index 828712b29a..3f10298f56 100644 --- a/src/3rdparty/masm/stubs/WTFStubs.cpp +++ b/src/3rdparty/masm/stubs/WTFStubs.cpp @@ -112,6 +112,13 @@ void WTFReportAssertionFailure(const char* file, int line, const char* function, fprintf(stderr, "WTF failing assertion in %s, line %d, function %s: %s\n", file, line, function, assertion); } +void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) +{ + // TODO: show the message, or remove this function completely. (The latter would probably be best.) + Q_UNUSED(format); + fprintf(stderr, "WTF failing assertion in %s, line %d, function %s: %s\n", file, line, function, assertion); +} + void WTFReportBacktrace() { } diff --git a/src/3rdparty/masm/wtf/Platform.h b/src/3rdparty/masm/wtf/Platform.h index 2a2b8abc61..3f480d344a 100644 --- a/src/3rdparty/masm/wtf/Platform.h +++ b/src/3rdparty/masm/wtf/Platform.h @@ -166,6 +166,11 @@ #define WTF_CPU_X86_64 1 #endif +/* CPU(ARM64) - Apple */ +#if (defined(__arm64__) && defined(__APPLE__)) || defined(__aarch64__) +#define WTF_CPU_ARM64 1 +#endif + /* CPU(ARM) - ARM, any version*/ #define WTF_ARM_ARCH_AT_LEAST(N) (CPU(ARM) && WTF_ARM_ARCH_VERSION >= N) @@ -705,6 +710,7 @@ #if (CPU(X86_64) && (OS(UNIX) || OS(WINDOWS))) \ || (CPU(IA64) && !CPU(IA64_32)) \ || CPU(ALPHA) \ + || CPU(ARM64) \ || CPU(SPARC64) \ || CPU(S390X) \ || CPU(PPC64) @@ -725,7 +731,7 @@ /* The JIT is enabled by default on all x86, x86-64, ARM & MIPS platforms. */ #if !defined(ENABLE_JIT) \ - && (CPU(X86) || CPU(X86_64) || CPU(ARM) || CPU(MIPS)) \ + && (CPU(X86) || CPU(X86_64) || CPU(ARM) || CPU(MIPS) || CPU(ARM64)) \ && (OS(DARWIN) || !COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 1, 0)) \ && !OS(WINCE) \ && !(OS(QNX) && !PLATFORM(QT)) /* We use JIT in QNX Qt */ @@ -740,7 +746,7 @@ #define WTF_USE_UDIS86 1 #endif -#if !defined(ENABLE_DISASSEMBLER) && (USE(UDIS86) || USE(ARMV7_DISASSEMBLER) || USE(MIPS32_DISASSEMBLER)) +#if !defined(ENABLE_DISASSEMBLER) && (USE(UDIS86) || USE(ARMV7_DISASSEMBLER) || USE(ARM64_DISASSEMBLER) || USE(MIPS32_DISASSEMBLER)) #define ENABLE_DISASSEMBLER 1 #endif @@ -874,7 +880,7 @@ /* Pick which allocator to use; we only need an executable allocator if the assembler is compiled in. On x86-64 we use a single fixed mmap, on other platforms we mmap on demand. 
*/ #if ENABLE(ASSEMBLER) -#if CPU(X86_64) && !OS(WINDOWS) || PLATFORM(IOS) +#if CPU(X86_64) && !OS(WINDOWS) || PLATFORM(IOS) || CPU(ARM64) #define ENABLE_EXECUTABLE_ALLOCATOR_FIXED 1 #else #define ENABLE_EXECUTABLE_ALLOCATOR_DEMAND 1 @@ -933,7 +939,7 @@ #define WTF_USE_ACCESSIBILITY_CONTEXT_MENUS 1 #endif -#if CPU(ARM_THUMB2) +#if CPU(ARM_THUMB2) || CPU(ARM64) #define ENABLE_BRANCH_COMPACTION 1 #endif diff --git a/src/3rdparty/masm/yarr/YarrJIT.cpp b/src/3rdparty/masm/yarr/YarrJIT.cpp index ce18b3ab4f..5664c585b9 100644 --- a/src/3rdparty/masm/yarr/YarrJIT.cpp +++ b/src/3rdparty/masm/yarr/YarrJIT.cpp @@ -53,6 +53,17 @@ class YarrGenerator : private MacroAssembler { static const RegisterID returnRegister = ARMRegisters::r0; static const RegisterID returnRegister2 = ARMRegisters::r1; +#elif CPU(ARM64) + static const RegisterID input = ARM64Registers::x0; + static const RegisterID index = ARM64Registers::x1; + static const RegisterID length = ARM64Registers::x2; + static const RegisterID output = ARM64Registers::x3; + + static const RegisterID regT0 = ARM64Registers::x4; + static const RegisterID regT1 = ARM64Registers::x5; + + static const RegisterID returnRegister = ARM64Registers::x0; + static const RegisterID returnRegister2 = ARM64Registers::x1; #elif CPU(MIPS) static const RegisterID input = MIPSRegisters::a0; static const RegisterID index = MIPSRegisters::a1; diff --git a/src/qml/jit/qv4targetplatform_p.h b/src/qml/jit/qv4targetplatform_p.h index 3c93aa12b0..e38f0323e0 100644 --- a/src/qml/jit/qv4targetplatform_p.h +++ b/src/qml/jit/qv4targetplatform_p.h @@ -365,7 +365,120 @@ public: as->pop(StackFrameRegister); as->pop(JSC::ARMRegisters::lr); } -#endif // Linux on ARM (32 bit) +#endif // ARM (32 bit) + +#if CPU(ARM64) + enum { RegAllocIsSupported = 1 }; + + static const JSC::MacroAssembler::RegisterID StackFrameRegister = JSC::ARM64Registers::fp; + static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::ARM64Registers::x28; + static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::ARM64Registers::sp; + static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::ARM64Registers::x9; + static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::ARM64Registers::x27; + static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::ARM64Registers::x0; + static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::ARM64Registers::q0; + static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::ARM64Registers::q1; + + static RegisterInformation getPlatformRegisterInfo() + { + typedef RegisterInfo RI; + return RegisterInformation() + << RI(JSC::ARM64Registers::x0, QStringLiteral("x0"), RI::RegularRegister, RI::CallerSaved, RI::Predefined) + << RI(JSC::ARM64Registers::x1, QStringLiteral("x1"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x2, QStringLiteral("x2"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x3, QStringLiteral("x3"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x4, QStringLiteral("x4"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x5, QStringLiteral("x5"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x6, QStringLiteral("x6"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x7, QStringLiteral("x7"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::x8, QStringLiteral("x8"), 
+                << RI(JSC::ARM64Registers::x9, QStringLiteral("x9"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
+                << RI(JSC::ARM64Registers::x10, QStringLiteral("x10"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x11, QStringLiteral("x11"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x12, QStringLiteral("x12"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x13, QStringLiteral("x13"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x14, QStringLiteral("x14"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x15, QStringLiteral("x15"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x19, QStringLiteral("x19"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x20, QStringLiteral("x20"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x21, QStringLiteral("x21"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x22, QStringLiteral("x22"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x23, QStringLiteral("x23"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x24, QStringLiteral("x24"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x25, QStringLiteral("x25"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x26, QStringLiteral("x26"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::x27, QStringLiteral("x27"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
+                << RI(JSC::ARM64Registers::x28, QStringLiteral("x28"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
+
+                << RI(JSC::ARM64Registers::q2, QStringLiteral("q2"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q3, QStringLiteral("q3"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q4, QStringLiteral("q4"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q5, QStringLiteral("q5"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q6, QStringLiteral("q6"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q8, QStringLiteral("q8"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q9, QStringLiteral("q9"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q10, QStringLiteral("q10"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q11, QStringLiteral("q11"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q12, QStringLiteral("q12"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q13, QStringLiteral("q13"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q14, QStringLiteral("q14"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q15, QStringLiteral("q15"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q16, QStringLiteral("q16"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
+                << RI(JSC::ARM64Registers::q17, QStringLiteral("q17"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
QStringLiteral("q17"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q18, QStringLiteral("q18"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q19, QStringLiteral("q19"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q20, QStringLiteral("q20"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q21, QStringLiteral("q21"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q22, QStringLiteral("q22"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q23, QStringLiteral("q23"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q24, QStringLiteral("q24"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q25, QStringLiteral("q25"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q26, QStringLiteral("q26"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q27, QStringLiteral("q27"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q28, QStringLiteral("q28"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q29, QStringLiteral("q29"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q30, QStringLiteral("q30"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + << RI(JSC::ARM64Registers::q31, QStringLiteral("q31"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc) + ; + } + +#undef HAVE_ALU_OPS_WITH_MEM_OPERAND +#define VALUE_FITS_IN_REGISTER + static const int RegisterSize = 8; + +#define ARGUMENTS_IN_REGISTERS + static const int RegisterArgumentCount = 8; + static JSC::MacroAssembler::RegisterID registerForArgument(int index) + { + static JSC::MacroAssembler::RegisterID regs[RegisterArgumentCount] = { + JSC::ARM64Registers::x0, + JSC::ARM64Registers::x1, + JSC::ARM64Registers::x2, + JSC::ARM64Registers::x3, + JSC::ARM64Registers::x4, + JSC::ARM64Registers::x5, + JSC::ARM64Registers::x6, + JSC::ARM64Registers::x7 + }; + + Q_ASSERT(index >= 0 && index < RegisterArgumentCount); + return regs[index]; + }; + + static const int StackAlignment = 16; + static const int StackShadowSpace = 0; + static const int StackSpaceAllocatedUponFunctionEntry = 1 * RegisterSize; // Registers saved in platformEnterStandardStackFrame below. + + static void platformEnterStandardStackFrame(JSC::MacroAssembler *as) + { + as->pushPair(StackFrameRegister, JSC::ARM64Registers::lr); + } + + static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as) + { + as->popPair(StackFrameRegister, JSC::ARM64Registers::lr); + } +#endif // ARM64 #if defined(Q_PROCESSOR_MIPS_32) && defined(Q_OS_LINUX) enum { RegAllocIsSupported = 1 }; diff --git a/src/qml/jsruntime/qv4global_p.h b/src/qml/jsruntime/qv4global_p.h index b47cf3cab4..15e405b977 100644 --- a/src/qml/jsruntime/qv4global_p.h +++ b/src/qml/jsruntime/qv4global_p.h @@ -99,6 +99,8 @@ inline double trunc(double d) { return d > 0 ? floor(d) : ceil(d); } #define V4_ENABLE_JIT #endif +#elif defined(Q_PROCESSOR_ARM_64) +#define V4_ENABLE_JIT // iOS is disabled below. #elif defined(Q_PROCESSOR_MIPS_32) && defined(Q_OS_LINUX) #define V4_ENABLE_JIT #endif -- cgit v1.2.3