From 9af8a47746b69b6040fc149c1d24602a1e25b08f Mon Sep 17 00:00:00 2001 From: Erik Verbruggen Date: Tue, 7 Nov 2017 11:30:14 +0100 Subject: V4: Add int32 fastpath for add/sub/mul in the JIT Change-Id: I21b0e31c7c93ae51b4ab406948450e566546e246 Reviewed-by: Lars Knoll --- src/3rdparty/masm/assembler/MacroAssemblerARM64.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'src/3rdparty/masm/assembler/MacroAssemblerARM64.h') diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h index d5f4acb3ca..1f94eb9032 100644 --- a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h +++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h @@ -717,16 +717,21 @@ public: } void or64(TrustedImm64 imm, RegisterID dest) + { + or64(imm, dest, dest); + } + + void or64(TrustedImm64 imm, RegisterID src, RegisterID dest) { LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<uint64_t>(static_cast<intptr_t>(imm.m_value))); if (logicalImm.isValid()) { - m_assembler.orr<64>(dest, dest, logicalImm); + m_assembler.orr<64>(dest, src, logicalImm); return; } move(imm, getCachedDataTempRegisterIDAndInvalidate()); - m_assembler.orr<64>(dest, dest, dataTempRegister); + m_assembler.orr<64>(dest, src, dataTempRegister); } void rotateRight64(TrustedImm32 imm, RegisterID srcDst) -- cgit v1.2.3 From 4b27062f5fae891850fd0f048f2a2421ca9b6c7c Mon Sep 17 00:00:00 2001 From: Kimmo Ollila Date: Wed, 25 Apr 2018 13:25:45 +0300 Subject: Fix JIT build on INTEGRITY ARM64 -typedef "Jump" may not be used in an elaborated type specifier -explicit specialization of function must precede its first use -"Value" is ambiguous Change-Id: Ic15c196f1b33211cd3f2f25a54ba478747336fe4 Reviewed-by: Simon Hausmann Reviewed-by: Janne Koskinen Reviewed-by: Nikola Velinov --- src/3rdparty/masm/assembler/MacroAssemblerARM64.h | 203 +++++++++++++--------- 1 file changed, 119 insertions(+), 84 deletions(-) (limited to 
'src/3rdparty/masm/assembler/MacroAssemblerARM64.h') diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h index 1f94eb9032..ba0d7e93f8 100644 --- a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h +++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h @@ -3101,40 +3101,22 @@ private: } template<int datasize> - ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) - { - m_assembler.ldr<datasize>(rt, rn, pimm); - } + void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm); template<int datasize> - ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) - { - m_assembler.ldur<datasize>(rt, rn, simm); - } + void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm); template<int datasize> - ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) - { - loadUnsignedImmediate<datasize>(rt, rn, pimm); - } + void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm); template<int datasize> - ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) - { - loadUnscaledImmediate<datasize>(rt, rn, simm); - } + void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm); template<int datasize> - ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) - { - m_assembler.str<datasize>(rt, rn, pimm); - } + void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm); template<int datasize> - ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) - { - m_assembler.stur<datasize>(rt, rn, simm); - } + void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm); void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest) { @@ -3299,74 +3281,19 @@ private: } template<int datasize> - ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset) - { - if (ARM64Assembler::canEncodeSImmOffset(offset)) { - loadUnscaledImmediate<datasize>(rt, rn, offset); - return true; - } - if 
(ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { - loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); - return true; - } - return false; - } + bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset); template<int datasize> - ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset) - { - if (ARM64Assembler::canEncodeSImmOffset(offset)) { - loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset); - return true; - } - if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { - loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); - return true; - } - return false; - } + bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset); template<int datasize> - ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) - { - if (ARM64Assembler::canEncodeSImmOffset(offset)) { - m_assembler.ldur<datasize>(rt, rn, offset); - return true; - } - if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { - m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset)); - return true; - } - return false; - } + bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset); template<int datasize> - ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset) - { - if (ARM64Assembler::canEncodeSImmOffset(offset)) { - storeUnscaledImmediate<datasize>(rt, rn, offset); - return true; - } - if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { - storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); - return true; - } - return false; - } + bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset); template<int datasize> - ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) - { - if (ARM64Assembler::canEncodeSImmOffset(offset)) { - m_assembler.stur<datasize>(rt, rn, offset); - return true; - } - if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { - m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset)); - return true; - } - return false; - } + bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset); Jump 
jumpAfterFloatingPointCompare(DoubleCondition cond) { @@ -3413,6 +3340,43 @@ private: bool m_allowScratchRegister = true; }; +template<int datasize> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldr<datasize>(rt, rn, pimm); +} + +template<int datasize> +ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldur<datasize>(rt, rn, simm); +} + +template<int datasize> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) +{ + loadUnsignedImmediate<datasize>(rt, rn, pimm); +} + +template<int datasize> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) +{ + loadUnscaledImmediate<datasize>(rt, rn, simm); +} + +template<int datasize> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.str<datasize>(rt, rn, pimm); +} + +template<int datasize> +ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.stur<datasize>(rt, rn, simm); +} + + // Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes template<> ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) @@ -3486,6 +3450,77 @@ ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt m_assembler.sturh(rt, rn, simm); } +template<int datasize> +ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset) +{ + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; +} + +template<int datasize> +ALWAYS_INLINE bool MacroAssemblerARM64::tryStoreWithOffset(RegisterID rt, 
RegisterID rn, int32_t offset) +{ + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + storeUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; +} + +template<int datasize> +ALWAYS_INLINE bool MacroAssemblerARM64::tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) +{ + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + m_assembler.stur<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; +} + + +template<int datasize> +ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset) +{ + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; +} + +template<int datasize> +ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + m_assembler.ldur<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + } // namespace JSC #endif // ENABLE(ASSEMBLER) -- cgit v1.2.3 From 18d2f78437d28987297148b63b99ceed6313a78a Mon Sep 17 00:00:00 2001 From: Lars Knoll Date: Mon, 6 Aug 2018 14:55:21 +0200 Subject: Update Yarr to the latest version from WebKit Updated Yarr to a to commit 4d2a53d60487cb1f8b2a9a1e9f684af336fd7d2c in WebKit. Adjusted the yarr code base to work with our older version of wtf and masm. 
Change-Id: I04b4593ece051e1d7aa087b87aa08c92595d1098 Reviewed-by: Simon Hausmann --- src/3rdparty/masm/assembler/MacroAssemblerARM64.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'src/3rdparty/masm/assembler/MacroAssemblerARM64.h') diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h index ba0d7e93f8..e5a704292d 100644 --- a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h +++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h @@ -1126,6 +1126,11 @@ public: m_assembler.ldrh(dest, address.base, memoryTempRegister); } + void load16Unaligned(ImplicitAddress address, RegisterID dest) + { + load16(address, dest); + } + void load16Unaligned(BaseIndex address, RegisterID dest) { load16(address, dest); @@ -1283,6 +1288,16 @@ public: return label; } + void storePair64(RegisterID src1, RegisterID src2, RegisterID dest) + { + storePair64(src1, src2, dest, TrustedImm32(0)); + } + + void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset) + { + m_assembler.stp<64>(src1, src2, dest, offset.m_value); + } + void store32(RegisterID src, ImplicitAddress address) { if (tryStoreWithOffset<32>(src, address.base, address.offset)) @@ -1420,6 +1435,14 @@ public: store8(dataTempRegister, address); } + void getEffectiveAddress(BaseIndex address, RegisterID dest) + { + m_assembler.add<64>(dest, address.base, address.index, ARM64Assembler::LSL, address.scale); + if (address.offset) + add64(TrustedImm32(address.offset), dest); + } + + // Floating-point operations: static bool supportsFloatingPoint() { return true; } -- cgit v1.2.3