Diffstat (limited to 'src/3rdparty/masm/assembler/MacroAssemblerARM64.h')
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARM64.h  203
1 file changed, 119 insertions, 84 deletions
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h
index 1f94eb9032..ba0d7e93f8 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h
@@ -3101,40 +3101,22 @@ private:
}
template<int datasize>
- ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
- {
- m_assembler.ldr<datasize>(rt, rn, pimm);
- }
+ void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm);
template<int datasize>
- ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
- {
- m_assembler.ldur<datasize>(rt, rn, simm);
- }
+ void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm);
template<int datasize>
- ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
- {
- loadUnsignedImmediate<datasize>(rt, rn, pimm);
- }
+ void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm);
template<int datasize>
- ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
- {
- loadUnscaledImmediate<datasize>(rt, rn, simm);
- }
+ void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm);
template<int datasize>
- ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
- {
- m_assembler.str<datasize>(rt, rn, pimm);
- }
+ void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm);
template<int datasize>
- ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
- {
- m_assembler.stur<datasize>(rt, rn, simm);
- }
+ void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm);
void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
{
@@ -3299,74 +3281,19 @@ private:
}
template<int datasize>
- ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
- {
- if (ARM64Assembler::canEncodeSImmOffset(offset)) {
- loadUnscaledImmediate<datasize>(rt, rn, offset);
- return true;
- }
- if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
- loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
- return true;
- }
- return false;
- }
+ bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset);
template<int datasize>
- ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
- {
- if (ARM64Assembler::canEncodeSImmOffset(offset)) {
- loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
- return true;
- }
- if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
- loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
- return true;
- }
- return false;
- }
+ bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset);
template<int datasize>
- ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
- {
- if (ARM64Assembler::canEncodeSImmOffset(offset)) {
- m_assembler.ldur<datasize>(rt, rn, offset);
- return true;
- }
- if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
- m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
- return true;
- }
- return false;
- }
+ bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset);
template<int datasize>
- ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
- {
- if (ARM64Assembler::canEncodeSImmOffset(offset)) {
- storeUnscaledImmediate<datasize>(rt, rn, offset);
- return true;
- }
- if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
- storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
- return true;
- }
- return false;
- }
+ bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset);
template<int datasize>
- ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
- {
- if (ARM64Assembler::canEncodeSImmOffset(offset)) {
- m_assembler.stur<datasize>(rt, rn, offset);
- return true;
- }
- if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
- m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
- return true;
- }
- return false;
- }
+ bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset);
Jump jumpAfterFloatingPointCompare(DoubleCondition cond)
{
@@ -3413,6 +3340,43 @@ private:
bool m_allowScratchRegister = true;
};
+template<int datasize>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.ldr<datasize>(rt, rn, pimm);
+}
+
+template<int datasize>
+ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.ldur<datasize>(rt, rn, simm);
+}
+
+template<int datasize>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ loadUnsignedImmediate<datasize>(rt, rn, pimm);
+}
+
+template<int datasize>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+{
+ loadUnscaledImmediate<datasize>(rt, rn, simm);
+}
+
+template<int datasize>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.str<datasize>(rt, rn, pimm);
+}
+
+template<int datasize>
+ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.stur<datasize>(rt, rn, simm);
+}
+
+
// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
@@ -3486,6 +3450,77 @@ ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt
m_assembler.sturh(rt, rn, simm);
}
+template<int datasize>
+ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+{
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+}
+
+template<int datasize>
+ALWAYS_INLINE bool MacroAssemblerARM64::tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+{
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ storeUnscaledImmediate<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+}
+
+template<int datasize>
+ALWAYS_INLINE bool MacroAssemblerARM64::tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+{
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ m_assembler.stur<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+}
+
+
+template<int datasize>
+ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+{
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ loadUnscaledImmediate<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+}
+
+template<int datasize>
+ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
+{
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ m_assembler.ldur<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+}
+
} // namespace JSC
#endif // ENABLE(ASSEMBLER)