path: root/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
author    Simon Hausmann <simon.hausmann@qt.io>    2017-01-13 16:45:54 +0100
committer Simon Hausmann <simon.hausmann@qt.io>    2017-02-01 08:17:10 +0000
commit    6aaf6abb692dbdc0f0400ab5c76c438034957d55 (patch)
tree      c5b1d58ab2932bebc078bf979b682c2a43051a69 /src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
parent    a159d6d43da100c5a4acb183589fa4554b33a474 (diff)
Include the ARMv7 assembler in the qmldevtools bootstrap build
Even when the C++ target is not ARMv7, we want to include it in order to be
able to generate QML cache files with code ahead of time. This requires a few
changes:

* The Jump classes need to move from the AbstractMacroAssembler super-class
  into the concrete assembler sub-class, in order to use them in
  specializations.
* Some of the template specializations, in LinkBuffer for example or for
  platform dependent operations, need to be pre-processor enabled when
  bootstrapping.
* The generic loadPtr/addPtr etc. functions need to move to the concrete
  assemblers to be able to call the correct 32-bit or 64-bit variations.
* We need to force what looks like a loss of precision to the compiler in the
  32-bit ARMv7 linking code when linking jumps.

Finally we can then explicitly instantiate at least QV4::JIT::Assembler for
ARMv7 when bootstrapping. Currently only on x86-64 hosts, but that is a
temporary limitation.

Change-Id: I501db2360e1fded48f17f17d9e87252d47f8537e
Reviewed-by: Simon Hausmann <simon.hausmann@qt.io>
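For orientation, the core of the change is the delegation pattern visible in
the hunks below: under V4_BOOTSTRAP the pointer-sized helpers are defined in
the concrete ARMv7 assembler and simply forward to the 32-bit operations,
since pointers are 32 bits wide on the target regardless of the host. A
condensed sketch, not the complete set of overloads added by the patch:

    #if defined(V4_BOOTSTRAP)
        // Pointer-sized operations forward to their 32-bit counterparts:
        // the generated code targets 32-bit ARMv7 even on a 64-bit host.
        void loadPtr(ImplicitAddress address, RegisterID dest)
        {
            load32(address, dest);
        }

        void storePtr(RegisterID src, ImplicitAddress address)
        {
            store32(src, address);
        }
    #endif

Host-only operations that dereference absolute host addresses are instead
compiled out with #if !defined(V4_BOOTSTRAP) guards, as the diff shows.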
Diffstat (limited to 'src/3rdparty/masm/assembler/MacroAssemblerARMv7.h')
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARMv7.h  79
1 file changed, 73 insertions, 6 deletions
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
index 3f6352c1db..806f2e13b6 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
@@ -27,7 +27,7 @@
#ifndef MacroAssemblerARMv7_h
#define MacroAssemblerARMv7_h
-#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+#if ENABLE(ASSEMBLER) && (CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP))
#include "ARMv7Assembler.h"
#include "AbstractMacroAssembler.h"
@@ -160,12 +160,41 @@ public:
{
add32(imm, dest, dest);
}
+
+#if defined(V4_BOOTSTRAP)
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+#endif
+#if !defined(V4_BOOTSTRAP)
void add32(AbsoluteAddress src, RegisterID dest)
{
load32(src.m_ptr, dataTempRegister);
add32(dataTempRegister, dest);
}
+#endif
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
@@ -206,6 +235,7 @@ public:
add32(dataTempRegister, dest);
}
+#if !defined(V4_BOOTSTRAP)
void add32(TrustedImm32 imm, AbsoluteAddress address)
{
load32(address.m_ptr, dataTempRegister);
@@ -242,6 +272,7 @@ public:
m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
}
+#endif
void and32(RegisterID op1, RegisterID op2, RegisterID dest)
{
@@ -343,6 +374,7 @@ public:
or32(dataTempRegister, dest);
}
+#if !defined(V4_BOOTSTRAP)
void or32(RegisterID src, AbsoluteAddress dest)
{
move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
@@ -350,6 +382,7 @@ public:
or32(src, dataTempRegister);
store32(dataTempRegister, addressTempRegister);
}
+#endif
void or32(TrustedImm32 imm, RegisterID dest)
{
@@ -461,6 +494,7 @@ public:
sub32(dataTempRegister, dest);
}
+#if !defined(V4_BOOTSTRAP)
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
load32(address.m_ptr, dataTempRegister);
@@ -477,6 +511,7 @@ public:
store32(dataTempRegister, address.m_ptr);
}
+#endif
void xor32(Address src, RegisterID dest)
{
@@ -528,7 +563,6 @@ public:
// internal function, but public because of "using load32;" in template sub-classes to pull
// in the other public overloads.
-
void load32(ArmAddress address, RegisterID dest)
{
if (address.type == ArmAddress::HasIndex)
@@ -649,11 +683,13 @@ public:
load16(setupArmAddress(address), dest);
}
+#if !defined(V4_BOOTSTRAP)
void load32(const void* address, RegisterID dest)
{
move(TrustedImmPtr(address), addressTempRegister);
m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}
+#endif
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
@@ -758,6 +794,7 @@ public:
store32(dataTempRegister, setupArmAddress(address));
}
+#if !defined(V4_BOOTSTRAP)
void store32(RegisterID src, const void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
@@ -769,12 +806,14 @@ public:
move(imm, dataTempRegister);
store32(dataTempRegister, address);
}
+#endif
void store8(RegisterID src, BaseIndex address)
{
store8(src, setupArmAddress(address));
}
+#if !defined(V4_BOOTSTRAP)
void store8(RegisterID src, void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
@@ -786,6 +825,7 @@ public:
move(imm, dataTempRegister);
store8(dataTempRegister, address);
}
+#endif
void store16(RegisterID src, BaseIndex address)
{
@@ -883,11 +923,13 @@ public:
m_assembler.vmov(dest, src);
}
+#if !defined(V4_BOOTSTRAP)
void loadDouble(const void* address, FPRegisterID dest)
{
move(TrustedImmPtr(address), addressTempRegister);
m_assembler.vldr(dest, addressTempRegister, 0);
}
+#endif
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
@@ -919,11 +961,13 @@ public:
m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
}
+#if !defined(V4_BOOTSTRAP)
void storeDouble(FPRegisterID src, const void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
storeDouble(src, addressTempRegister);
}
+#endif
void storeDouble(FPRegisterID src, BaseIndex address)
{
@@ -957,11 +1001,13 @@ public:
m_assembler.vadd(dest, op1, op2);
}
+#if !defined(V4_BOOTSTRAP)
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
loadDouble(address.m_ptr, fpTempRegister);
m_assembler.vadd(dest, dest, fpTempRegister);
}
+#endif
void divDouble(FPRegisterID src, FPRegisterID dest)
{
@@ -1040,6 +1086,7 @@ public:
m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
}
+#if !defined(V4_BOOTSTRAP)
void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
{
// Fixme: load directly into the fpr!
@@ -1047,6 +1094,7 @@ public:
m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
}
+#endif
void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID /*scratch*/)
{
@@ -1200,7 +1248,7 @@ public:
void push(RegisterID src)
{
// store preindexed with writeback
- m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ m_assembler.str(src, ARMRegisters::sp, -4 /*sizeof(void*)*/, true, true);
}
void push(Address address)
@@ -1242,10 +1290,12 @@ public:
m_assembler.mov(dest, src);
}
+#if !defined(V4_BOOTSTRAP)
void move(TrustedImmPtr imm, RegisterID dest)
{
move(TrustedImm32(imm), dest);
}
+#endif
void swap(RegisterID reg1, RegisterID reg2)
{
@@ -1386,6 +1436,7 @@ public:
return branch32(cond, addressTempRegister, right);
}
+#if !defined(V4_BOOTSTRAP)
Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
{
load32(left.m_ptr, dataTempRegister);
@@ -1398,6 +1449,7 @@ public:
load32(left.m_ptr, addressTempRegister);
return branch32(cond, addressTempRegister, right);
}
+#endif
Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
{
@@ -1454,6 +1506,7 @@ public:
return branchTest32(cond, addressTempRegister, mask);
}
+#if !defined(V4_BOOTSTRAP)
Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
// use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
@@ -1461,6 +1514,7 @@ public:
load8(Address(addressTempRegister), addressTempRegister);
return branchTest32(cond, addressTempRegister, mask);
}
+#endif
void jump(RegisterID target)
{
@@ -1474,12 +1528,14 @@ public:
m_assembler.bx(dataTempRegister);
}
+#if !defined(V4_BOOTSTRAP)
void jump(AbsoluteAddress address)
{
move(TrustedImmPtr(address.m_ptr), dataTempRegister);
load32(Address(dataTempRegister), dataTempRegister);
m_assembler.bx(dataTempRegister);
}
+#endif
// Arithmetic control flow operations:
@@ -1520,6 +1576,7 @@ public:
return branchAdd32(cond, dest, imm, dest);
}
+#if !defined(V4_BOOTSTRAP)
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
{
// Move the high bits of the address into addressTempRegister,
@@ -1545,6 +1602,7 @@ public:
return Jump(makeBranch(cond));
}
+#endif
Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
@@ -1715,6 +1773,7 @@ public:
return DataLabel32(this);
}
+#if !defined(V4_BOOTSTRAP)
ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
{
padBeforePatch();
@@ -1742,7 +1801,8 @@ public:
m_makeJumpPatchable = false;
return PatchableJump(result);
}
-
+#endif
+
PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
m_makeJumpPatchable = true;
@@ -1759,6 +1819,7 @@ public:
return PatchableJump(result);
}
+#if !defined(V4_BOOTSTRAP)
PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
@@ -1766,6 +1827,7 @@ public:
m_makeJumpPatchable = false;
return PatchableJump(result);
}
+#endif
PatchableJump patchableJump()
{
@@ -1776,6 +1838,7 @@ public:
return PatchableJump(result);
}
+#if !defined(V4_BOOTSTRAP)
ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
@@ -1783,7 +1846,7 @@ public:
return label;
}
ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
-
+#endif
ALWAYS_INLINE Call tailRecursiveCall()
{
@@ -1804,6 +1867,7 @@ public:
return m_assembler.executableOffsetFor(location);
}
+#if !defined(V4_BOOTSTRAP)
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
@@ -1816,7 +1880,8 @@ public:
const unsigned twoWordOpSize = 4;
return label.labelAtOffset(-twoWordOpSize * 2);
}
-
+#endif
+
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
{
#if OS(LINUX) || OS(QNX)
@@ -1933,6 +1998,7 @@ private:
template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
+#if !defined(V4_BOOTSTRAP)
static void linkCall(void* code, Call call, FunctionPtr function)
{
ARMv7Assembler::linkCall(code, call.m_label, function.value());
@@ -1947,6 +2013,7 @@ private:
{
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
+#endif
bool m_makeJumpPatchable;
};