Diffstat (limited to 'src/qml/jit')
-rw-r--r--   src/qml/jit/qv4assembler.cpp | 182
-rw-r--r--   src/qml/jit/qv4assembler_p.h |   4
-rw-r--r--   src/qml/jit/qv4binop.cpp     |  86
-rw-r--r--   src/qml/jit/qv4binop_p.h     |  98
-rw-r--r--   src/qml/jit/qv4isel_masm.cpp | 178
-rw-r--r--   src/qml/jit/qv4regalloc.cpp  |  14
6 files changed, 295 insertions, 267 deletions
diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp
index e1acc33f82..018396318e 100644
--- a/src/qml/jit/qv4assembler.cpp
+++ b/src/qml/jit/qv4assembler.cpp
@@ -52,6 +52,8 @@
#include <WTFStubs.h>
#include <iostream>
+#include <QBuffer>
+#include <QCoreApplication>
#if ENABLE(ASSEMBLER)
@@ -146,11 +148,11 @@ bool CompilationUnit::memoryMapCode(QString *errorString)
const Assembler::VoidType Assembler::Void;
-Assembler::Assembler(InstructionSelection *isel, IR::Function* function, QV4::ExecutableAllocator *executableAllocator)
+Assembler::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator)
: _function(function)
, _nextBlock(0)
, _executableAllocator(executableAllocator)
- , _isel(isel)
+ , _jsGenerator(jsGenerator)
{
_addrs.resize(_function->basicBlockCount());
_patches.resize(_function->basicBlockCount());
@@ -301,7 +303,7 @@ Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &s
loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister);
loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, compilationUnit)), Assembler::ScratchRegister);
loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(QV4::CompiledData::CompilationUnit, runtimeStrings)), reg);
- const int id = _isel->registerString(string);
+ const int id = _jsGenerator->registerString(string);
return Pointer(reg, id * sizeof(QV4::String*));
}
@@ -314,13 +316,13 @@ Assembler::Address Assembler::loadConstant(const Primitive &v, RegisterID baseRe
{
loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), baseReg);
loadPtr(Address(baseReg, qOffsetOf(QV4::Heap::ExecutionContext, constantTable)), baseReg);
- const int index = _isel->jsUnitGenerator()->registerConstant(v.asReturnedValue());
+ const int index = _jsGenerator->registerConstant(v.asReturnedValue());
return Address(baseReg, index * sizeof(QV4::Value));
}
void Assembler::loadStringRef(RegisterID reg, const QString &string)
{
- const int id = _isel->registerString(string);
+ const int id = _jsGenerator->registerString(string);
move(TrustedImm32(id), reg);
}
@@ -496,4 +498,174 @@ void Assembler::setStackLayout(int maxArgCountForBuiltins, int regularRegistersT
_stackLayout.reset(new StackLayout(_function, maxArgCountForBuiltins, regularRegistersToSave, fpRegistersToSave));
}
+
+namespace {
+class QIODevicePrintStream: public FilePrintStream
+{
+ Q_DISABLE_COPY(QIODevicePrintStream)
+
+public:
+ explicit QIODevicePrintStream(QIODevice *dest)
+ : FilePrintStream(0)
+ , dest(dest)
+ , buf(4096, '0')
+ {
+ Q_ASSERT(dest);
+ }
+
+ ~QIODevicePrintStream()
+ {}
+
+ void vprintf(const char* format, va_list argList) WTF_ATTRIBUTE_PRINTF(2, 0)
+ {
+ const int written = qvsnprintf(buf.data(), buf.size(), format, argList);
+ if (written > 0)
+ dest->write(buf.constData(), written);
+ memset(buf.data(), 0, qMin(written, buf.size()));
+ }
+
+ void flush()
+ {}
+
+private:
+ QIODevice *dest;
+ QByteArray buf;
+};
+} // anonymous namespace
+
+static void printDisassembledOutputWithCalls(QByteArray processedOutput, const QHash<void*, const char*>& functions)
+{
+ for (QHash<void*, const char*>::ConstIterator it = functions.begin(), end = functions.end();
+ it != end; ++it) {
+ const QByteArray ptrString = "0x" + QByteArray::number(quintptr(it.key()), 16);
+ int idx = processedOutput.indexOf(ptrString);
+ if (idx < 0)
+ continue;
+ idx = processedOutput.lastIndexOf('\n', idx);
+ if (idx < 0)
+ continue;
+ processedOutput = processedOutput.insert(idx, QByteArrayLiteral(" ; call ") + it.value());
+ }
+
+ qDebug("%s", processedOutput.constData());
+}
+
+#if defined(Q_OS_LINUX)
+static FILE *pmap;
+
+static void qt_closePmap()
+{
+ if (pmap) {
+ fclose(pmap);
+ pmap = 0;
+ }
+}
+
+#endif
+
+JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize)
+{
+ Label endOfCode = label();
+
+ {
+ for (size_t i = 0, ei = _patches.size(); i != ei; ++i) {
+ Label target = _addrs.at(i);
+ Q_ASSERT(target.isSet());
+ for (Jump jump : qAsConst(_patches.at(i)))
+ jump.linkTo(target, this);
+ }
+ }
+
+ JSC::JSGlobalData dummy(_executableAllocator);
+ JSC::LinkBuffer linkBuffer(dummy, this, 0);
+
+ for (const DataLabelPatch &p : qAsConst(_dataLabelPatches))
+ linkBuffer.patch(p.dataLabel, linkBuffer.locationOf(p.target));
+
+ // link exception handlers
+ for (Jump jump : qAsConst(exceptionPropagationJumps))
+ linkBuffer.link(jump, linkBuffer.locationOf(exceptionReturnLabel));
+
+ {
+ for (size_t i = 0, ei = _labelPatches.size(); i != ei; ++i) {
+ Label target = _addrs.at(i);
+ Q_ASSERT(target.isSet());
+ for (DataLabelPtr label : _labelPatches.at(i))
+ linkBuffer.patch(label, linkBuffer.locationOf(target));
+ }
+ }
+
+ *codeSize = linkBuffer.offsetOf(endOfCode);
+
+ QByteArray name;
+
+ JSC::MacroAssemblerCodeRef codeRef;
+
+ static const bool showCode = qEnvironmentVariableIsSet("QV4_SHOW_ASM");
+ if (showCode) {
+ QHash<void*, const char*> functions;
+#ifndef QT_NO_DEBUG
+ for (CallInfo call : qAsConst(_callInfos))
+ functions[linkBuffer.locationOf(call.label).dataLocation()] = call.functionName;
+#endif
+
+ QBuffer buf;
+ buf.open(QIODevice::WriteOnly);
+ WTF::setDataFile(new QIODevicePrintStream(&buf));
+
+ name = _function->name->toUtf8();
+ if (name.isEmpty())
+ name = "IR::Function(0x" + QByteArray::number(quintptr(_function), 16) + ')';
+ codeRef = linkBuffer.finalizeCodeWithDisassembly("%s", name.data());
+
+ WTF::setDataFile(stderr);
+ printDisassembledOutputWithCalls(buf.data(), functions);
+ } else {
+ codeRef = linkBuffer.finalizeCodeWithoutDisassembly();
+ }
+
+#if defined(Q_OS_LINUX)
+ // This implements writing of JIT'd addresses so that perf can find the
+ // symbol names.
+ //
+ // Perf expects the mapping to be in a certain place and have certain
+ // content, for more information, see:
+ // https://github.com/torvalds/linux/blob/master/tools/perf/Documentation/jit-interface.txt
+ static bool doProfile = !qEnvironmentVariableIsEmpty("QV4_PROFILE_WRITE_PERF_MAP");
+ static bool profileInitialized = false;
+ if (doProfile && !profileInitialized) {
+ profileInitialized = true;
+
+ char pname[PATH_MAX];
+ snprintf(pname, PATH_MAX - 1, "/tmp/perf-%lu.map",
+ (unsigned long)QCoreApplication::applicationPid());
+
+ pmap = fopen(pname, "w");
+ if (!pmap)
+ qWarning("QV4: Can't write %s, call stacks will not contain JavaScript function names", pname);
+
+ // make sure we clean up nicely
+ std::atexit(qt_closePmap);
+ }
+
+ if (pmap) {
+ // this may have been pre-populated, if QV4_SHOW_ASM was on
+ if (name.isEmpty()) {
+ name = _function->name->toUtf8();
+ if (name.isEmpty())
+ name = "IR::Function(0x" + QByteArray::number(quintptr(_function), 16) + ')';
+ }
+
+ fprintf(pmap, "%llx %x %.*s\n",
+ (long long unsigned int)codeRef.code().executableAddress(),
+ *codeSize,
+ name.length(),
+ name.constData());
+ fflush(pmap);
+ }
+#endif
+
+ return codeRef;
+}
+
#endif
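
The Assembler::link() code added above writes one line per JIT'd function to /tmp/perf-<pid>.map so that Linux perf can resolve JavaScript function names, following the format referenced in tools/perf/Documentation/jit-interface.txt. A minimal standalone sketch of that map format follows; it is Linux-only, and the address, size and function name are placeholders rather than values taken from the real JIT:

// Sketch: write one perf map entry per JIT'd function.
// Format per line: "<start-addr-hex> <size-hex> <symbol name>".
#include <cstdio>
#include <cstdint>
#include <unistd.h>

static FILE *openPerfMap()
{
    char path[64];
    std::snprintf(path, sizeof(path), "/tmp/perf-%lu.map",
                  static_cast<unsigned long>(getpid()));
    return std::fopen(path, "w"); // perf expects exactly this file name
}

static void writePerfMapEntry(FILE *map, const void *codeStart, size_t codeSize,
                              const char *name)
{
    if (!map)
        return;
    std::fprintf(map, "%llx %zx %s\n",
                 static_cast<unsigned long long>(reinterpret_cast<uintptr_t>(codeStart)),
                 codeSize, name);
    std::fflush(map); // flush so entries survive if the JIT'd code crashes later
}

int main()
{
    FILE *map = openPerfMap();
    // Hypothetical JIT'd function: address and size are placeholders.
    writePerfMapEntry(map, reinterpret_cast<void *>(0x7f0000001000), 0x180,
                      "IR::Function(example)");
    if (map)
        std::fclose(map);
    return 0;
}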
diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h
index 94478cd9cd..de9c246ed6 100644
--- a/src/qml/jit/qv4assembler_p.h
+++ b/src/qml/jit/qv4assembler_p.h
@@ -111,7 +111,7 @@ class Assembler : public JSC::MacroAssembler, public TargetPlatform
Q_DISABLE_COPY(Assembler)
public:
- Assembler(InstructionSelection *isel, IR::Function* function, QV4::ExecutableAllocator *executableAllocator);
+ Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator);
// Explicit type to allow distinguishing between
// pushing an address itself or the value it points
@@ -1092,7 +1092,7 @@ private:
IR::BasicBlock *_nextBlock;
QV4::ExecutableAllocator *_executableAllocator;
- InstructionSelection *_isel;
+ QV4::Compiler::JSUnitGenerator *_jsGenerator;
};
template <typename Result, typename Source>
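
The constructor change above narrows the Assembler's dependency from the whole InstructionSelection to the JSUnitGenerator it actually calls (registerString() and registerConstant(), as seen in qv4assembler.cpp). A simplified sketch of that pattern; the class bodies here are illustrative stand-ins, not the real Qt declarations:

// Sketch: depend on the narrow interface that is actually used, not on the
// object that happens to own it (signatures modelled on the diff above).
#include <cstdint>
#include <string>
#include <vector>

class UnitGenerator {            // stands in for QV4::Compiler::JSUnitGenerator
public:
    int registerString(const std::string &s) { strings.push_back(s); return int(strings.size()) - 1; }
    int registerConstant(uint64_t v)         { constants.push_back(v); return int(constants.size()) - 1; }
private:
    std::vector<std::string> strings;
    std::vector<uint64_t>    constants;
};

class Assembler {
public:
    explicit Assembler(UnitGenerator *gen) : _jsGenerator(gen) {}
    // Before the change these calls went through the instruction selection
    // object; now the generator is called directly.
    int loadStringId(const std::string &s) { return _jsGenerator->registerString(s); }
    int loadConstantIndex(uint64_t v)      { return _jsGenerator->registerConstant(v); }
private:
    UnitGenerator *_jsGenerator;
};

int main()
{
    UnitGenerator gen;
    Assembler as(&gen);
    int sid = as.loadStringId("hello");
    int cid = as.loadConstantIndex(42);
    return (sid == 0 && cid == 0) ? 0 : 1;
}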
diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp
index 9c535bb0bb..d2758c4a47 100644
--- a/src/qml/jit/qv4binop.cpp
+++ b/src/qml/jit/qv4binop.cpp
@@ -112,7 +112,7 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
return;
}
- Assembler::Jump done;
+ Jump done;
if (lhs->type != IR::StringType && rhs->type != IR::StringType)
done = genInlineBinop(lhs, rhs, target);
@@ -129,13 +129,13 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
RuntimeCall context(info.contextImplementation);
if (fallBack.isValid()) {
as->generateFunctionCallImp(info.needsExceptionCheck, target, info.name, fallBack,
- Assembler::PointerToValue(lhs),
- Assembler::PointerToValue(rhs));
+ PointerToValue(lhs),
+ PointerToValue(rhs));
} else if (context.isValid()) {
as->generateFunctionCallImp(info.needsExceptionCheck, target, info.name, context,
Assembler::EngineRegister,
- Assembler::PointerToValue(lhs),
- Assembler::PointerToValue(rhs));
+ PointerToValue(lhs),
+ PointerToValue(rhs));
} else {
Q_ASSERT(!"unreachable");
}
@@ -148,9 +148,9 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
{
IR::Temp *targetTemp = target->asTemp();
- Assembler::FPRegisterID targetReg;
+ FPRegisterID targetReg;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
- targetReg = (Assembler::FPRegisterID) targetTemp->index;
+ targetReg = (FPRegisterID) targetTemp->index;
else
targetReg = Assembler::FPGpr0;
@@ -162,7 +162,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X + constant -> Y = X; Y += [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Assembler::Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, Assembler::ScratchRegister);
as->addDouble(addr, targetReg);
break;
}
@@ -184,7 +184,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X * constant -> Y = X; Y *= [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Assembler::Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, Assembler::ScratchRegister);
as->mulDouble(addr, targetReg);
break;
}
@@ -203,7 +203,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X - constant -> Y = X; Y -= [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Assembler::Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, Assembler::ScratchRegister);
as->subDouble(addr, targetReg);
break;
}
@@ -231,7 +231,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X / constant -> Y = X; Y /= [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Assembler::Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, Assembler::ScratchRegister);
as->divDouble(addr, targetReg);
break;
}
@@ -258,9 +258,9 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
default: {
Q_ASSERT(target->type == IR::BoolType);
- Assembler::Jump trueCase = as->branchDouble(false, op, lhs, rhs);
+ Jump trueCase = as->branchDouble(false, op, lhs, rhs);
as->storeBool(false, target);
- Assembler::Jump done = as->jump();
+ Jump done = as->jump();
trueCase.link(as);
as->storeBool(true, target);
done.link(as);
@@ -305,13 +305,13 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
bool inplaceOpWithAddress = false;
IR::Temp *targetTemp = target->asTemp();
- Assembler::RegisterID targetReg = Assembler::ReturnValueRegister;
+ RegisterID targetReg = Assembler::ReturnValueRegister;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister) {
IR::Temp *rhs = rightSource->asTemp();
if (!rhs || rhs->kind != IR::Temp::PhysicalRegister || rhs->index != targetTemp->index) {
// We try to load leftSource into the target's register, but we can't do that if
// the target register is the same as rightSource.
- targetReg = (Assembler::RegisterID) targetTemp->index;
+ targetReg = (RegisterID) targetTemp->index;
} else if (rhs && rhs->kind == IR::Temp::PhysicalRegister && targetTemp->index == rhs->index) {
// However, if the target register is the same as the rightSource register, we can flip
// the operands for certain operations.
@@ -323,7 +323,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
case IR::OpMul:
// X = Y op X -> X = X op Y (or rephrased: X op= Y (so an in-place operation))
std::swap(leftSource, rightSource);
- targetReg = (Assembler::RegisterID) targetTemp->index;
+ targetReg = (RegisterID) targetTemp->index;
break;
case IR::OpLShift:
@@ -368,7 +368,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
&& targetTemp->kind == IR::Temp::PhysicalRegister
&& targetTemp->index == rightSource->asTemp()->index) {
// X = Y - X -> Tmp = X; X = Y; X -= Tmp
- targetReg = (Assembler::RegisterID) targetTemp->index;
+ targetReg = (RegisterID) targetTemp->index;
as->move(targetReg, Assembler::ScratchRegister);
as->move(as->toInt32Register(leftSource, targetReg), targetReg);
as->sub32(Assembler::ScratchRegister, targetReg);
@@ -384,7 +384,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
case IR::OpURShift:
if (IR::Const *c = rightSource->asConst()) {
if ((QV4::Primitive::toUInt32(c->value) & 0x1f) == 0) {
- Assembler::RegisterID r = as->toInt32Register(leftSource, targetReg);
+ RegisterID r = as->toInt32Register(leftSource, targetReg);
as->storeInt32(r, target);
return true;
}
@@ -395,10 +395,10 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
break;
}
- Assembler::RegisterID l = as->toInt32Register(leftSource, targetReg);
+ RegisterID l = as->toInt32Register(leftSource, targetReg);
if (IR::Const *c = rightSource->asConst()) { // All cases of Y = X op Const
- Assembler::TrustedImm32 r(int(c->value));
- Assembler::TrustedImm32 ur(QV4::Primitive::toUInt32(c->value) & 0x1f);
+ TrustedImm32 r(int(c->value));
+ TrustedImm32 ur(QV4::Primitive::toUInt32(c->value) & 0x1f);
switch (op) {
case IR::OpBitAnd: as->and32(r, l, targetReg); break;
@@ -419,7 +419,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
return false;
}
} else if (inplaceOpWithAddress) { // All cases of X = X op [address-of-Y]
- Assembler::Pointer rhsAddr = as->loadAddress(Assembler::ScratchRegister, rightSource);
+ Pointer rhsAddr = as->loadAddress(Assembler::ScratchRegister, rightSource);
switch (op) {
case IR::OpBitAnd: as->and32(rhsAddr, targetReg); break;
case IR::OpBitOr: as->or32 (rhsAddr, targetReg); break;
@@ -433,7 +433,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
return false;
}
} else { // All cases of Z = X op Y
- Assembler::RegisterID r = as->toInt32Register(rightSource, Assembler::ScratchRegister);
+ RegisterID r = as->toInt32Register(rightSource, Assembler::ScratchRegister);
switch (op) {
case IR::OpBitAnd: as->and32(l, r, targetReg); break;
case IR::OpBitOr: as->or32 (l, r, targetReg); break;
@@ -452,10 +452,10 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
// Not all CPUs accept shifts over more than 31 bits, and some CPUs (like ARM) will do
// surprising stuff when shifting over 0 bits.
#define CHECK_RHS(op) { \
- as->and32(Assembler::TrustedImm32(0x1f), r, Assembler::ScratchRegister); \
- Assembler::Jump notZero = as->branch32(Assembler::NotEqual, Assembler::ScratchRegister, Assembler::TrustedImm32(0)); \
+ as->and32(TrustedImm32(0x1f), r, Assembler::ScratchRegister); \
+ Jump notZero = as->branch32(RelationalCondition::NotEqual, Assembler::ScratchRegister, TrustedImm32(0)); \
as->move(l, targetReg); \
- Assembler::Jump done = as->jump(); \
+ Jump done = as->jump(); \
notZero.link(as); \
op; \
done.link(as); \
@@ -493,7 +493,7 @@ static inline Assembler::FPRegisterID getFreeFPReg(IR::Expr *shouldNotOverlap, u
Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
{
- Assembler::Jump done;
+ Jump done;
// Try preventing a call for a few common binary operations. This is used in two cases:
// - no register allocation was performed (not available for the platform, or the IR was
@@ -505,10 +505,10 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
// register.
switch (op) {
case IR::OpAdd: {
- Assembler::FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- Assembler::FPRegisterID rReg = getFreeFPReg(leftSource, 4);
- Assembler::Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
- Assembler::Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
+ FPRegisterID lReg = getFreeFPReg(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
+ Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
as->addDouble(rReg, lReg);
as->storeDouble(lReg, target);
@@ -520,10 +520,10 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
rightIsNoDbl.link(as);
} break;
case IR::OpMul: {
- Assembler::FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- Assembler::FPRegisterID rReg = getFreeFPReg(leftSource, 4);
- Assembler::Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
- Assembler::Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
+ FPRegisterID lReg = getFreeFPReg(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
+ Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
as->mulDouble(rReg, lReg);
as->storeDouble(lReg, target);
@@ -535,10 +535,10 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
rightIsNoDbl.link(as);
} break;
case IR::OpSub: {
- Assembler::FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- Assembler::FPRegisterID rReg = getFreeFPReg(leftSource, 4);
- Assembler::Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
- Assembler::Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
+ FPRegisterID lReg = getFreeFPReg(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
+ Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
as->subDouble(rReg, lReg);
as->storeDouble(lReg, target);
@@ -550,10 +550,10 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
rightIsNoDbl.link(as);
} break;
case IR::OpDiv: {
- Assembler::FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- Assembler::FPRegisterID rReg = getFreeFPReg(leftSource, 4);
- Assembler::Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
- Assembler::Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
+ FPRegisterID lReg = getFreeFPReg(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
+ Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
as->divDouble(rReg, lReg);
as->storeDouble(lReg, target);
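
The CHECK_RHS macro in the hunk above masks the shift count with 0x1f and branches around the shift when the masked count is zero, because some CPUs misbehave for shifts by 0 or by 32 and more bits. A small sketch of the ECMAScript shift semantics that the generated code has to preserve, written as plain C++ helpers rather than the JIT's own code:

// Sketch: ECMAScript <<, >> and >>> only use the low 5 bits of the shift
// count; when the masked count is 0 no shift instruction is needed (the JIT
// emits a branch for that case instead of issuing the shift).
// Assumes the usual arithmetic right shift for negative signed values.
#include <cassert>
#include <cstdint>

static int32_t jsShl(int32_t l, uint32_t count)
{
    count &= 0x1f;                       // spec: shift count modulo 32
    return count == 0 ? l : int32_t(uint32_t(l) << count);
}

static int32_t jsShr(int32_t l, uint32_t count)   // signed >>
{
    count &= 0x1f;
    return count == 0 ? l : (l >> count);
}

static uint32_t jsUShr(int32_t l, uint32_t count) // unsigned >>>
{
    count &= 0x1f;
    return count == 0 ? uint32_t(l) : (uint32_t(l) >> count);
}

int main()
{
    assert(jsShl(1, 32) == 1);            // 32 & 0x1f == 0 -> value unchanged
    assert(jsShr(-8, 1) == -4);           // arithmetic shift keeps the sign
    assert(jsUShr(-1, 0) == 0xffffffffu); // >>> reinterprets as unsigned
    return 0;
}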
diff --git a/src/qml/jit/qv4binop_p.h b/src/qml/jit/qv4binop_p.h
index 37601f54ba..3742e99e5a 100644
--- a/src/qml/jit/qv4binop_p.h
+++ b/src/qml/jit/qv4binop_p.h
@@ -67,13 +67,23 @@ struct Binop {
, op(operation)
{}
+ using Jump = Assembler::Jump;
+ using Address = Assembler::Address;
+ using RegisterID = Assembler::RegisterID;
+ using FPRegisterID = Assembler::FPRegisterID;
+ using TrustedImm32 = Assembler::TrustedImm32;
+ using ResultCondition = Assembler::ResultCondition;
+ using RelationalCondition = Assembler::RelationalCondition;
+ using Pointer = Assembler::Pointer;
+ using PointerToValue = Assembler::PointerToValue;
+
void generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target);
void doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target);
bool int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target);
- Assembler::Jump genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target);
+ Jump genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target);
- typedef Assembler::Jump (Binop::*MemRegOp)(Assembler::Address, Assembler::RegisterID);
- typedef Assembler::Jump (Binop::*ImmRegOp)(Assembler::TrustedImm32, Assembler::RegisterID);
+ typedef Jump (Binop::*MemRegOp)(Address, RegisterID);
+ typedef Jump (Binop::*ImmRegOp)(TrustedImm32, RegisterID);
struct OpInfo {
const char *name;
@@ -88,97 +98,97 @@ struct Binop {
static const OpInfo &operation(IR::AluOp operation)
{ return operations[operation]; }
- Assembler::Jump inline_add32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_add32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- return as->branchAdd32(Assembler::Overflow, addr, reg);
+ return as->branchAdd32(ResultCondition::Overflow, addr, reg);
#else
as->load32(addr, Assembler::ScratchRegister);
- return as->branchAdd32(Assembler::Overflow, Assembler::ScratchRegister, reg);
+ return as->branchAdd32(ResultCondition::Overflow, Assembler::ScratchRegister, reg);
#endif
}
- Assembler::Jump inline_add32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_add32(TrustedImm32 imm, RegisterID reg)
{
- return as->branchAdd32(Assembler::Overflow, imm, reg);
+ return as->branchAdd32(ResultCondition::Overflow, imm, reg);
}
- Assembler::Jump inline_sub32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_sub32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- return as->branchSub32(Assembler::Overflow, addr, reg);
+ return as->branchSub32(ResultCondition::Overflow, addr, reg);
#else
as->load32(addr, Assembler::ScratchRegister);
- return as->branchSub32(Assembler::Overflow, Assembler::ScratchRegister, reg);
+ return as->branchSub32(ResultCondition::Overflow, Assembler::ScratchRegister, reg);
#endif
}
- Assembler::Jump inline_sub32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_sub32(TrustedImm32 imm, RegisterID reg)
{
- return as->branchSub32(Assembler::Overflow, imm, reg);
+ return as->branchSub32(ResultCondition::Overflow, imm, reg);
}
- Assembler::Jump inline_mul32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_mul32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
return as->branchMul32(Assembler::Overflow, addr, reg);
#else
as->load32(addr, Assembler::ScratchRegister);
- return as->branchMul32(Assembler::Overflow, Assembler::ScratchRegister, reg);
+ return as->branchMul32(ResultCondition::Overflow, Assembler::ScratchRegister, reg);
#endif
}
- Assembler::Jump inline_mul32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_mul32(TrustedImm32 imm, RegisterID reg)
{
- return as->branchMul32(Assembler::Overflow, imm, reg, reg);
+ return as->branchMul32(ResultCondition::Overflow, imm, reg, reg);
}
- Assembler::Jump inline_shl32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_shl32(Address addr, RegisterID reg)
{
as->load32(addr, Assembler::ScratchRegister);
- as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+ as->and32(TrustedImm32(0x1f), Assembler::ScratchRegister);
as->lshift32(Assembler::ScratchRegister, reg);
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_shl32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_shl32(TrustedImm32 imm, RegisterID reg)
{
imm.m_value &= 0x1f;
as->lshift32(imm, reg);
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_shr32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_shr32(Address addr, RegisterID reg)
{
as->load32(addr, Assembler::ScratchRegister);
- as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+ as->and32(TrustedImm32(0x1f), Assembler::ScratchRegister);
as->rshift32(Assembler::ScratchRegister, reg);
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_shr32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_shr32(TrustedImm32 imm, RegisterID reg)
{
imm.m_value &= 0x1f;
as->rshift32(imm, reg);
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_ushr32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_ushr32(Address addr, RegisterID reg)
{
as->load32(addr, Assembler::ScratchRegister);
- as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+ as->and32(TrustedImm32(0x1f), Assembler::ScratchRegister);
as->urshift32(Assembler::ScratchRegister, reg);
- return as->branchTest32(Assembler::Signed, reg, reg);
+ return as->branchTest32(ResultCondition::Signed, reg, reg);
}
- Assembler::Jump inline_ushr32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_ushr32(TrustedImm32 imm, RegisterID reg)
{
imm.m_value &= 0x1f;
as->urshift32(imm, reg);
- return as->branchTest32(Assembler::Signed, reg, reg);
+ return as->branchTest32(ResultCondition::Signed, reg, reg);
}
- Assembler::Jump inline_and32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_and32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
as->and32(addr, reg);
@@ -186,16 +196,16 @@ struct Binop {
as->load32(addr, Assembler::ScratchRegister);
as->and32(Assembler::ScratchRegister, reg);
#endif
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_and32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_and32(TrustedImm32 imm, RegisterID reg)
{
as->and32(imm, reg);
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_or32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_or32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
as->or32(addr, reg);
@@ -203,16 +213,16 @@ struct Binop {
as->load32(addr, Assembler::ScratchRegister);
as->or32(Assembler::ScratchRegister, reg);
#endif
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_or32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_or32(TrustedImm32 imm, RegisterID reg)
{
as->or32(imm, reg);
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_xor32(Assembler::Address addr, Assembler::RegisterID reg)
+ Jump inline_xor32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
as->xor32(addr, reg);
@@ -220,13 +230,13 @@ struct Binop {
as->load32(addr, Assembler::ScratchRegister);
as->xor32(Assembler::ScratchRegister, reg);
#endif
- return Assembler::Jump();
+ return Jump();
}
- Assembler::Jump inline_xor32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ Jump inline_xor32(TrustedImm32 imm, RegisterID reg)
{
as->xor32(imm, reg);
- return Assembler::Jump();
+ return Jump();
}
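
The inline_add32/inline_sub32/inline_mul32 helpers above return a jump that is taken on overflow (ResultCondition::Overflow), which the caller uses to fall back to the generic runtime binop. A hedged sketch of the same idea in portable C++, where widening to 64 bits stands in for the CPU overflow flag the JIT actually tests:

// Sketch: try the fast int32 path, detect overflow, and fall back to the
// slow (double) path -- mirroring branchAdd32(Overflow, ...) in the diff.
#include <cassert>
#include <cstdint>

struct BinopResult {
    bool fitsInt32;   // false means the caller must take the fallback path
    int32_t i32;      // valid only when fitsInt32 is true
    double d;         // fallback result, always valid
};

static BinopResult addInt32Checked(int32_t a, int32_t b)
{
    const int64_t wide = int64_t(a) + int64_t(b);   // cannot overflow in 64 bit
    BinopResult r;
    r.fitsInt32 = (wide >= INT32_MIN && wide <= INT32_MAX);
    r.i32 = r.fitsInt32 ? int32_t(wide) : 0;
    r.d = double(a) + double(b);                    // what the runtime fallback yields
    return r;
}

int main()
{
    BinopResult ok = addInt32Checked(1, 2);
    assert(ok.fitsInt32 && ok.i32 == 3);

    BinopResult ovf = addInt32Checked(INT32_MAX, 1);
    assert(!ovf.fitsInt32 && ovf.d == 2147483648.0);
    return 0;
}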
diff --git a/src/qml/jit/qv4isel_masm.cpp b/src/qml/jit/qv4isel_masm.cpp
index c5d612ae65..279ccabf81 100644
--- a/src/qml/jit/qv4isel_masm.cpp
+++ b/src/qml/jit/qv4isel_masm.cpp
@@ -68,175 +68,6 @@ using namespace QV4;
using namespace QV4::JIT;
-namespace {
-class QIODevicePrintStream: public FilePrintStream
-{
- Q_DISABLE_COPY(QIODevicePrintStream)
-
-public:
- explicit QIODevicePrintStream(QIODevice *dest)
- : FilePrintStream(0)
- , dest(dest)
- , buf(4096, '0')
- {
- Q_ASSERT(dest);
- }
-
- ~QIODevicePrintStream()
- {}
-
- void vprintf(const char* format, va_list argList) WTF_ATTRIBUTE_PRINTF(2, 0)
- {
- const int written = qvsnprintf(buf.data(), buf.size(), format, argList);
- if (written > 0)
- dest->write(buf.constData(), written);
- memset(buf.data(), 0, qMin(written, buf.size()));
- }
-
- void flush()
- {}
-
-private:
- QIODevice *dest;
- QByteArray buf;
-};
-} // anonymous namespace
-
-static void printDisassembledOutputWithCalls(QByteArray processedOutput, const QHash<void*, const char*>& functions)
-{
- for (QHash<void*, const char*>::ConstIterator it = functions.begin(), end = functions.end();
- it != end; ++it) {
- const QByteArray ptrString = "0x" + QByteArray::number(quintptr(it.key()), 16);
- int idx = processedOutput.indexOf(ptrString);
- if (idx < 0)
- continue;
- idx = processedOutput.lastIndexOf('\n', idx);
- if (idx < 0)
- continue;
- processedOutput = processedOutput.insert(idx, QByteArrayLiteral(" ; call ") + it.value());
- }
-
- qDebug("%s", processedOutput.constData());
-}
-
-#if defined(Q_OS_LINUX)
-static FILE *pmap;
-
-static void qt_closePmap()
-{
- if (pmap) {
- fclose(pmap);
- pmap = 0;
- }
-}
-
-#endif
-
-JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize)
-{
- Label endOfCode = label();
-
- {
- for (size_t i = 0, ei = _patches.size(); i != ei; ++i) {
- Label target = _addrs.at(i);
- Q_ASSERT(target.isSet());
- for (Jump jump : qAsConst(_patches.at(i)))
- jump.linkTo(target, this);
- }
- }
-
- JSC::JSGlobalData dummy(_executableAllocator);
- JSC::LinkBuffer linkBuffer(dummy, this, 0);
-
- for (const DataLabelPatch &p : qAsConst(_dataLabelPatches))
- linkBuffer.patch(p.dataLabel, linkBuffer.locationOf(p.target));
-
- // link exception handlers
- for (Jump jump : qAsConst(exceptionPropagationJumps))
- linkBuffer.link(jump, linkBuffer.locationOf(exceptionReturnLabel));
-
- {
- for (size_t i = 0, ei = _labelPatches.size(); i != ei; ++i) {
- Label target = _addrs.at(i);
- Q_ASSERT(target.isSet());
- for (DataLabelPtr label : _labelPatches.at(i))
- linkBuffer.patch(label, linkBuffer.locationOf(target));
- }
- }
-
- *codeSize = linkBuffer.offsetOf(endOfCode);
-
- QByteArray name;
-
- JSC::MacroAssemblerCodeRef codeRef;
-
- static const bool showCode = qEnvironmentVariableIsSet("QV4_SHOW_ASM");
- if (showCode) {
- QHash<void*, const char*> functions;
-#ifndef QT_NO_DEBUG
- for (CallInfo call : qAsConst(_callInfos))
- functions[linkBuffer.locationOf(call.label).dataLocation()] = call.functionName;
-#endif
-
- QBuffer buf;
- buf.open(QIODevice::WriteOnly);
- WTF::setDataFile(new QIODevicePrintStream(&buf));
-
- name = _function->name->toUtf8();
- if (name.isEmpty())
- name = "IR::Function(0x" + QByteArray::number(quintptr(_function), 16) + ')';
- codeRef = linkBuffer.finalizeCodeWithDisassembly("%s", name.data());
-
- WTF::setDataFile(stderr);
- printDisassembledOutputWithCalls(buf.data(), functions);
- } else {
- codeRef = linkBuffer.finalizeCodeWithoutDisassembly();
- }
-
-#if defined(Q_OS_LINUX)
- // This implements writing of JIT'd addresses so that perf can find the
- // symbol names.
- //
- // Perf expects the mapping to be in a certain place and have certain
- // content, for more information, see:
- // https://github.com/torvalds/linux/blob/master/tools/perf/Documentation/jit-interface.txt
- static bool doProfile = !qEnvironmentVariableIsEmpty("QV4_PROFILE_WRITE_PERF_MAP");
- static bool profileInitialized = false;
- if (doProfile && !profileInitialized) {
- profileInitialized = true;
-
- char pname[PATH_MAX];
- snprintf(pname, PATH_MAX - 1, "/tmp/perf-%lu.map",
- (unsigned long)QCoreApplication::applicationPid());
-
- pmap = fopen(pname, "w");
- if (!pmap)
- qWarning("QV4: Can't write %s, call stacks will not contain JavaScript function names", pname);
-
- // make sure we clean up nicely
- std::atexit(qt_closePmap);
- }
-
- if (pmap) {
- // this may have been pre-populated, if QV4_SHOW_ASM was on
- if (name.isEmpty()) {
- name = _function->name->toUtf8();
- if (name.isEmpty())
- name = "IR::Function(0x" + QByteArray::number(quintptr(_function), 16) + ')';
- }
-
- fprintf(pmap, "%llx %x %.*s\n",
- (long long unsigned int)codeRef.code().executableAddress(),
- *codeSize,
- name.length(),
- name.constData());
- fflush(pmap);
- }
-#endif
-
- return codeRef;
-}
-
InstructionSelection::InstructionSelection(QQmlEnginePrivate *qmlEngine, QV4::ExecutableAllocator *execAllocator, IR::Module *module, Compiler::JSUnitGenerator *jsGenerator, EvalISelFactory *iselFactory)
: EvalInstructionSelection(execAllocator, module, jsGenerator, iselFactory)
, _block(0)
@@ -278,7 +109,7 @@ void InstructionSelection::run(int functionIndex)
qSwap(_removableJumps, removableJumps);
Assembler* oldAssembler = _as;
- _as = new Assembler(this, _function, executableAllocator);
+ _as = new Assembler(jsGenerator, _function, executableAllocator);
_as->setStackLayout(6, // 6 == max argc for calls to built-ins with an argument array
regularRegistersToSave.size(),
fpRegistersToSave.size());
@@ -1881,6 +1712,13 @@ bool InstructionSelection::visitCJumpStrictBool(IR::Binop *binop, IR::BasicBlock
// neither operands are statically typed as bool, so bail out.
return false;
}
+ if (otherSrc->type == IR::UnknownType) {
+ // Ok, we really need to call into the runtime.
+ // (This case doesn't happen when the optimizer ran, because everything will be typed (yes,
+ // possibly as "var" meaning anything), but it does happen for $0===true, which is generated
+ // for things where the optimizer didn't run (like functions with a try block).)
+ return false;
+ }
Assembler::RelationalCondition cond = binop->op == IR::OpStrictEqual ? Assembler::Equal
: Assembler::NotEqual;
diff --git a/src/qml/jit/qv4regalloc.cpp b/src/qml/jit/qv4regalloc.cpp
index 63d542b5c8..f2ae7e117a 100644
--- a/src/qml/jit/qv4regalloc.cpp
+++ b/src/qml/jit/qv4regalloc.cpp
@@ -973,7 +973,15 @@ private:
break;
Q_ASSERT(!i->isFixedInterval());
- _liveIntervals.push_back(i);
+ auto it = _liveIntervals.begin();
+ for (; it != _liveIntervals.end(); ++it) {
+ if ((*it)->temp() == i->temp()) {
+ *it = i;
+ break;
+ }
+ }
+ if (it == _liveIntervals.end())
+ _liveIntervals.push_back(i);
// qDebug() << "-- Activating interval for temp" << i->temp().index;
_unprocessedReverseOrder.removeLast();
@@ -1087,12 +1095,12 @@ private:
if (_info->def(it->temp()) != successorStart && !it->isSplitFromInterval()) {
const int successorEnd = successor->terminator()->id();
const int idx = successor->in.indexOf(predecessor);
- foreach (const Use &use, _info->uses(it->temp())) {
+ for (const Use &use : _info->uses(it->temp())) {
if (use.pos == static_cast<unsigned>(successorStart)) {
// only check the current edge, not all other possible ones. This is
// important for phi nodes: they have uses that are only valid when
// coming in over a specific edge.
- foreach (Stmt *s, successor->statements()) {
+ for (Stmt *s : successor->statements()) {
if (Phi *phi = s->asPhi()) {
Q_ASSERT(it->temp().index != phi->targetTemp->index);
Q_ASSERT(phi->d->incoming[idx]->asTemp() == 0
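
The qv4regalloc.cpp hunk above changes interval activation so that an incoming interval replaces an already-active interval for the same temp instead of being appended next to it. A minimal sketch of that replace-or-append pattern on a plain vector; the Interval type here is a stand-in, not the real LifeTimeInterval:

// Sketch: keep at most one active interval per temp -- replace an existing
// entry for the same temp, otherwise append (mirrors the loop in the diff).
#include <algorithm>
#include <cassert>
#include <vector>

struct Interval {
    int temp;    // register-allocation temp this interval belongs to
    int start;   // placeholder payload
};

static void activate(std::vector<Interval *> &live, Interval *incoming)
{
    auto it = std::find_if(live.begin(), live.end(),
                           [&](Interval *active) { return active->temp == incoming->temp; });
    if (it != live.end())
        *it = incoming;          // same temp already active: replace it in place
    else
        live.push_back(incoming);
}

int main()
{
    Interval a{1, 0}, b{2, 4}, aSplit{1, 8};
    std::vector<Interval *> live;
    activate(live, &a);
    activate(live, &b);
    activate(live, &aSplit);      // replaces a, does not grow the vector
    assert(live.size() == 2 && live[0] == &aSplit);
    return 0;
}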