aboutsummaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorErik Verbruggen <erik.verbruggen@me.com>2013-10-04 16:06:16 +0200
committerThe Qt Project <gerrit-noreply@qt-project.org>2013-10-10 09:54:24 +0200
commita967a9bdcc2c75a0270c2be48d845ded5332e4f0 (patch)
treebb19264f0e045239103678d568d506d8094bbcd9 /src
parentdcec03166c93fcbc9aa1ca97f53a6f436faa482c (diff)
V4 JIT: generate some strict (not) equal conditions
Checks for strict (not) equal to null, undefined, or a boolean value can be generated without reserving extra registers, or doing a call. This reduces the amount of runtime calls from >25mln to ~6500 for v8-bench.js.
Change-Id: If08d1124b2869227654b1233a89833c5b5e7b40c
Reviewed-by: Lars Knoll <lars.knoll@digia.com>
Diffstat (limited to 'src')
-rw-r--r--src/qml/compiler/qv4isel_masm.cpp172
-rw-r--r--src/qml/compiler/qv4isel_masm_p.h19
-rw-r--r--src/qml/compiler/qv4regalloc.cpp26
-rw-r--r--src/qml/jsruntime/qv4runtime.cpp10
4 files changed, 194 insertions, 33 deletions
diff --git a/src/qml/compiler/qv4isel_masm.cpp b/src/qml/compiler/qv4isel_masm.cpp
index 859f6b918f..c92b865222 100644
--- a/src/qml/compiler/qv4isel_masm.cpp
+++ b/src/qml/compiler/qv4isel_masm.cpp
@@ -262,27 +262,48 @@ void Assembler::addPatch(DataLabelPtr patch, V4IR::BasicBlock *target)
}
void Assembler::generateCJumpOnNonZero(RegisterID reg, V4IR::BasicBlock *currentBlock,
- V4IR::BasicBlock *trueBlock, V4IR::BasicBlock *falseBlock)
+ V4IR::BasicBlock *trueBlock, V4IR::BasicBlock *falseBlock)
+{
+ generateCJumpOnCompare(NotEqual, reg, TrustedImm32(0), currentBlock, trueBlock, falseBlock);
+}
+
+void Assembler::generateCJumpOnCompare(RelationalCondition cond, RegisterID left,TrustedImm32 right,
+ V4IR::BasicBlock *currentBlock, V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock)
{
if (trueBlock == _nextBlock) {
- Jump target = branch32(Equal, reg, TrustedImm32(0));
+ Jump target = branch32(invert(cond), left, right);
addPatch(falseBlock, target);
} else {
- Jump target = branch32(NotEqual, reg, TrustedImm32(0));
+ Jump target = branch32(cond, left, right);
addPatch(trueBlock, target);
jumpToBlock(currentBlock, falseBlock);
}
}
-Assembler::Pointer Assembler::loadTempAddress(RegisterID reg, V4IR::Temp *t)
+void Assembler::generateCJumpOnCompare(RelationalCondition cond, RegisterID left, RegisterID right,
+ V4IR::BasicBlock *currentBlock, V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock)
+{
+ if (trueBlock == _nextBlock) {
+ Jump target = branch32(invert(cond), left, right);
+ addPatch(falseBlock, target);
+ } else {
+ Jump target = branch32(cond, left, right);
+ addPatch(trueBlock, target);
+ jumpToBlock(currentBlock, falseBlock);
+ }
+}
+
+Assembler::Pointer Assembler::loadTempAddress(RegisterID baseReg, V4IR::Temp *t)
{
int32_t offset = 0;
int scope = t->scope;
RegisterID context = ContextRegister;
if (scope) {
- loadPtr(Address(ContextRegister, qOffsetOf(ExecutionContext, outer)), ScratchRegister);
+ loadPtr(Address(ContextRegister, qOffsetOf(ExecutionContext, outer)), baseReg);
--scope;
- context = ScratchRegister;
+ context = baseReg;
while (scope) {
loadPtr(Address(context, qOffsetOf(ExecutionContext, outer)), context);
--scope;
@@ -291,12 +312,12 @@ Assembler::Pointer Assembler::loadTempAddress(RegisterID reg, V4IR::Temp *t)
switch (t->kind) {
case V4IR::Temp::Formal:
case V4IR::Temp::ScopedFormal: {
- loadPtr(Address(context, qOffsetOf(CallContext, callData)), reg);
+ loadPtr(Address(context, qOffsetOf(CallContext, callData)), baseReg);
offset = sizeof(CallData) + (t->index - 1) * sizeof(SafeValue);
} break;
case V4IR::Temp::Local:
case V4IR::Temp::ScopedLocal: {
- loadPtr(Address(context, qOffsetOf(CallContext, locals)), reg);
+ loadPtr(Address(context, qOffsetOf(CallContext, locals)), baseReg);
offset = t->index * sizeof(SafeValue);
} break;
case V4IR::Temp::StackSlot: {
@@ -305,7 +326,7 @@ Assembler::Pointer Assembler::loadTempAddress(RegisterID reg, V4IR::Temp *t)
default:
Q_UNREACHABLE();
}
- return Pointer(reg, offset);
+ return Pointer(baseReg, offset);
}
Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &string)
@@ -1376,6 +1397,7 @@ void InstructionSelection::binop(V4IR::AluOp oper, V4IR::Expr *leftSource, V4IR:
if (leftSource->type != V4IR::StringType && rightSource->type != V4IR::StringType)
done = genInlineBinop(oper, leftSource, rightSource, target);
+ // TODO: inline var===null and var!==null
const Assembler::BinaryOperationInfo& info = Assembler::binaryOperation(oper);
if (info.fallbackImplementation) {
_as->generateFunctionCallImp(target, info.name, info.fallbackImplementation,
@@ -1726,6 +1748,11 @@ void InstructionSelection::visitCJump(V4IR::CJump *s)
&& visitCJumpDouble(b->op, b->left, b->right, s->iftrue, s->iffalse))
return;
+ if (b->op == V4IR::OpStrictEqual || b->op == V4IR::OpStrictNotEqual) {
+ visitCJumpStrict(b, s->iftrue, s->iffalse);
+ return;
+ }
+
CmpOp op = 0;
CmpOpContext opContext = 0;
const char *opName = 0;
@@ -2146,6 +2173,133 @@ bool InstructionSelection::visitCJumpDouble(V4IR::AluOp op, V4IR::Expr *left, V4
return true;
}
+void InstructionSelection::visitCJumpStrict(V4IR::Binop *binop, V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock)
+{
+ if (visitCJumpStrictNullUndefined(V4IR::NullType, binop, trueBlock, falseBlock))
+ return;
+ if (visitCJumpStrictNullUndefined(V4IR::UndefinedType, binop, trueBlock, falseBlock))
+ return;
+ if (visitCJumpStrictBool(binop, trueBlock, falseBlock))
+ return;
+
+ QV4::BinOp op;
+ const char *opName;
+ if (binop->op == V4IR::OpStrictEqual) {
+ op = __qmljs_se;
+ opName = "__qmljs_se";
+ } else {
+ op = __qmljs_sne;
+ opName = "__qmljs_sne";
+ }
+
+ V4IR::Expr *left = binop->left;
+ V4IR::Expr *right = binop->right;
+
+ _as->generateFunctionCallImp(Assembler::ReturnValueRegister, opName, op,
+ Assembler::PointerToValue(left), Assembler::PointerToValue(right));
+ _as->generateCJumpOnNonZero(Assembler::ReturnValueRegister, _block, trueBlock, falseBlock);
+}
+
+// Only load the non-null temp.
+bool InstructionSelection::visitCJumpStrictNullUndefined(V4IR::Type nullOrUndef, V4IR::Binop *binop,
+ V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock)
+{
+ Q_ASSERT(nullOrUndef == V4IR::NullType || nullOrUndef == V4IR::UndefinedType);
+
+ V4IR::Expr *varSrc = 0;
+ if (binop->left->type == V4IR::VarType && binop->right->type == nullOrUndef)
+ varSrc = binop->left;
+ else if (binop->left->type == nullOrUndef && binop->right->type == V4IR::VarType)
+ varSrc = binop->right;
+ if (!varSrc)
+ return false;
+
+ if (varSrc->asTemp() && varSrc->asTemp()->kind == V4IR::Temp::PhysicalRegister) {
+ _as->jumpToBlock(_block, falseBlock);
+ return true;
+ }
+
+ if (V4IR::Const *c = varSrc->asConst()) {
+ if (c->type == nullOrUndef)
+ _as->jumpToBlock(_block, trueBlock);
+ else
+ _as->jumpToBlock(_block, falseBlock);
+ return true;
+ }
+
+ V4IR::Temp *t = varSrc->asTemp();
+ Q_ASSERT(t);
+
+ Assembler::Pointer tagAddr = _as->loadTempAddress(Assembler::ScratchRegister, t);
+ tagAddr.offset += 4;
+ const Assembler::RegisterID tagReg = Assembler::ScratchRegister;
+ _as->load32(tagAddr, tagReg);
+
+ Assembler::RelationalCondition cond = binop->op == V4IR::OpStrictEqual ? Assembler::Equal
+ : Assembler::NotEqual;
+ const Assembler::TrustedImm32 tag(nullOrUndef == V4IR::NullType ? QV4::Value::_Null_Type
+ : QV4::Value::Undefined_Type);
+ _as->generateCJumpOnCompare(cond, tagReg, tag, _block, trueBlock, falseBlock);
+ return true;
+}
+
+bool InstructionSelection::visitCJumpStrictBool(V4IR::Binop *binop, V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock)
+{
+ V4IR::Expr *boolSrc = 0, *otherSrc = 0;
+ if (binop->left->type == V4IR::BoolType) {
+ boolSrc = binop->left;
+ otherSrc = binop->right;
+ } else if (binop->right->type == V4IR::BoolType) {
+ boolSrc = binop->right;
+ otherSrc = binop->left;
+ } else {
+        // neither operand is statically typed as bool, so bail out.
+ return false;
+ }
+
+ Assembler::RelationalCondition cond = binop->op == V4IR::OpStrictEqual ? Assembler::Equal
+ : Assembler::NotEqual;
+
+ if (otherSrc->type == V4IR::BoolType) { // both are boolean
+ Assembler::RegisterID one = _as->toBoolRegister(boolSrc, Assembler::ReturnValueRegister);
+ Assembler::RegisterID two = _as->toBoolRegister(otherSrc, Assembler::ScratchRegister);
+ _as->generateCJumpOnCompare(cond, one, two, _block, trueBlock, falseBlock);
+ return true;
+ }
+
+ if (otherSrc->type != V4IR::VarType) {
+ _as->jumpToBlock(_block, falseBlock);
+ return true;
+ }
+
+ V4IR::Temp *otherTemp = otherSrc->asTemp();
+ Q_ASSERT(otherTemp); // constants cannot have "var" type
+ Q_ASSERT(otherTemp->kind != V4IR::Temp::PhysicalRegister);
+
+ Assembler::Pointer otherAddr = _as->loadTempAddress(Assembler::ReturnValueRegister, otherTemp);
+ otherAddr.offset += 4; // tag address
+
+    // check if the tag of the var operand indicates 'boolean'
+ _as->load32(otherAddr, Assembler::ScratchRegister);
+ Assembler::Jump noBool = _as->branch32(Assembler::NotEqual, Assembler::ScratchRegister,
+ Assembler::TrustedImm32(QV4::Value::_Boolean_Type));
+ if (binop->op == V4IR::OpStrictEqual)
+ _as->addPatch(falseBlock, noBool);
+ else
+ _as->addPatch(trueBlock, noBool);
+
+ // ok, both are boolean, so let's load them and compare them.
+ otherAddr.offset -= 4; // int_32 address
+ _as->load32(otherAddr, Assembler::ReturnValueRegister);
+ Assembler::RegisterID boolReg = _as->toBoolRegister(boolSrc, Assembler::ScratchRegister);
+ _as->generateCJumpOnCompare(cond, boolReg, Assembler::ReturnValueRegister, _block, trueBlock,
+ falseBlock);
+ return true;
+}
+
bool InstructionSelection::int32Binop(V4IR::AluOp oper, V4IR::Expr *leftSource,
V4IR::Expr *rightSource, V4IR::Temp *target)
{
diff --git a/src/qml/compiler/qv4isel_masm_p.h b/src/qml/compiler/qv4isel_masm_p.h
index 1a8df9a833..3dfd979654 100644
--- a/src/qml/compiler/qv4isel_masm_p.h
+++ b/src/qml/compiler/qv4isel_masm_p.h
@@ -453,8 +453,14 @@ public:
void addPatch(DataLabelPtr patch, V4IR::BasicBlock *target);
void generateCJumpOnNonZero(RegisterID reg, V4IR::BasicBlock *currentBlock,
V4IR::BasicBlock *trueBlock, V4IR::BasicBlock *falseBlock);
-
- Pointer loadTempAddress(RegisterID reg, V4IR::Temp *t);
+ void generateCJumpOnCompare(RelationalCondition cond, RegisterID left, TrustedImm32 right,
+ V4IR::BasicBlock *currentBlock, V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock);
+ void generateCJumpOnCompare(RelationalCondition cond, RegisterID left, RegisterID right,
+ V4IR::BasicBlock *currentBlock, V4IR::BasicBlock *trueBlock,
+ V4IR::BasicBlock *falseBlock);
+
+ Pointer loadTempAddress(RegisterID baseReg, V4IR::Temp *t);
Pointer loadStringAddress(RegisterID reg, const QString &string);
void loadStringRef(RegisterID reg, const QString &string);
Pointer stackSlotPointer(V4IR::Temp *t) const
@@ -1245,6 +1251,11 @@ public:
return target;
}
+ RegisterID toBoolRegister(V4IR::Expr *e, RegisterID scratchReg)
+ {
+ return toInt32Register(e, scratchReg);
+ }
+
RegisterID toInt32Register(V4IR::Expr *e, RegisterID scratchReg)
{
if (V4IR::Const *c = e->asConst()) {
@@ -1441,6 +1452,10 @@ protected:
Assembler::Jump branchDouble(bool invertCondition, V4IR::AluOp op, V4IR::Expr *left, V4IR::Expr *right);
bool visitCJumpDouble(V4IR::AluOp op, V4IR::Expr *left, V4IR::Expr *right,
V4IR::BasicBlock *iftrue, V4IR::BasicBlock *iffalse);
+ void visitCJumpStrict(V4IR::Binop *binop, V4IR::BasicBlock *trueBlock, V4IR::BasicBlock *falseBlock);
+ bool visitCJumpStrictNullUndefined(V4IR::Type nullOrUndef, V4IR::Binop *binop,
+ V4IR::BasicBlock *trueBlock, V4IR::BasicBlock *falseBlock);
+ bool visitCJumpStrictBool(V4IR::Binop *binop, V4IR::BasicBlock *trueBlock, V4IR::BasicBlock *falseBlock);
bool int32Binop(V4IR::AluOp oper, V4IR::Expr *leftSource, V4IR::Expr *rightSource,
V4IR::Temp *target);
diff --git a/src/qml/compiler/qv4regalloc.cpp b/src/qml/compiler/qv4regalloc.cpp
index 58c84add2e..c325ea6b71 100644
--- a/src/qml/compiler/qv4regalloc.cpp
+++ b/src/qml/compiler/qv4regalloc.cpp
@@ -424,7 +424,12 @@ protected: // IRDecoder
{
bool needsCall = true;
- if (leftSource->type == DoubleType && rightSource->type == DoubleType) {
+ if (oper == OpStrictEqual || oper == OpStrictNotEqual) {
+ bool noCall = leftSource->type == NullType || rightSource->type == NullType
+ || leftSource->type == UndefinedType || rightSource->type == UndefinedType
+ || leftSource->type == BoolType || rightSource->type == BoolType;
+ needsCall = !noCall;
+ } else if (leftSource->type == DoubleType && rightSource->type == DoubleType) {
if (oper == OpMul || oper == OpAdd || oper == OpDiv || oper == OpSub
|| (oper >= OpGt && oper <= OpStrictNotEqual)) {
needsCall = false;
@@ -439,25 +444,6 @@ protected: // IRDecoder
}
}
-#if 0 // TODO: change masm to generate code
- switch (leftSource->type) {
- case DoubleType:
- case SInt32Type:
- case UInt32Type:
- switch (rightSource->type) {
- case DoubleType:
- case SInt32Type:
- case UInt32Type:
- if (oper != OpMod)
- needsCall = false;
- default:
- break;
- } break;
- default:
- break;
- }
-#endif
-
addDef(target);
if (needsCall) {
diff --git a/src/qml/jsruntime/qv4runtime.cpp b/src/qml/jsruntime/qv4runtime.cpp
index b8b62c1bb7..6e566953c7 100644
--- a/src/qml/jsruntime/qv4runtime.cpp
+++ b/src/qml/jsruntime/qv4runtime.cpp
@@ -139,6 +139,7 @@ struct RuntimeCounters::Data {
};
void dump() const {
+ QTextStream outs(stderr, QIODevice::WriteOnly);
QList<Line> lines;
foreach (const char *func, counters.keys()) {
const Counters &fCount = counters[func];
@@ -154,9 +155,13 @@ struct RuntimeCounters::Data {
}
}
qSort(lines.begin(), lines.end(), Line::less);
- qDebug() << "Counters:";
+ outs << lines.size() << " counters:" << endl;
foreach (const Line &line, lines)
- qDebug("%10ld | %s | %s | %s", line.count, line.func, pretty(line.tag1), pretty(line.tag2));
+ outs << qSetFieldWidth(10) << line.count << qSetFieldWidth(0)
+ << " | " << line.func
+ << " | " << pretty(line.tag1)
+ << " | " << pretty(line.tag2)
+ << endl;
}
};
@@ -172,6 +177,7 @@ RuntimeCounters::RuntimeCounters()
RuntimeCounters::~RuntimeCounters()
{
d->dump();
+ delete d;
}
void RuntimeCounters::count(const char *func, uint tag)