diff options
author | Erik Verbruggen <erik.verbruggen@digia.com> | 2014-04-30 15:38:01 +0200 |
---|---|---|
committer | The Qt Project <gerrit-noreply@qt-project.org> | 2014-05-23 12:23:32 +0200 |
commit | 75c22465cf8fe262edfe6178bb9ca19661fb710e (patch) | |
tree | 69da4cbb16124ae88678f96ca3e37b76851e72f0 /src/qml/jit/qv4binop.cpp | |
parent | e950557e1133e8aac65a453597ab35400a5b9a10 (diff) |
V4: Split arguments/locals from temps.
There are a couple of reasons to split the temporaries off from the
arguments and locals:
Temporaries are invisible, and changes to them cannot be observed.
On the other hand, arguments and locals are visible, and writes to them
can be seen from other places (nested functions), or by using the
arguments array. So, in practice these correspond to memory locations.
(One could argue that if neither nested functions, nor eval(), nor
arguments[] is used, the loads/stores are invisible too. But that's an
optimization, and changing locals/arguments to temporaries can be done
in a separate pass.)
Because of the "volatile" nature of arguments and locals, their usage
cannot be optimized. All optimizations (SSA construction, register
allocation, copy elimination, etc.) work on temporaries. Being able to
easily ignore all non-temporaries has the benefit that optimizations can
be faster.
Previously, Temps were not uniquely numbered: argument 1, local 1, and
temporary 1 all had the same number and were distinguishable by their
type. So, for any mapping from Temp to something else, a QHash was used.
Now that Temps only hold proper temporaries, the indexes do uniquely
identify them. Add to that the fact that after transforming to SSA form
all temporaries are renumbered starting from 0 and without any holes in
the numbering, many of those datastructures can be changed to simple
vectors. That change gives a noticeable performance improvement.
One implication of this change is that a number of functions that took
a Temp as their argument now need to take a Temp-or-ArgLocal, i.e. an Expr.
However, it turns out that there are very few places where that applies,
as many of those places also need to take constants or names. Additionally,
explicitly separating memory loads/stores for arguments/locals from
temporaries adds the benefit that it's now easier to do a peep-hole
optimizer for those load/store operations in the future: when a load is
directly preceded by a store, it can be eliminated if the value is
still available in a temporary.
Change-Id: I4114006b076795d9ea9fe3649cdb3b9d7b7508f0
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
Diffstat (limited to 'src/qml/jit/qv4binop.cpp')
-rw-r--r-- | src/qml/jit/qv4binop.cpp | 50 |
1 file changed, 28 insertions, 22 deletions
diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp index 344bbf56e0..a19072f52e 100644 --- a/src/qml/jit/qv4binop.cpp +++ b/src/qml/jit/qv4binop.cpp @@ -112,7 +112,7 @@ const Binop::OpInfo Binop::operations[IR::LastAluOp + 1] = { -void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Temp *target) +void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target) { if (op != IR::OpMod && lhs->type == IR::DoubleType && rhs->type == IR::DoubleType @@ -156,14 +156,15 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Temp *target) } -void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Temp *target) +void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target) { Q_ASSERT(lhs->asConst() == 0 || rhs->asConst() == 0); Q_ASSERT(isPregOrConst(lhs)); Q_ASSERT(isPregOrConst(rhs)); + IR::Temp *targetTemp = target->asTemp(); Assembler::FPRegisterID targetReg; - if (target->kind == IR::Temp::PhysicalRegister) - targetReg = (Assembler::FPRegisterID) target->index; + if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister) + targetReg = (Assembler::FPRegisterID) targetTemp->index; else targetReg = Assembler::FPGpr0; @@ -232,31 +233,33 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Temp *target) } return; } - if (target->kind != IR::Temp::PhysicalRegister) + if (!targetTemp || targetTemp->kind != IR::Temp::PhysicalRegister) as->storeDouble(Assembler::FPGpr0, target); } -bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *target) +bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target) { Q_ASSERT(leftSource->type == IR::SInt32Type); + IR::Temp *targetTemp = target->asTemp(); Assembler::RegisterID targetReg = Assembler::ReturnValueRegister; - if (target->kind == IR::Temp::PhysicalRegister) { + if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister) { // We try to load leftSource into the target's register, but we can't do that if // the target register is 
the same as rightSource. IR::Temp *rhs = rightSource->asTemp(); - if (!rhs || rhs->kind != IR::Temp::PhysicalRegister || rhs->index != target->index) - targetReg = (Assembler::RegisterID) target->index; + if (!rhs || rhs->kind != IR::Temp::PhysicalRegister || rhs->index != targetTemp->index) + targetReg = (Assembler::RegisterID) targetTemp->index; } switch (op) { case IR::OpBitAnd: { Q_ASSERT(rightSource->type == IR::SInt32Type); if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister - && target->kind == IR::Temp::PhysicalRegister - && target->index == rightSource->asTemp()->index) { + && targetTemp + && targetTemp->kind == IR::Temp::PhysicalRegister + && targetTemp->index == rightSource->asTemp()->index) { as->and32(as->toInt32Register(leftSource, Assembler::ScratchRegister), - (Assembler::RegisterID) target->index); + (Assembler::RegisterID) targetTemp->index); return true; } @@ -268,10 +271,11 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *ta case IR::OpBitOr: { Q_ASSERT(rightSource->type == IR::SInt32Type); if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister - && target->kind == IR::Temp::PhysicalRegister - && target->index == rightSource->asTemp()->index) { + && targetTemp + && targetTemp->kind == IR::Temp::PhysicalRegister + && targetTemp->index == rightSource->asTemp()->index) { as->or32(as->toInt32Register(leftSource, Assembler::ScratchRegister), - (Assembler::RegisterID) target->index); + (Assembler::RegisterID) targetTemp->index); return true; } @@ -283,10 +287,11 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *ta case IR::OpBitXor: { Q_ASSERT(rightSource->type == IR::SInt32Type); if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister - && target->kind == IR::Temp::PhysicalRegister - && target->index == rightSource->asTemp()->index) { + && targetTemp + && targetTemp->kind == 
IR::Temp::PhysicalRegister + && targetTemp->index == rightSource->asTemp()->index) { as->xor32(as->toInt32Register(leftSource, Assembler::ScratchRegister), - (Assembler::RegisterID) target->index); + (Assembler::RegisterID) targetTemp->index); return true; } @@ -370,9 +375,10 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *ta Q_ASSERT(rightSource->type == IR::SInt32Type); if (rightSource->asTemp() && rightSource->asTemp()->kind == IR::Temp::PhysicalRegister - && target->kind == IR::Temp::PhysicalRegister - && target->index == rightSource->asTemp()->index) { - Assembler::RegisterID targetReg = (Assembler::RegisterID) target->index; + && targetTemp + && targetTemp->kind == IR::Temp::PhysicalRegister + && targetTemp->index == rightSource->asTemp()->index) { + Assembler::RegisterID targetReg = (Assembler::RegisterID) targetTemp->index; as->move(targetReg, Assembler::ScratchRegister); as->move(as->toInt32Register(leftSource, targetReg), targetReg); as->sub32(Assembler::ScratchRegister, targetReg); @@ -407,7 +413,7 @@ static inline Assembler::FPRegisterID getFreeFPReg(IR::Expr *shouldNotOverlap, u return Assembler::FPRegisterID(hint); } -Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *target) +Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target) { Assembler::Jump done; |