about summary refs log tree commit diff stats
path: root/src/qml/jit
diff options
context:
space:
mode:
author	Lars Knoll <lars.knoll@digia.com>	2014-02-21 08:45:09 +0100
committer	The Qt Project <gerrit-noreply@qt-project.org>	2014-03-04 07:56:08 +0100
commit72293bf3b0a6a3d6ac30cf472b81fd4844b3d582 (patch)
tree45a7c50f2a556338ce5c390338580e3066eab200 /src/qml/jit
parentca056ed3fa25b417bc88786377999b04640b1265 (diff)
Move all binop related code into qv4binop*
Change-Id: I8f96b8d570dd4c0139b0a2e595055b3b2c6dae70
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
Diffstat (limited to 'src/qml/jit')
-rw-r--r--	src/qml/jit/qv4assembler.cpp	54
-rw-r--r--	src/qml/jit/qv4assembler_p.h	156
-rw-r--r--	src/qml/jit/qv4binop.cpp	61
-rw-r--r--	src/qml/jit/qv4binop_p.h	159
4 files changed, 218 insertions, 212 deletions
diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp
index 8d034cde9f..2495771c0a 100644
--- a/src/qml/jit/qv4assembler.cpp
+++ b/src/qml/jit/qv4assembler.cpp
@@ -309,60 +309,6 @@ void Assembler::leaveStandardStackFrame()
-#define OP(op) \
- { isel_stringIfy(op), op, 0, 0, 0 }
-#define OPCONTEXT(op) \
- { isel_stringIfy(op), 0, op, 0, 0 }
-
-#define INLINE_OP(op, memOp, immOp) \
- { isel_stringIfy(op), op, 0, memOp, immOp }
-#define INLINE_OPCONTEXT(op, memOp, immOp) \
- { isel_stringIfy(op), 0, op, memOp, immOp }
-
-#define NULL_OP \
- { 0, 0, 0, 0, 0 }
-
-const Assembler::BinaryOperationInfo Assembler::binaryOperations[IR::LastAluOp + 1] = {
- NULL_OP, // OpInvalid
- NULL_OP, // OpIfTrue
- NULL_OP, // OpNot
- NULL_OP, // OpUMinus
- NULL_OP, // OpUPlus
- NULL_OP, // OpCompl
- NULL_OP, // OpIncrement
- NULL_OP, // OpDecrement
-
- INLINE_OP(__qmljs_bit_and, &Assembler::inline_and32, &Assembler::inline_and32), // OpBitAnd
- INLINE_OP(__qmljs_bit_or, &Assembler::inline_or32, &Assembler::inline_or32), // OpBitOr
- INLINE_OP(__qmljs_bit_xor, &Assembler::inline_xor32, &Assembler::inline_xor32), // OpBitXor
-
- INLINE_OPCONTEXT(__qmljs_add, &Assembler::inline_add32, &Assembler::inline_add32), // OpAdd
- INLINE_OP(__qmljs_sub, &Assembler::inline_sub32, &Assembler::inline_sub32), // OpSub
- INLINE_OP(__qmljs_mul, &Assembler::inline_mul32, &Assembler::inline_mul32), // OpMul
-
- OP(__qmljs_div), // OpDiv
- OP(__qmljs_mod), // OpMod
-
- INLINE_OP(__qmljs_shl, &Assembler::inline_shl32, &Assembler::inline_shl32), // OpLShift
- INLINE_OP(__qmljs_shr, &Assembler::inline_shr32, &Assembler::inline_shr32), // OpRShift
- INLINE_OP(__qmljs_ushr, &Assembler::inline_ushr32, &Assembler::inline_ushr32), // OpURShift
-
- OP(__qmljs_gt), // OpGt
- OP(__qmljs_lt), // OpLt
- OP(__qmljs_ge), // OpGe
- OP(__qmljs_le), // OpLe
- OP(__qmljs_eq), // OpEqual
- OP(__qmljs_ne), // OpNotEqual
- OP(__qmljs_se), // OpStrictEqual
- OP(__qmljs_sne), // OpStrictNotEqual
-
- OPCONTEXT(__qmljs_instanceof), // OpInstanceof
- OPCONTEXT(__qmljs_in), // OpIn
-
- NULL_OP, // OpAnd
- NULL_OP // OpOr
-};
-
// Try to load the source expression into the destination FP register. This assumes that two
// general purpose (integer) registers are available: the ScratchRegister and the
diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h
index fa9aa4193b..2774b8d8e0 100644
--- a/src/qml/jit/qv4assembler_p.h
+++ b/src/qml/jit/qv4assembler_p.h
@@ -1031,162 +1031,6 @@ public:
generateFunctionCallImp(r, functionName, function, arg1, VoidType(), VoidType(), VoidType(), VoidType());
}
- typedef Jump (Assembler::*MemRegBinOp)(Address, RegisterID);
- typedef Jump (Assembler::*ImmRegBinOp)(TrustedImm32, RegisterID);
-
- struct BinaryOperationInfo {
- const char *name;
- QV4::BinOp fallbackImplementation;
- QV4::BinOpContext contextImplementation;
- MemRegBinOp inlineMemRegOp;
- ImmRegBinOp inlineImmRegOp;
- };
-
- static const BinaryOperationInfo binaryOperations[IR::LastAluOp + 1];
- static const BinaryOperationInfo &binaryOperation(IR::AluOp operation)
- { return binaryOperations[operation]; }
-
- Jump inline_add32(Address addr, RegisterID reg)
- {
-#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- return branchAdd32(Overflow, addr, reg);
-#else
- load32(addr, ScratchRegister);
- return branchAdd32(Overflow, ScratchRegister, reg);
-#endif
- }
-
- Jump inline_add32(TrustedImm32 imm, RegisterID reg)
- {
- return branchAdd32(Overflow, imm, reg);
- }
-
- Jump inline_sub32(Address addr, RegisterID reg)
- {
-#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- return branchSub32(Overflow, addr, reg);
-#else
- load32(addr, ScratchRegister);
- return branchSub32(Overflow, ScratchRegister, reg);
-#endif
- }
-
- Jump inline_sub32(TrustedImm32 imm, RegisterID reg)
- {
- return branchSub32(Overflow, imm, reg);
- }
-
- Jump inline_mul32(Address addr, RegisterID reg)
- {
-#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- return branchMul32(Overflow, addr, reg);
-#else
- load32(addr, ScratchRegister);
- return branchMul32(Overflow, ScratchRegister, reg);
-#endif
- }
-
- Jump inline_mul32(TrustedImm32 imm, RegisterID reg)
- {
- return branchMul32(Overflow, imm, reg, reg);
- }
-
- Jump inline_shl32(Address addr, RegisterID reg)
- {
- load32(addr, ScratchRegister);
- and32(TrustedImm32(0x1f), ScratchRegister);
- lshift32(ScratchRegister, reg);
- return Jump();
- }
-
- Jump inline_shl32(TrustedImm32 imm, RegisterID reg)
- {
- imm.m_value &= 0x1f;
- lshift32(imm, reg);
- return Jump();
- }
-
- Jump inline_shr32(Address addr, RegisterID reg)
- {
- load32(addr, ScratchRegister);
- and32(TrustedImm32(0x1f), ScratchRegister);
- rshift32(ScratchRegister, reg);
- return Jump();
- }
-
- Jump inline_shr32(TrustedImm32 imm, RegisterID reg)
- {
- imm.m_value &= 0x1f;
- rshift32(imm, reg);
- return Jump();
- }
-
- Jump inline_ushr32(Address addr, RegisterID reg)
- {
- load32(addr, ScratchRegister);
- and32(TrustedImm32(0x1f), ScratchRegister);
- urshift32(ScratchRegister, reg);
- return branchTest32(Signed, reg, reg);
- }
-
- Jump inline_ushr32(TrustedImm32 imm, RegisterID reg)
- {
- imm.m_value &= 0x1f;
- urshift32(imm, reg);
- return branchTest32(Signed, reg, reg);
- }
-
- Jump inline_and32(Address addr, RegisterID reg)
- {
-#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- and32(addr, reg);
-#else
- load32(addr, ScratchRegister);
- and32(ScratchRegister, reg);
-#endif
- return Jump();
- }
-
- Jump inline_and32(TrustedImm32 imm, RegisterID reg)
- {
- and32(imm, reg);
- return Jump();
- }
-
- Jump inline_or32(Address addr, RegisterID reg)
- {
-#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- or32(addr, reg);
-#else
- load32(addr, ScratchRegister);
- or32(ScratchRegister, reg);
-#endif
- return Jump();
- }
-
- Jump inline_or32(TrustedImm32 imm, RegisterID reg)
- {
- or32(imm, reg);
- return Jump();
- }
-
- Jump inline_xor32(Address addr, RegisterID reg)
- {
-#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- xor32(addr, reg);
-#else
- load32(addr, ScratchRegister);
- xor32(ScratchRegister, reg);
-#endif
- return Jump();
- }
-
- Jump inline_xor32(TrustedImm32 imm, RegisterID reg)
- {
- xor32(imm, reg);
- return Jump();
- }
-
Pointer toAddress(RegisterID tmpReg, IR::Expr *e, int offset)
{
if (IR::Const *c = e->asConst()) {
diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp
index 473f260be7..2a4cbeea9b 100644
--- a/src/qml/jit/qv4binop.cpp
+++ b/src/qml/jit/qv4binop.cpp
@@ -55,6 +55,63 @@ inline bool isPregOrConst(IR::Expr *e)
}
} // anonymous namespace
+
+#define OP(op) \
+ { isel_stringIfy(op), op, 0, 0, 0 }
+#define OPCONTEXT(op) \
+ { isel_stringIfy(op), 0, op, 0, 0 }
+
+#define INLINE_OP(op, memOp, immOp) \
+ { isel_stringIfy(op), op, 0, memOp, immOp }
+#define INLINE_OPCONTEXT(op, memOp, immOp) \
+ { isel_stringIfy(op), 0, op, memOp, immOp }
+
+#define NULL_OP \
+ { 0, 0, 0, 0, 0 }
+
+const Binop::OpInfo Binop::operations[IR::LastAluOp + 1] = {
+ NULL_OP, // OpInvalid
+ NULL_OP, // OpIfTrue
+ NULL_OP, // OpNot
+ NULL_OP, // OpUMinus
+ NULL_OP, // OpUPlus
+ NULL_OP, // OpCompl
+ NULL_OP, // OpIncrement
+ NULL_OP, // OpDecrement
+
+ INLINE_OP(__qmljs_bit_and, &Binop::inline_and32, &Binop::inline_and32), // OpBitAnd
+ INLINE_OP(__qmljs_bit_or, &Binop::inline_or32, &Binop::inline_or32), // OpBitOr
+ INLINE_OP(__qmljs_bit_xor, &Binop::inline_xor32, &Binop::inline_xor32), // OpBitXor
+
+ INLINE_OPCONTEXT(__qmljs_add, &Binop::inline_add32, &Binop::inline_add32), // OpAdd
+ INLINE_OP(__qmljs_sub, &Binop::inline_sub32, &Binop::inline_sub32), // OpSub
+ INLINE_OP(__qmljs_mul, &Binop::inline_mul32, &Binop::inline_mul32), // OpMul
+
+ OP(__qmljs_div), // OpDiv
+ OP(__qmljs_mod), // OpMod
+
+ INLINE_OP(__qmljs_shl, &Binop::inline_shl32, &Binop::inline_shl32), // OpLShift
+ INLINE_OP(__qmljs_shr, &Binop::inline_shr32, &Binop::inline_shr32), // OpRShift
+ INLINE_OP(__qmljs_ushr, &Binop::inline_ushr32, &Binop::inline_ushr32), // OpURShift
+
+ OP(__qmljs_gt), // OpGt
+ OP(__qmljs_lt), // OpLt
+ OP(__qmljs_ge), // OpGe
+ OP(__qmljs_le), // OpLe
+ OP(__qmljs_eq), // OpEqual
+ OP(__qmljs_ne), // OpNotEqual
+ OP(__qmljs_se), // OpStrictEqual
+ OP(__qmljs_sne), // OpStrictNotEqual
+
+ OPCONTEXT(__qmljs_instanceof), // OpInstanceof
+ OPCONTEXT(__qmljs_in), // OpIn
+
+ NULL_OP, // OpAnd
+ NULL_OP // OpOr
+};
+
+
+
void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Temp *target)
{
if (op != IR::OpMod
@@ -73,11 +130,11 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Temp *target)
done = genInlineBinop(lhs, rhs, target);
// TODO: inline var===null and var!==null
- Assembler::BinaryOperationInfo info = Assembler::binaryOperation(op);
+ Binop::OpInfo info = Binop::operation(op);
if (op == IR::OpAdd &&
(lhs->type == IR::StringType || rhs->type == IR::StringType)) {
- const Assembler::BinaryOperationInfo stringAdd = OPCONTEXT(__qmljs_add_string);
+ const Binop::OpInfo stringAdd = OPCONTEXT(__qmljs_add_string);
info = stringAdd;
}
diff --git a/src/qml/jit/qv4binop_p.h b/src/qml/jit/qv4binop_p.h
index d29b370f37..096f28e881 100644
--- a/src/qml/jit/qv4binop_p.h
+++ b/src/qml/jit/qv4binop_p.h
@@ -43,6 +43,7 @@
#include <qv4jsir_p.h>
#include <qv4isel_masm_p.h>
+#include <qv4assembler_p.h>
QT_BEGIN_NAMESPACE
@@ -62,6 +63,164 @@ struct Binop {
bool int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *target);
Assembler::Jump genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Temp *target);
+ typedef Assembler::Jump (Binop::*MemRegOp)(Assembler::Address, Assembler::RegisterID);
+ typedef Assembler::Jump (Binop::*ImmRegOp)(Assembler::TrustedImm32, Assembler::RegisterID);
+
+ struct OpInfo {
+ const char *name;
+ QV4::BinOp fallbackImplementation;
+ QV4::BinOpContext contextImplementation;
+ MemRegOp inlineMemRegOp;
+ ImmRegOp inlineImmRegOp;
+ };
+
+ static const OpInfo operations[IR::LastAluOp + 1];
+ static const OpInfo &operation(IR::AluOp operation)
+ { return operations[operation]; }
+
+ Assembler::Jump inline_add32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ return as->branchAdd32(Assembler::Overflow, addr, reg);
+#else
+ as->load32(addr, Assembler::ScratchRegister);
+ return as->branchAdd32(Assembler::Overflow, Assembler::ScratchRegister, reg);
+#endif
+ }
+
+ Assembler::Jump inline_add32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ return as->branchAdd32(Assembler::Overflow, imm, reg);
+ }
+
+ Assembler::Jump inline_sub32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ return as->branchSub32(Assembler::Overflow, addr, reg);
+#else
+ as->load32(addr, Assembler::ScratchRegister);
+ return as->branchSub32(Assembler::Overflow, Assembler::ScratchRegister, reg);
+#endif
+ }
+
+ Assembler::Jump inline_sub32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ return as->branchSub32(Assembler::Overflow, imm, reg);
+ }
+
+ Assembler::Jump inline_mul32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ return as->branchMul32(Assembler::Overflow, addr, reg);
+#else
+ as->load32(addr, Assembler::ScratchRegister);
+ return as->branchMul32(Assembler::Overflow, Assembler::ScratchRegister, reg);
+#endif
+ }
+
+ Assembler::Jump inline_mul32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ return as->branchMul32(Assembler::Overflow, imm, reg, reg);
+ }
+
+ Assembler::Jump inline_shl32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+ as->load32(addr, Assembler::ScratchRegister);
+ as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+ as->lshift32(Assembler::ScratchRegister, reg);
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_shl32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ imm.m_value &= 0x1f;
+ as->lshift32(imm, reg);
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_shr32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+ as->load32(addr, Assembler::ScratchRegister);
+ as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+ as->rshift32(Assembler::ScratchRegister, reg);
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_shr32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ imm.m_value &= 0x1f;
+ as->rshift32(imm, reg);
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_ushr32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+ as->load32(addr, Assembler::ScratchRegister);
+ as->and32(Assembler::TrustedImm32(0x1f), Assembler::ScratchRegister);
+ as->urshift32(Assembler::ScratchRegister, reg);
+ return as->branchTest32(Assembler::Signed, reg, reg);
+ }
+
+ Assembler::Jump inline_ushr32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ imm.m_value &= 0x1f;
+ as->urshift32(imm, reg);
+ return as->branchTest32(Assembler::Signed, reg, reg);
+ }
+
+ Assembler::Jump inline_and32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ as->and32(addr, reg);
+#else
+ as->load32(addr, Assembler::ScratchRegister);
+ as->and32(Assembler::ScratchRegister, reg);
+#endif
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_and32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ as->and32(imm, reg);
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_or32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ as->or32(addr, reg);
+#else
+ as->load32(addr, Assembler::ScratchRegister);
+ as->or32(Assembler::ScratchRegister, reg);
+#endif
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_or32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ as->or32(imm, reg);
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_xor32(Assembler::Address addr, Assembler::RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ as->xor32(addr, reg);
+#else
+ as->load32(addr, Assembler::ScratchRegister);
+ as->xor32(Assembler::ScratchRegister, reg);
+#endif
+ return Assembler::Jump();
+ }
+
+ Assembler::Jump inline_xor32(Assembler::TrustedImm32 imm, Assembler::RegisterID reg)
+ {
+ as->xor32(imm, reg);
+ return Assembler::Jump();
+ }
+
+
+
Assembler *as;
IR::AluOp op;
};