path: root/qv4isel_masm.cpp
author    Simon Hausmann <simon.hausmann@digia.com>  2012-12-11 11:17:55 +0100
committer Erik Verbruggen <erik.verbruggen@digia.com>  2012-12-11 15:52:30 +0100
commit    bcdddfda8ca81752b249540b0abaefb46eb5f766 (patch)
tree      dd91ea6476825121e58279a0090158a9c87a56ae /qv4isel_masm.cpp
parent    381ce0e902254f39b411f3b21548943720e3db0c (diff)
Make MASM InstructionSelection reentrant
The isel is currently not reentrant because the MacroAssembler's instruction output buffer is inaccessible and can only be reset by destroying the instance and re-creating it. This patch moves assembler-specific code into an Assembler subclass and changes isel to instantiate and use it instead of subclassing JSC::MacroAssembler.

Change-Id: Ic633214c67f475195202459698077e47a75ece2f
Reviewed-by: Erik Verbruggen <erik.verbruggen@digia.com>
Diffstat (limited to 'qv4isel_masm.cpp')
-rw-r--r--  qv4isel_masm.cpp | 577
1 file changed, 326 insertions(+), 251 deletions(-)
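
For reference, the reentrancy pattern this change introduces boils down to giving each isel run its own assembler instance and restoring the previous one afterwards, so a nested operator() invocation no longer clobbers an in-progress instruction buffer. Below is a minimal sketch of that lifecycle, condensed from the diff; "Assembler" stands in for the JSC::MacroAssembler subclass added by this patch, and the prologue/epilogue and statement visitation shown in the full diff are elided.

    // Sketch only: per-function assembler lifecycle that makes isel reentrant.
    void InstructionSelection::operator()(IR::Function *function)
    {
        qSwap(_function, function);

        Assembler *oldAssembler = _asm;     // remember any in-progress assembler
        _asm = new Assembler(_function);    // fresh instruction buffer per function

        // ... emit prologue, visit basic blocks, emit epilogue via _asm ...

        _asm->link();                       // finalize and install this function's code

        qSwap(_function, function);
        delete _asm;                        // drop this function's buffer
        _asm = oldAssembler;                // restore the caller's assembler, if any
    }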
diff --git a/qv4isel_masm.cpp b/qv4isel_masm.cpp
index a754bee666..bcd7585c64 100644
--- a/qv4isel_masm.cpp
+++ b/qv4isel_masm.cpp
@@ -62,6 +62,109 @@ namespace {
QTextStream qout(stderr, QIODevice::WriteOnly);
}
+Assembler::Assembler(IR::Function* function)
+ : _function(function)
+{
+}
+
+void Assembler::registerBlock(IR::BasicBlock* block)
+{
+ _addrs[block] = label();
+}
+
+void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target)
+{
+ if (current->index + 1 != target->index)
+ _patches[target].append(jump());
+}
+
+void Assembler::addPatch(IR::BasicBlock* targetBlock, Jump targetJump)
+{
+ _patches[targetBlock].append(targetJump);
+}
+
+Assembler::Pointer Assembler::loadTempAddress(RegisterID reg, IR::Temp *t)
+{
+ int32_t offset = 0;
+ if (t->index < 0) {
+ const int arg = -t->index - 1;
+ loadPtr(Address(ContextRegister, offsetof(ExecutionContext, arguments)), reg);
+ offset = arg * sizeof(Value);
+ } else if (t->index < _function->locals.size()) {
+ loadPtr(Address(ContextRegister, offsetof(ExecutionContext, locals)), reg);
+ offset = t->index * sizeof(Value);
+ } else {
+ const int arg = _function->maxNumberOfArguments + t->index - _function->locals.size();
+ // StackFrameRegister points to its old value on the stack, so even for the first temp we need to
+ // subtract at least sizeof(Value).
+ offset = - sizeof(Value) * (arg + 1);
+ reg = StackFrameRegister;
+ }
+ return Pointer(reg, offset);
+}
+
+template <typename Result, typename Source>
+void Assembler::copyValue(Result result, Source source)
+{
+#ifdef VALUE_FITS_IN_REGISTER
+ // Use ReturnValueRegister as "scratch" register because loadArgument
+ // and storeArgument are functions that may need a scratch register themselves.
+ loadArgument(source, ReturnValueRegister);
+ storeArgument(ReturnValueRegister, result);
+#else
+ loadDouble(source, FPGpr0);
+ storeDouble(FPGpr0, result);
+#endif
+}
+
+void Assembler::enterStandardStackFrame(int locals)
+{
+#if CPU(ARM)
+ push(JSC::ARMRegisters::lr);
+#endif
+ push(StackFrameRegister);
+ move(StackPointerRegister, StackFrameRegister);
+
+ // space for the locals and the ContextRegister
+ int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*);
+
+#if CPU(X86) || CPU(X86_64)
+ frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
+#endif
+ subPtr(TrustedImm32(frameSize), StackPointerRegister);
+
+#if CPU(X86) || CPU(ARM)
+ for (int saveReg = CalleeSavedFirstRegister; saveReg <= CalleeSavedLastRegister; ++saveReg)
+ push(static_cast<RegisterID>(saveReg));
+#endif
+ // save the ContextRegister
+ storePtr(ContextRegister, StackPointerRegister);
+}
+
+void Assembler::leaveStandardStackFrame(int locals)
+{
+ // restore the ContextRegister
+ loadPtr(StackPointerRegister, ContextRegister);
+
+#if CPU(X86) || CPU(ARM)
+ for (int saveReg = CalleeSavedLastRegister; saveReg >= CalleeSavedFirstRegister; --saveReg)
+ pop(static_cast<RegisterID>(saveReg));
+#endif
+ // space for the locals and the ContextRegister
+ int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*);
+#if CPU(X86) || CPU(X86_64)
+ frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
+#endif
+ addPtr(TrustedImm32(frameSize), StackPointerRegister);
+
+ pop(StackFrameRegister);
+#if CPU(ARM)
+ pop(JSC::ARMRegisters::lr);
+#endif
+}
+
+
+
#define OP(op) \
{ isel_stringIfy(op), op, 0, 0 }
@@ -71,7 +174,7 @@ QTextStream qout(stderr, QIODevice::WriteOnly);
#define NULL_OP \
{ 0, 0, 0, 0 }
-const InstructionSelection::BinaryOperationInfo InstructionSelection::binaryOperations[QQmlJS::IR::LastAluOp + 1] = {
+const Assembler::BinaryOperationInfo Assembler::binaryOperations[QQmlJS::IR::LastAluOp + 1] = {
NULL_OP, // OpInvalid
NULL_OP, // OpIfTrue
NULL_OP, // OpNot
@@ -79,20 +182,20 @@ const InstructionSelection::BinaryOperationInfo InstructionSelection::binaryOper
NULL_OP, // OpUPlus
NULL_OP, // OpCompl
- INLINE_OP(__qmljs_bit_and, &InstructionSelection::inline_and32, &InstructionSelection::inline_and32), // OpBitAnd
- INLINE_OP(__qmljs_bit_or, &InstructionSelection::inline_or32, &InstructionSelection::inline_or32), // OpBitOr
- INLINE_OP(__qmljs_bit_xor, &InstructionSelection::inline_xor32, &InstructionSelection::inline_xor32), // OpBitXor
+ INLINE_OP(__qmljs_bit_and, &Assembler::inline_and32, &Assembler::inline_and32), // OpBitAnd
+ INLINE_OP(__qmljs_bit_or, &Assembler::inline_or32, &Assembler::inline_or32), // OpBitOr
+ INLINE_OP(__qmljs_bit_xor, &Assembler::inline_xor32, &Assembler::inline_xor32), // OpBitXor
- INLINE_OP(__qmljs_add, &InstructionSelection::inline_add32, &InstructionSelection::inline_add32), // OpAdd
- INLINE_OP(__qmljs_sub, &InstructionSelection::inline_sub32, &InstructionSelection::inline_sub32), // OpSub
- INLINE_OP(__qmljs_mul, &InstructionSelection::inline_mul32, &InstructionSelection::inline_mul32), // OpMul
+ INLINE_OP(__qmljs_add, &Assembler::inline_add32, &Assembler::inline_add32), // OpAdd
+ INLINE_OP(__qmljs_sub, &Assembler::inline_sub32, &Assembler::inline_sub32), // OpSub
+ INLINE_OP(__qmljs_mul, &Assembler::inline_mul32, &Assembler::inline_mul32), // OpMul
OP(__qmljs_div), // OpDiv
OP(__qmljs_mod), // OpMod
- INLINE_OP(__qmljs_shl, &InstructionSelection::inline_shl32, &InstructionSelection::inline_shl32), // OpLShift
- INLINE_OP(__qmljs_shr, &InstructionSelection::inline_shr32, &InstructionSelection::inline_shr32), // OpRShift
- INLINE_OP(__qmljs_ushr, &InstructionSelection::inline_ushr32, &InstructionSelection::inline_ushr32), // OpURShift
+ INLINE_OP(__qmljs_shl, &Assembler::inline_shl32, &Assembler::inline_shl32), // OpLShift
+ INLINE_OP(__qmljs_shr, &Assembler::inline_shr32, &Assembler::inline_shr32), // OpRShift
+ INLINE_OP(__qmljs_ushr, &Assembler::inline_ushr32, &Assembler::inline_ushr32), // OpURShift
OP(__qmljs_gt), // OpGt
OP(__qmljs_lt), // OpLt
@@ -110,6 +213,92 @@ const InstructionSelection::BinaryOperationInfo InstructionSelection::binaryOper
NULL_OP // OpOr
};
+void Assembler::generateBinOp(IR::AluOp operation, IR::Temp* target, IR::Expr* left, IR::Expr* right)
+{
+ const BinaryOperationInfo& info = binaryOperations[operation];
+ if (!info.fallbackImplementation) {
+ assert(!"unreachable");
+ return;
+ }
+
+ Value leftConst = Value::undefinedValue();
+ Value rightConst = Value::undefinedValue();
+
+ bool canDoInline = info.inlineMemRegOp && info.inlineImmRegOp;
+
+ if (canDoInline) {
+ if (left->asConst()) {
+ leftConst = convertToValue(left->asConst());
+ canDoInline = canDoInline && leftConst.tryIntegerConversion();
+ }
+ if (right->asConst()) {
+ rightConst = convertToValue(right->asConst());
+ canDoInline = canDoInline && rightConst.tryIntegerConversion();
+ }
+ }
+
+ Jump binOpFinished;
+
+ if (canDoInline) {
+
+ Jump leftTypeCheck;
+ if (left->asTemp()) {
+ Address typeAddress = loadTempAddress(ScratchRegister, left->asTemp());
+ typeAddress.offset += offsetof(VM::Value, tag);
+ leftTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
+ }
+
+ Jump rightTypeCheck;
+ if (right->asTemp()) {
+ Address typeAddress = loadTempAddress(ScratchRegister, right->asTemp());
+ typeAddress.offset += offsetof(VM::Value, tag);
+ rightTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
+ }
+
+ if (left->asTemp()) {
+ Address leftValue = loadTempAddress(ScratchRegister, left->asTemp());
+ leftValue.offset += offsetof(VM::Value, int_32);
+ load32(leftValue, IntegerOpRegister);
+ } else { // left->asConst()
+ move(TrustedImm32(leftConst.integerValue()), IntegerOpRegister);
+ }
+
+ Jump overflowCheck;
+
+ if (right->asTemp()) {
+ Address rightValue = loadTempAddress(ScratchRegister, right->asTemp());
+ rightValue.offset += offsetof(VM::Value, int_32);
+
+ overflowCheck = (this->*info.inlineMemRegOp)(rightValue, IntegerOpRegister);
+ } else { // right->asConst()
+ overflowCheck = (this->*info.inlineImmRegOp)(TrustedImm32(rightConst.integerValue()), IntegerOpRegister);
+ }
+
+ Address resultAddr = loadTempAddress(ScratchRegister, target);
+ Address resultValueAddr = resultAddr;
+ resultValueAddr.offset += offsetof(VM::Value, int_32);
+ store32(IntegerOpRegister, resultValueAddr);
+
+ Address resultTypeAddr = resultAddr;
+ resultTypeAddr.offset += offsetof(VM::Value, tag);
+ store32(TrustedImm32(VM::Value::_Integer_Type), resultTypeAddr);
+
+ binOpFinished = jump();
+
+ if (leftTypeCheck.isSet())
+ leftTypeCheck.link(this);
+ if (rightTypeCheck.isSet())
+ rightTypeCheck.link(this);
+ if (overflowCheck.isSet())
+ overflowCheck.link(this);
+ }
+
+ // Fallback
+ generateFunctionCallImp(target, info.name, info.fallbackImplementation, left, right, ContextRegister);
+
+ if (binOpFinished.isSet())
+ binOpFinished.link(this);
+}
#if OS(LINUX)
static void printDisassembledOutputWithCalls(const char* output, const QHash<void*, const char*>& functions)
{
@@ -124,59 +313,8 @@ static void printDisassembledOutputWithCalls(const char* output, const QHash<voi
}
#endif
-InstructionSelection::InstructionSelection(VM::ExecutionEngine *engine)
- : _engine(engine)
- , _function(0)
- , _block(0)
+void Assembler::link()
{
-}
-
-InstructionSelection::~InstructionSelection()
-{
-}
-
-void InstructionSelection::operator()(IR::Function *function)
-{
- qSwap(_function, function);
-
- int locals = (_function->tempCount - _function->locals.size() + _function->maxNumberOfArguments);
- locals = (locals + 1) & ~1;
- enterStandardStackFrame(locals);
-
- int contextPointer = 0;
-#ifndef VALUE_FITS_IN_REGISTER
- // When the return VM value doesn't fit into a register, then
- // the caller provides a pointer for storage as first argument.
- // That shifts the index the context pointer argument by one.
- contextPointer++;
-#endif
-#if CPU(X86)
- loadPtr(addressForArgument(contextPointer), ContextRegister);
-#elif CPU(X86_64) || CPU(ARM)
- move(registerForArgument(contextPointer), ContextRegister);
-#else
- assert(!"TODO");
-#endif
-
- foreach (IR::BasicBlock *block, _function->basicBlocks) {
- _block = block;
- _addrs[block] = label();
- foreach (IR::Stmt *s, block->statements) {
- s->accept(this);
- }
- }
-
- leaveStandardStackFrame(locals);
-#ifndef VALUE_FITS_IN_REGISTER
- // Emulate ret(n) instruction
- // Pop off return address into scratch register ...
- pop(ScratchRegister);
- // ... and overwrite the invisible argument with
- // the return address.
- poke(ScratchRegister);
-#endif
- ret();
-
QHashIterator<IR::BasicBlock *, QVector<Jump> > it(_patches);
while (it.hasNext()) {
it.next();
@@ -220,33 +358,75 @@ void InstructionSelection::operator()(IR::Function *function)
}
_function->code = (Value (*)(VM::ExecutionContext *, const uchar *)) _function->codeRef.code().executableAddress();
+}
- qSwap(_function, function);
+InstructionSelection::InstructionSelection(VM::ExecutionEngine *engine)
+ : _engine(engine)
+ , _block(0)
+ , _function(0)
+ , _asm(0)
+{
}
-String *InstructionSelection::identifier(const QString &s)
+InstructionSelection::~InstructionSelection()
{
- return _engine->identifier(s);
+ delete _asm;
}
-InstructionSelection::Pointer InstructionSelection::loadTempAddress(RegisterID reg, IR::Temp *t)
+void InstructionSelection::operator()(IR::Function *function)
{
- int32_t offset = 0;
- if (t->index < 0) {
- const int arg = -t->index - 1;
- loadPtr(Address(ContextRegister, offsetof(ExecutionContext, arguments)), reg);
- offset = arg * sizeof(Value);
- } else if (t->index < _function->locals.size()) {
- loadPtr(Address(ContextRegister, offsetof(ExecutionContext, locals)), reg);
- offset = t->index * sizeof(Value);
- } else {
- const int arg = _function->maxNumberOfArguments + t->index - _function->locals.size();
- // StackFrameRegister points to its old value on the stack, so even for the first temp we need to
- // subtract at least sizeof(Value).
- offset = - sizeof(Value) * (arg + 1);
- reg = StackFrameRegister;
+ qSwap(_function, function);
+ Assembler* oldAssembler = _asm;
+ _asm = new Assembler(_function);
+
+ int locals = (_function->tempCount - _function->locals.size() + _function->maxNumberOfArguments);
+ locals = (locals + 1) & ~1;
+ _asm->enterStandardStackFrame(locals);
+
+ int contextPointer = 0;
+#ifndef VALUE_FITS_IN_REGISTER
+ // When the return VM value doesn't fit into a register, then
+ // the caller provides a pointer for storage as first argument.
+ // That shifts the index the context pointer argument by one.
+ contextPointer++;
+#endif
+#if CPU(X86)
+ _asm->loadPtr(addressForArgument(contextPointer), Assembler::ContextRegister);
+#elif CPU(X86_64) || CPU(ARM)
+ _asm->move(_asm->registerForArgument(contextPointer), Assembler::ContextRegister);
+#else
+ assert(!"TODO");
+#endif
+
+ foreach (IR::BasicBlock *block, _function->basicBlocks) {
+ _block = block;
+ _asm->registerBlock(_block);
+ foreach (IR::Stmt *s, block->statements) {
+ s->accept(this);
+ }
}
- return Pointer(reg, offset);
+
+ _asm->leaveStandardStackFrame(locals);
+#ifndef VALUE_FITS_IN_REGISTER
+ // Emulate ret(n) instruction
+ // Pop off return address into scratch register ...
+ _asm->pop(Assembler::ScratchRegister);
+ // ... and overwrite the invisible argument with
+ // the return address.
+ _asm->poke(Assembler::ScratchRegister);
+#endif
+ _asm->ret();
+
+ _asm->link();
+
+ qSwap(_function, function);
+ delete _asm;
+ _asm = oldAssembler;
+}
+
+String *InstructionSelection::identifier(const QString &s)
+{
+ return _engine->identifier(s);
}
void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *result)
@@ -260,17 +440,17 @@ void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *resu
break;
case IR::Name::builtin_typeof: {
if (IR::Member *m = call->args->expr->asMember()) {
- generateFunctionCall(result, __qmljs_builtin_typeof_member, m->base->asTemp(), identifier(*m->name), ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof_member, m->base->asTemp(), identifier(*m->name), Assembler::ContextRegister);
return;
} else if (IR::Subscript *ss = call->args->expr->asSubscript()) {
- generateFunctionCall(result, __qmljs_builtin_typeof_element, ss->base->asTemp(), ss->index->asTemp(), ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof_element, ss->base->asTemp(), ss->index->asTemp(), Assembler::ContextRegister);
return;
} else if (IR::Name *n = call->args->expr->asName()) {
- generateFunctionCall(result, __qmljs_builtin_typeof_name, identifier(*n->id), ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof_name, identifier(*n->id), Assembler::ContextRegister);
return;
} else if (IR::Temp *arg = call->args->expr->asTemp()){
assert(arg != 0);
- generateFunctionCall(result, __qmljs_builtin_typeof, arg, ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_typeof, arg, Assembler::ContextRegister);
} else {
assert(false);
}
@@ -278,19 +458,19 @@ void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *resu
break;
case IR::Name::builtin_delete: {
if (IR::Member *m = call->args->expr->asMember()) {
- generateFunctionCall(result, __qmljs_delete_member, ContextRegister, m->base->asTemp(), identifier(*m->name));
+ generateFunctionCall(result, __qmljs_delete_member, Assembler::ContextRegister, m->base->asTemp(), identifier(*m->name));
return;
} else if (IR::Subscript *ss = call->args->expr->asSubscript()) {
- generateFunctionCall(result, __qmljs_delete_subscript, ContextRegister, ss->base->asTemp(), ss->index->asTemp());
+ generateFunctionCall(result, __qmljs_delete_subscript, Assembler::ContextRegister, ss->base->asTemp(), ss->index->asTemp());
return;
} else if (IR::Name *n = call->args->expr->asName()) {
- generateFunctionCall(result, __qmljs_delete_name, ContextRegister, identifier(*n->id));
+ generateFunctionCall(result, __qmljs_delete_name, Assembler::ContextRegister, identifier(*n->id));
return;
} else if (call->args->expr->asTemp()){
// ### should throw in strict mode
- Address dest = loadTempAddress(ScratchRegister, result);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, result);
Value v = Value::fromBoolean(false);
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
}
break;
@@ -298,23 +478,23 @@ void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *resu
case IR::Name::builtin_throw: {
IR::Temp *arg = call->args->expr->asTemp();
assert(arg != 0);
- generateFunctionCall(result, __qmljs_builtin_throw, arg, ContextRegister);
+ generateFunctionCall(result, __qmljs_builtin_throw, arg, Assembler::ContextRegister);
}
break;
case IR::Name::builtin_create_exception_handler:
- generateFunctionCall(ReturnValueRegister, __qmljs_create_exception_handler, ContextRegister);
- generateFunctionCall(result, setjmp, ReturnValueRegister);
+ generateFunctionCall(Assembler::ReturnValueRegister, __qmljs_create_exception_handler, Assembler::ContextRegister);
+ generateFunctionCall(result, setjmp, Assembler::ReturnValueRegister);
break;
case IR::Name::builtin_delete_exception_handler:
- generateFunctionCall(Void, __qmljs_delete_exception_handler, ContextRegister);
+ generateFunctionCall(Assembler::Void, __qmljs_delete_exception_handler, Assembler::ContextRegister);
break;
case IR::Name::builtin_get_exception:
- generateFunctionCall(result, __qmljs_get_exception, ContextRegister);
+ generateFunctionCall(result, __qmljs_get_exception, Assembler::ContextRegister);
break;
case IR::Name::builtin_foreach_iterator_object: {
IR::Temp *arg = call->args->expr->asTemp();
assert(arg != 0);
- generateFunctionCall(result, __qmljs_foreach_iterator_object, arg, ContextRegister);
+ generateFunctionCall(result, __qmljs_foreach_iterator_object, arg, Assembler::ContextRegister);
}
break;
case IR::Name::builtin_foreach_next_property_name: {
@@ -326,11 +506,11 @@ void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *resu
case IR::Name::builtin_push_with: {
IR::Temp *arg = call->args->expr->asTemp();
assert(arg != 0);
- generateFunctionCall(Void, __qmljs_builtin_push_with, arg, ContextRegister);
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_push_with, arg, Assembler::ContextRegister);
}
break;
case IR::Name::builtin_pop_with:
- generateFunctionCall(Void, __qmljs_builtin_pop_with, ContextRegister);
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_pop_with, Assembler::ContextRegister);
break;
case IR::Name::builtin_declare_vars: {
if (!call->args)
@@ -340,8 +520,8 @@ void InstructionSelection::callActivationProperty(IR::Call *call, IR::Temp *resu
for (IR::ExprList *it = call->args->next; it; it = it->next) {
IR::Name *arg = it->expr->asName();
assert(arg != 0);
- generateFunctionCall(Void, __qmljs_builtin_declare_var, ContextRegister,
- TrustedImm32(deletable->value != 0), identifier(*arg->id));
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_declare_var, Assembler::ContextRegister,
+ Assembler::TrustedImm32(deletable->value != 0), identifier(*arg->id));
}
}
}
@@ -355,7 +535,7 @@ void InstructionSelection::callValue(IR::Call *call, IR::Temp *result)
int argc = prepareVariableArguments(call->args);
IR::Temp* thisObject = 0;
- generateFunctionCall(result, __qmljs_call_value, ContextRegister, thisObject, baseTemp, baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_call_value, Assembler::ContextRegister, thisObject, baseTemp, baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::callProperty(IR::Call *call, IR::Temp *result)
@@ -365,7 +545,7 @@ void InstructionSelection::callProperty(IR::Call *call, IR::Temp *result)
assert(member->base->asTemp() != 0);
int argc = prepareVariableArguments(call->args);
- generateFunctionCall(result, __qmljs_call_property, ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_call_property, Assembler::ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::constructActivationProperty(IR::New *call, IR::Temp *result)
@@ -383,7 +563,7 @@ void InstructionSelection::constructProperty(IR::New *call, IR::Temp *result)
assert(member->base->asTemp() != 0);
int argc = prepareVariableArguments(call->args);
- generateFunctionCall(result, __qmljs_construct_property, ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_construct_property, Assembler::ContextRegister, member->base->asTemp(), identifier(*member->name), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::constructValue(IR::New *call, IR::Temp *result)
@@ -392,7 +572,7 @@ void InstructionSelection::constructValue(IR::New *call, IR::Temp *result)
assert(baseTemp != 0);
int argc = prepareVariableArguments(call->args);
- generateFunctionCall(result, __qmljs_construct_value, ContextRegister, baseTemp, baseAddressForCallArguments(), TrustedImm32(argc));
+ generateFunctionCall(result, __qmljs_construct_value, Assembler::ContextRegister, baseTemp, baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::visitExp(IR::Exp *s)
@@ -435,7 +615,7 @@ void InstructionSelection::visitMove(IR::Move *s)
String *propertyName = identifier(*n->id);
if (s->source->asTemp() || s->source->asConst()) {
- generateFunctionCall(Void, __qmljs_set_activation_property, ContextRegister, propertyName, s->source);
+ generateFunctionCall(Assembler::Void, __qmljs_set_activation_property, Assembler::ContextRegister, propertyName, s->source);
return;
} else {
Q_UNREACHABLE();
@@ -443,14 +623,14 @@ void InstructionSelection::visitMove(IR::Move *s)
} else if (IR::Temp *t = s->target->asTemp()) {
if (IR::Name *n = s->source->asName()) {
if (*n->id == QStringLiteral("this")) { // ### `this' should be a builtin.
- generateFunctionCall(t, __qmljs_get_thisObject, ContextRegister);
+ generateFunctionCall(t, __qmljs_get_thisObject, Assembler::ContextRegister);
} else {
String *propertyName = identifier(*n->id);
- generateFunctionCall(t, __qmljs_get_activation_property, ContextRegister, propertyName);
+ generateFunctionCall(t, __qmljs_get_activation_property, Assembler::ContextRegister, propertyName);
}
return;
} else if (IR::Const *c = s->source->asConst()) {
- Address dest = loadTempAddress(ScratchRegister, t);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Value v;
switch (c->type) {
case IR::NullType:
@@ -475,23 +655,23 @@ void InstructionSelection::visitMove(IR::Move *s)
Q_UNIMPLEMENTED();
assert(!"TODO");
}
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
} else if (IR::Temp *t2 = s->source->asTemp()) {
- copyValue(t, t2);
+ _asm->copyValue(t, t2);
return;
} else if (IR::String *str = s->source->asString()) {
- Address dest = loadTempAddress(ScratchRegister, t);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Value v = Value::fromString(_engine->newString(*str->value));
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
} else if (IR::RegExp *re = s->source->asRegExp()) {
- Address dest = loadTempAddress(ScratchRegister, t);
+ Address dest = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Value v = Value::fromObject(_engine->newRegExpObject(*re->value, re->flags));
- storeValue(v, dest);
+ _asm->storeValue(v, dest);
return;
} else if (IR::Closure *clos = s->source->asClosure()) {
- generateFunctionCall(t, __qmljs_init_closure, TrustedImmPtr(clos->value), ContextRegister);
+ generateFunctionCall(t, __qmljs_init_closure, Assembler::TrustedImmPtr(clos->value), Assembler::ContextRegister);
return;
} else if (IR::New *ctor = s->source->asNew()) {
if (ctor->base->asName()) {
@@ -507,13 +687,13 @@ void InstructionSelection::visitMove(IR::Move *s)
} else if (IR::Member *m = s->source->asMember()) {
//__qmljs_get_property(ctx, result, object, name);
if (IR::Temp *base = m->base->asTemp()) {
- generateFunctionCall(t, __qmljs_get_property, ContextRegister, base, identifier(*m->name));
+ generateFunctionCall(t, __qmljs_get_property, Assembler::ContextRegister, base, identifier(*m->name));
return;
}
assert(!"wip");
return;
} else if (IR::Subscript *ss = s->source->asSubscript()) {
- generateFunctionCall(t, __qmljs_get_element, ContextRegister, ss->base->asTemp(), ss->index->asTemp());
+ generateFunctionCall(t, __qmljs_get_element, Assembler::ContextRegister, ss->base->asTemp(), ss->index->asTemp());
return;
} else if (IR::Unop *u = s->source->asUnop()) {
if (IR::Temp *e = u->expr->asTemp()) {
@@ -529,13 +709,13 @@ void InstructionSelection::visitMove(IR::Move *s)
} // switch
if (op)
- generateFunctionCallImp(t, opName, op, e, ContextRegister);
+ _asm->generateFunctionCallImp(t, opName, op, e, Assembler::ContextRegister);
return;
}
} else if (IR::Binop *b = s->source->asBinop()) {
if ((b->left->asTemp() || b->left->asConst()) &&
(b->right->asTemp() || b->right->asConst())) {
- generateBinOp((IR::AluOp)b->op, t, b->left, b->right);
+ _asm->generateBinOp((IR::AluOp)b->op, t, b->left, b->right);
return;
}
} else if (IR::Call *c = s->source->asCall()) {
@@ -553,7 +733,7 @@ void InstructionSelection::visitMove(IR::Move *s)
} else if (IR::Member *m = s->target->asMember()) {
if (IR::Temp *base = m->base->asTemp()) {
if (s->source->asTemp() || s->source->asConst()) {
- generateFunctionCall(Void, __qmljs_set_property, ContextRegister, base, identifier(*m->name), s->source);
+ generateFunctionCall(Assembler::Void, __qmljs_set_property, Assembler::ContextRegister, base, identifier(*m->name), s->source);
return;
} else {
Q_UNREACHABLE();
@@ -561,7 +741,7 @@ void InstructionSelection::visitMove(IR::Move *s)
}
} else if (IR::Subscript *ss = s->target->asSubscript()) {
if (s->source->asTemp() || s->source->asConst()) {
- generateFunctionCall(Void, __qmljs_set_element, ContextRegister, ss->base->asTemp(), ss->index->asTemp(), s->source);
+ generateFunctionCall(Assembler::Void, __qmljs_set_element, Assembler::ContextRegister, ss->base->asTemp(), ss->index->asTemp(), s->source);
return;
} else {
Q_UNIMPLEMENTED();
@@ -571,7 +751,7 @@ void InstructionSelection::visitMove(IR::Move *s)
// inplace assignment, e.g. x += 1, ++x, ...
if (IR::Temp *t = s->target->asTemp()) {
if (s->source->asTemp() || s->source->asConst()) {
- generateBinOp((IR::AluOp)s->op, t, t, s->source);
+ _asm->generateBinOp((IR::AluOp)s->op, t, t, s->source);
return;
}
} else if (IR::Name *n = s->target->asName()) {
@@ -595,7 +775,7 @@ void InstructionSelection::visitMove(IR::Move *s)
break;
}
if (op) {
- generateFunctionCallImp(Void, opName, op, s->source, identifier(*n->id), ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::Void, opName, op, s->source, identifier(*n->id), Assembler::ContextRegister);
}
return;
}
@@ -623,7 +803,7 @@ void InstructionSelection::visitMove(IR::Move *s)
if (op) {
IR::Temp* base = ss->base->asTemp();
IR::Temp* index = ss->index->asTemp();
- generateFunctionCallImp(Void, opName, op, base, index, s->source, ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::Void, opName, op, base, index, s->source, Assembler::ContextRegister);
}
return;
}
@@ -651,7 +831,7 @@ void InstructionSelection::visitMove(IR::Move *s)
if (op) {
IR::Temp* base = m->base->asTemp();
String* member = identifier(*m->name);
- generateFunctionCallImp(Void, opName, op, s->source, base, member, ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::Void, opName, op, s->source, base, member, Assembler::ContextRegister);
}
return;
}
@@ -666,38 +846,32 @@ void InstructionSelection::visitMove(IR::Move *s)
void InstructionSelection::visitJump(IR::Jump *s)
{
- jumpToBlock(s->target);
-}
-
-void InstructionSelection::jumpToBlock(IR::BasicBlock *target)
-{
- if (_block->index + 1 != target->index)
- _patches[target].append(jump());
+ _asm->jumpToBlock(_block, s->target);
}
void InstructionSelection::visitCJump(IR::CJump *s)
{
if (IR::Temp *t = s->cond->asTemp()) {
- Address temp = loadTempAddress(ScratchRegister, t);
+ Address temp = _asm->loadTempAddress(Assembler::ScratchRegister, t);
Address tag = temp;
tag.offset += offsetof(VM::Value, tag);
- Jump booleanConversion = branch32(NotEqual, tag, TrustedImm32(VM::Value::Boolean_Type));
+ Assembler::Jump booleanConversion = _asm->branch32(Assembler::NotEqual, tag, Assembler::TrustedImm32(VM::Value::Boolean_Type));
Address data = temp;
data.offset += offsetof(VM::Value, int_32);
- load32(data, ReturnValueRegister);
- Jump testBoolean = jump();
+ _asm->load32(data, Assembler::ReturnValueRegister);
+ Assembler::Jump testBoolean = _asm->jump();
- booleanConversion.link(this);
+ booleanConversion.link(_asm);
{
- generateFunctionCall(ReturnValueRegister, __qmljs_to_boolean, t, ContextRegister);
+ generateFunctionCall(Assembler::ReturnValueRegister, __qmljs_to_boolean, t, Assembler::ContextRegister);
}
- testBoolean.link(this);
- Jump target = branch32(NotEqual, ReturnValueRegister, TrustedImm32(0));
- _patches[s->iftrue].append(target);
+ testBoolean.link(_asm);
+ Assembler::Jump target = _asm->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister, Assembler::TrustedImm32(0));
+ _asm->addPatch(s->iftrue, target);
- jumpToBlock(s->iffalse);
+ _asm->jumpToBlock(_block, s->iffalse);
return;
} else if (IR::Binop *b = s->cond->asBinop()) {
if ((b->left->asTemp() || b->left->asConst()) &&
@@ -718,12 +892,12 @@ void InstructionSelection::visitCJump(IR::CJump *s)
case IR::OpIn: setOp(op, opName, __qmljs_cmp_in); break;
} // switch
- generateFunctionCallImp(ReturnValueRegister, opName, op, b->left, b->right, ContextRegister);
+ _asm->generateFunctionCallImp(Assembler::ReturnValueRegister, opName, op, b->left, b->right, Assembler::ContextRegister);
- Jump target = branch32(NotEqual, ReturnValueRegister, TrustedImm32(0));
- _patches[s->iftrue].append(target);
+ Assembler::Jump target = _asm->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister, Assembler::TrustedImm32(0));
+ _asm->addPatch(s->iftrue, target);
- jumpToBlock(s->iffalse);
+ _asm->jumpToBlock(_block, s->iffalse);
return;
} else {
assert(!"wip");
@@ -738,10 +912,10 @@ void InstructionSelection::visitRet(IR::Ret *s)
{
if (IR::Temp *t = s->expr->asTemp()) {
#ifdef VALUE_FITS_IN_REGISTER
- copyValue(ReturnValueRegister, t);
+ _asm->copyValue(Assembler::ReturnValueRegister, t);
#else
- loadPtr(addressForArgument(0), ReturnValueRegister);
- copyValue(Address(ReturnValueRegister, 0), t);
+ _asm->loadPtr(addressForArgument(0), Assembler::ReturnValueRegister);
+ _asm->copyValue(Address(Assembler::ReturnValueRegister, 0), t);
#endif
return;
}
@@ -760,7 +934,7 @@ int InstructionSelection::prepareVariableArguments(IR::ExprList* args)
for (IR::ExprList *it = args; it; it = it->next, ++i) {
IR::Temp *arg = it->expr->asTemp();
assert(arg != 0);
- copyValue(argumentAddressForCall(i), arg);
+ _asm->copyValue(argumentAddressForCall(i), arg);
}
return argc;
@@ -772,112 +946,13 @@ void InstructionSelection::callRuntimeMethodImp(IR::Temp *result, const char* na
assert(baseName != 0);
int argc = prepareVariableArguments(args);
- generateFunctionCallImp(result, name, method, ContextRegister, identifier(*baseName->id), baseAddressForCallArguments(), TrustedImm32(argc));
+ _asm->generateFunctionCallImp(result, name, method, Assembler::ContextRegister, identifier(*baseName->id), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
void InstructionSelection::callRuntimeMethodImp(IR::Temp *result, const char* name, BuiltinMethod method, IR::ExprList *args)
{
int argc = prepareVariableArguments(args);
- generateFunctionCallImp(result, name, method, ContextRegister, baseAddressForCallArguments(), TrustedImm32(argc));
-}
-
-template <typename Result, typename Source>
-void InstructionSelection::copyValue(Result result, Source source)
-{
-#ifdef VALUE_FITS_IN_REGISTER
- // Use ReturnValueRegister as "scratch" register because loadArgument
- // and storeArgument are functions that may need a scratch register themselves.
- loadArgument(source, ReturnValueRegister);
- storeArgument(ReturnValueRegister, result);
-#else
- loadDouble(source, FPGpr0);
- storeDouble(FPGpr0, result);
-#endif
+ _asm->generateFunctionCallImp(result, name, method, Assembler::ContextRegister, baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
}
-void InstructionSelection::generateBinOp(IR::AluOp operation, IR::Temp* target, IR::Expr* left, IR::Expr* right)
-{
- const BinaryOperationInfo& info = binaryOperations[operation];
- if (!info.fallbackImplementation) {
- assert(!"unreachable");
- return;
- }
-
- Value leftConst = Value::undefinedValue();
- Value rightConst = Value::undefinedValue();
-
- bool canDoInline = info.inlineMemRegOp && info.inlineImmRegOp;
-
- if (canDoInline) {
- if (left->asConst()) {
- leftConst = convertToValue(left->asConst());
- canDoInline = canDoInline && leftConst.tryIntegerConversion();
- }
- if (right->asConst()) {
- rightConst = convertToValue(right->asConst());
- canDoInline = canDoInline && rightConst.tryIntegerConversion();
- }
- }
-
- Jump binOpFinished;
-
- if (canDoInline) {
-
- Jump leftTypeCheck;
- if (left->asTemp()) {
- Address typeAddress = loadTempAddress(ScratchRegister, left->asTemp());
- typeAddress.offset += offsetof(VM::Value, tag);
- leftTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
- }
-
- Jump rightTypeCheck;
- if (right->asTemp()) {
- Address typeAddress = loadTempAddress(ScratchRegister, right->asTemp());
- typeAddress.offset += offsetof(VM::Value, tag);
- rightTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
- }
-
- if (left->asTemp()) {
- Address leftValue = loadTempAddress(ScratchRegister, left->asTemp());
- leftValue.offset += offsetof(VM::Value, int_32);
- load32(leftValue, IntegerOpRegister);
- } else { // left->asConst()
- move(TrustedImm32(leftConst.integerValue()), IntegerOpRegister);
- }
-
- Jump overflowCheck;
- if (right->asTemp()) {
- Address rightValue = loadTempAddress(ScratchRegister, right->asTemp());
- rightValue.offset += offsetof(VM::Value, int_32);
-
- overflowCheck = (this->*info.inlineMemRegOp)(rightValue, IntegerOpRegister);
- } else { // right->asConst()
- overflowCheck = (this->*info.inlineImmRegOp)(TrustedImm32(rightConst.integerValue()), IntegerOpRegister);
- }
-
- Address resultAddr = loadTempAddress(ScratchRegister, target);
- Address resultValueAddr = resultAddr;
- resultValueAddr.offset += offsetof(VM::Value, int_32);
- store32(IntegerOpRegister, resultValueAddr);
-
- Address resultTypeAddr = resultAddr;
- resultTypeAddr.offset += offsetof(VM::Value, tag);
- store32(TrustedImm32(VM::Value::_Integer_Type), resultTypeAddr);
-
- binOpFinished = jump();
-
- if (leftTypeCheck.isSet())
- leftTypeCheck.link(this);
- if (rightTypeCheck.isSet())
- rightTypeCheck.link(this);
- if (overflowCheck.isSet())
- overflowCheck.link(this);
- }
-
- // Fallback
- generateFunctionCallImp(target, info.name, info.fallbackImplementation, left, right, ContextRegister);
-
- if (binOpFinished.isSet())
- binOpFinished.link(this);
-}