author	Lars Knoll <lars.knoll@qt.io>	2017-02-08 16:21:02 +0100
committer	Lars Knoll <lars.knoll@qt.io>	2017-03-09 08:59:20 +0000
commit	2a554434a571dcefd26cf10ef8c5ae8b3b7d66db (patch)
tree	5532f0a0206fbbde0a3099ff1e0ee58188a97275 /src
parent	05de4e044f92dd278a00e410be8f070bc4d66e6f (diff)
Implement a real write barrier
Implement a Steele write barrier for our objects. The barrier is
interesting, as it can also be used for incremental GC runs by simply
turning the barrier on and leaving old objects marked as black.

Change-Id: I0b273974d94a990dee3cd9298089b8b202c75bf2
Reviewed-by: Simon Hausmann <simon.hausmann@qt.io>
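For readers new to the technique: a Steele-style barrier re-grays the
source object whenever one of its pointer fields is written while the
barrier is active, so an incremental marker will re-scan that object.
That is what makes it safe to leave already-marked (black) objects in
place between incremental GC runs. A minimal C++ sketch of the idea,
with simplified placeholder types (the real implementation is the
WriteBarrier::write overloads in qv4writebarrier_p.h below):

    struct EngineBase { bool writeBarrierActive = false; };
    struct HeapBase   { void setGrayBit(); };  // queue object for re-scanning

    // Store a heap pointer into a field of 'base'; if the barrier is on,
    // turn 'base' gray so the marker revisits it.
    inline void write(EngineBase *engine, HeapBase *base,
                      HeapBase **slot, HeapBase *value)
    {
        *slot = value;
        if (engine->writeBarrierActive)
            base->setGrayBit();  // a fence() goes here once marking is concurrent
    }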
Diffstat (limited to 'src')
-rw-r--r--  src/qml/compiler/qv4jsir_p.h         10
-rw-r--r--  src/qml/jit/qv4assembler.cpp         22
-rw-r--r--  src/qml/jit/qv4assembler_p.h        155
-rw-r--r--  src/qml/jit/qv4isel_masm.cpp          1
-rw-r--r--  src/qml/jsruntime/qv4context.cpp     11
-rw-r--r--  src/qml/jsruntime/qv4context_p.h     12
-rw-r--r--  src/qml/jsruntime/qv4engine.cpp       6
-rw-r--r--  src/qml/jsruntime/qv4engine_p.h       8
-rw-r--r--  src/qml/jsruntime/qv4lookup.cpp       2
-rw-r--r--  src/qml/jsruntime/qv4object_p.h       2
-rw-r--r--  src/qml/memory/qv4heap_p.h            6
-rw-r--r--  src/qml/memory/qv4mm.cpp              2
-rw-r--r--  src/qml/memory/qv4mm_p.h             13
-rw-r--r--  src/qml/memory/qv4mmdefs_p.h         11
-rw-r--r--  src/qml/memory/qv4writebarrier_p.h  100
15 files changed, 296 insertions(+), 65 deletions(-)
diff --git a/src/qml/compiler/qv4jsir_p.h b/src/qml/compiler/qv4jsir_p.h
index 8ddeb71f69..756d90a033 100644
--- a/src/qml/compiler/qv4jsir_p.h
+++ b/src/qml/compiler/qv4jsir_p.h
@@ -1367,6 +1367,15 @@ struct Function {
}
return !f->canUseSimpleCall();
}
+ int localsCountForScope(ArgLocal *al) const {
+ uint scope = al->scope;
+ const IR::Function *f = this;
+ while (scope) {
+ f = f->outer;
+ --scope;
+ }
+ return f->locals.size();
+ }
private:
BasicBlock *getOrCreateBasicBlock(int index);
@@ -1436,6 +1445,7 @@ public:
ArgLocal *newArgLocal = f->New<ArgLocal>();
newArgLocal->init(argLocal->kind, argLocal->index, argLocal->scope);
newArgLocal->type = argLocal->type;
+ newArgLocal->isArgumentsOrEval = argLocal->isArgumentsOrEval;
return newArgLocal;
}
diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp
index 59470aeea2..687c35adfa 100644
--- a/src/qml/jit/qv4assembler.cpp
+++ b/src/qml/jit/qv4assembler.cpp
@@ -271,22 +271,28 @@ template <typename TargetConfiguration>
typename Assembler<TargetConfiguration>::Pointer
Assembler<TargetConfiguration>::loadArgLocalAddressForWriting(RegisterID baseReg, IR::ArgLocal *al, WriteBarrier::Type *barrier)
{
+ if (barrier)
+ *barrier = _function->argLocalRequiresWriteBarrier(al) ? WriteBarrier::Barrier : WriteBarrier::NoBarrier;
+
int32_t offset = 0;
int scope = al->scope;
loadPtr(Address(EngineRegister, qOffsetOf(ExecutionEngine, current)), baseReg);
- if (scope) {
+ while (scope) {
loadPtr(Address(baseReg, qOffsetOf(ExecutionContext::Data, outer)), baseReg);
--scope;
- while (scope) {
- loadPtr(Address(baseReg, qOffsetOf(ExecutionContext::Data, outer)), baseReg);
- --scope;
- }
}
switch (al->kind) {
case IR::ArgLocal::Formal:
case IR::ArgLocal::ScopedFormal: {
- loadPtr(Address(baseReg, qOffsetOf(ExecutionContext::Data, callData)), baseReg);
- offset = sizeof(CallData) + (al->index - 1) * sizeof(Value);
+ if (barrier && *barrier == WriteBarrier::Barrier) {
+ // if we need a barrier, the baseReg has to point to the ExecutionContext
+ // callData comes directly after locals, calculate the offset using that
+ offset = qOffsetOf(CallContext::Data, locals.values) + _function->localsCountForScope(al) * sizeof(Value);
+ offset += sizeof(CallData) + (al->index - 1) * sizeof(Value);
+ } else {
+ loadPtr(Address(baseReg, qOffsetOf(ExecutionContext::Data, callData)), baseReg);
+ offset = sizeof(CallData) + (al->index - 1) * sizeof(Value);
+ }
} break;
case IR::ArgLocal::Local:
case IR::ArgLocal::ScopedLocal: {
@@ -295,8 +301,6 @@ Assembler<TargetConfiguration>::loadArgLocalAddressForWriting(RegisterID baseReg
default:
Q_UNREACHABLE();
}
- if (barrier)
- *barrier = _function->argLocalRequiresWriteBarrier(al) ? WriteBarrier::Barrier : WriteBarrier::NoBarrier;
return Pointer(baseReg, offset);
}
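Note how the barrier decision is now computed up front: when a barrier
is required, emitWriteBarrier derives the chunk and gray bit from the
address's base register, so baseReg must keep pointing at the start of
the heap-allocated context rather than at the detached callData block.
A sketch of the resulting offset arithmetic, assuming the CallContext
layout implied by the patch (locals array directly followed by the
CallData block); formalOffset is an illustrative helper, not part of
the patch:

    int32_t formalOffset(const IR::Function *f, IR::ArgLocal *al, bool barrier)
    {
        if (barrier)
            // baseReg stays on the CallContext: skip over the locals
            // array, then index into the trailing CallData.
            return qOffsetOf(CallContext::Data, locals.values)
                   + f->localsCountForScope(al) * sizeof(Value)
                   + sizeof(CallData) + (al->index - 1) * sizeof(Value);
        // no barrier: chase the callData pointer and index from there
        return sizeof(CallData) + (al->index - 1) * sizeof(Value);
    }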
diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h
index 6e95b29ca7..d56b54f491 100644
--- a/src/qml/jit/qv4assembler_p.h
+++ b/src/qml/jit/qv4assembler_p.h
@@ -154,6 +154,56 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
using Jump = typename JITAssembler::Jump;
using Label = typename JITAssembler::Label;
+ static void emitSetGrayBit(JITAssembler *as, RegisterID base)
+ {
+ bool returnValueUsed = (base == TargetPlatform::ReturnValueRegister);
+
+ as->push(TargetPlatform::EngineRegister); // free up one register for work
+
+ RegisterID grayBitmap = returnValueUsed ? TargetPlatform::ScratchRegister : TargetPlatform::ReturnValueRegister;
+ as->move(base, grayBitmap);
+ Q_ASSERT(base != grayBitmap);
+ as->urshift32(TrustedImm32(Chunk::ChunkShift), grayBitmap);
+ as->lshift32(TrustedImm32(Chunk::ChunkShift), grayBitmap);
+ Q_STATIC_ASSERT(offsetof(Chunk, grayBitmap) == 0);
+
+ RegisterID index = base;
+ as->move(base, index);
+ as->sub32(grayBitmap, index);
+ as->urshift32(TrustedImm32(Chunk::SlotSizeShift), index);
+ RegisterID grayIndex = TargetPlatform::EngineRegister;
+ as->move(index, grayIndex);
+ as->urshift32(TrustedImm32(Chunk::BitShift), grayIndex);
+ as->lshift32(TrustedImm32(2), grayIndex); // 4 bytes per quintptr
+ as->add32(grayIndex, grayBitmap);
+ as->and32(TrustedImm32(Chunk::Bits - 1), index);
+
+ RegisterID bit = TargetPlatform::EngineRegister;
+ as->move(TrustedImm32(1), bit);
+ as->lshift32(index, bit);
+
+ as->load32(Pointer(grayBitmap, 0), index);
+ as->or32(bit, index);
+ as->store32(index, Pointer(grayBitmap, 0));
+
+ as->pop(TargetPlatform::EngineRegister);
+ }
+
+#if WRITEBARRIER(steele)
+ static void emitWriteBarrier(JITAssembler *as, Address addr)
+ {
+// RegisterID test = addr.base == TargetPlatform::ReturnValueRegister ? TargetPlatform::ScratchRegister : TargetPlatform::ReturnValueRegister;
+ // if (engine->writeBarrier)
+// as->load8(Address(TargetPlatform::EngineRegister, offsetof(EngineBase, writeBarrierActive)), test);
+// typename JITAssembler::Jump jump = as->branch32(JITAssembler::Equal, test, TrustedImm32(0));
+ // ### emit fence
+ emitSetGrayBit(as, addr.base);
+// jump.link(as);
+ }
+#elif WRITEBARRIER(none)
+ static Q_ALWAYS_INLINE void emitWriteBarrier(JITAssembler *, Address) {}
+#endif
+
static void loadDouble(JITAssembler *as, Address addr, FPRegisterID dest)
{
as->MacroAssembler::loadDouble(addr, dest);
@@ -161,8 +211,9 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void storeDouble(JITAssembler *as, FPRegisterID source, Address addr, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->MacroAssembler::storeDouble(source, addr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, addr);
}
static void storeDouble(JITAssembler *as, FPRegisterID source, IR::Expr* target)
@@ -174,18 +225,23 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void storeValue(JITAssembler *as, QV4::Primitive value, Address destination, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->store32(TrustedImm32(value.int_32()), destination);
destination.offset += 4;
as->store32(TrustedImm32(value.tag()), destination);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, destination);
}
template <typename Source, typename Destination>
static void copyValueViaRegisters(JITAssembler *as, Source source, Destination destination, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->loadDouble(source, TargetPlatform::FPGpr0);
- as->storeDouble(TargetPlatform::FPGpr0, destination, barrier);
+ // We need to pass NoBarrier to storeDouble and call emitWriteBarrier ourselves, as the
+ // code in storeDouble assumes the type we're storing is actually a double, something
+ // that isn't always the case here.
+ as->storeDouble(TargetPlatform::FPGpr0, destination, WriteBarrier::NoBarrier);
+ if (WriteBarrier::isRequired<WriteBarrier::Unknown>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, destination);
}
static void loadDoubleConstant(JITAssembler *as, IR::Const *c, FPRegisterID target)
@@ -200,11 +256,12 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void storeReturnValue(JITAssembler *as, const Pointer &dest, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
Address destination = dest;
as->store32(TargetPlatform::LowReturnValueRegister, destination);
destination.offset += 4;
as->store32(TargetPlatform::HighReturnValueRegister, destination);
+ if (WriteBarrier::isRequired<WriteBarrier::Unknown>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, dest);
}
static void setFunctionReturnValueFromTemp(JITAssembler *as, IR::Temp *t)
@@ -331,10 +388,11 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
as->load32(addr, TargetPlatform::ReturnValueRegister);
WriteBarrier::Type barrier;
Pointer targetAddr = as->loadAddressForWriting(TargetPlatform::ScratchRegister, target, &barrier);
- Q_UNUSED(barrier);
as->store32(TargetPlatform::ReturnValueRegister, targetAddr);
targetAddr.offset += 4;
as->store32(TrustedImm32(Value::Integer_Type_Internal), targetAddr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, targetAddr);
} else {
as->load32(addr, (RegisterID) targetTemp->index);
}
@@ -351,10 +409,11 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void loadManagedPointer(JITAssembler *as, RegisterID registerWithPtr, Pointer destAddr, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->store32(registerWithPtr, destAddr);
destAddr.offset += 4;
as->store32(TrustedImm32(QV4::Value::Managed_Type_Internal_32), destAddr);
+ if (WriteBarrier::isRequired<WriteBarrier::Object>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, destAddr);
}
static Jump generateIsDoubleCheck(JITAssembler *as, RegisterID tagOrValueRegister)
@@ -393,6 +452,56 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
using Jump = typename JITAssembler::Jump;
using Label = typename JITAssembler::Label;
+ static void emitSetGrayBit(JITAssembler *as, RegisterID base)
+ {
+ bool returnValueUsed = (base == TargetPlatform::ReturnValueRegister);
+
+ as->push(TargetPlatform::EngineRegister); // free up one register for work
+
+ RegisterID grayBitmap = returnValueUsed ? TargetPlatform::ScratchRegister : TargetPlatform::ReturnValueRegister;
+ as->move(base, grayBitmap);
+ Q_ASSERT(base != grayBitmap);
+ as->urshift64(TrustedImm32(Chunk::ChunkShift), grayBitmap);
+ as->lshift64(TrustedImm32(Chunk::ChunkShift), grayBitmap);
+ Q_STATIC_ASSERT(offsetof(Chunk, grayBitmap) == 0);
+
+ RegisterID index = base;
+ as->move(base, index);
+ as->sub64(grayBitmap, index);
+ as->urshift64(TrustedImm32(Chunk::SlotSizeShift), index);
+ RegisterID grayIndex = TargetPlatform::EngineRegister;
+ as->move(index, grayIndex);
+ as->urshift64(TrustedImm32(Chunk::BitShift), grayIndex);
+ as->lshift64(TrustedImm32(3), grayIndex); // 8 bytes per quintptr
+ as->add64(grayIndex, grayBitmap);
+ as->and64(TrustedImm32(Chunk::Bits - 1), index);
+
+ RegisterID bit = TargetPlatform::EngineRegister;
+ as->move(TrustedImm32(1), bit);
+ as->lshift64(index, bit);
+
+ as->load64(Pointer(grayBitmap, 0), index);
+ as->or64(bit, index);
+ as->store64(index, Pointer(grayBitmap, 0));
+
+ as->pop(TargetPlatform::EngineRegister);
+ }
+
+#if WRITEBARRIER(steele)
+ static void emitWriteBarrier(JITAssembler *as, Address addr)
+ {
+// RegisterID test = addr.base == TargetPlatform::ReturnValueRegister ? TargetPlatform::ScratchRegister : TargetPlatform::ReturnValueRegister;
+ // if (engine->writeBarrier)
+// as->load8(Address(TargetPlatform::EngineRegister, offsetof(EngineBase, writeBarrierActive)), test);
+// typename JITAssembler::Jump jump = as->branch32(JITAssembler::Equal, test, TrustedImm32(0));
+ // ### emit fence
+ emitSetGrayBit(as, addr.base);
+// jump.link(as);
+ }
+#elif WRITEBARRIER(none)
+ static Q_ALWAYS_INLINE void emitWriteBarrier(JITAssembler *, Address) {}
+#endif
+
static void loadDouble(JITAssembler *as, Address addr, FPRegisterID dest)
{
as->load64(addr, TargetPlatform::ReturnValueRegister);
@@ -402,10 +511,11 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void storeDouble(JITAssembler *as, FPRegisterID source, Address addr, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->moveDoubleTo64(source, TargetPlatform::ReturnValueRegister);
as->xor64(TargetPlatform::DoubleMaskRegister, TargetPlatform::ReturnValueRegister);
as->store64(TargetPlatform::ReturnValueRegister, addr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, addr);
}
static void storeDouble(JITAssembler *as, FPRegisterID source, IR::Expr* target)
@@ -414,8 +524,9 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
as->xor64(TargetPlatform::DoubleMaskRegister, TargetPlatform::ReturnValueRegister);
WriteBarrier::Type barrier;
Pointer ptr = as->loadAddressForWriting(TargetPlatform::ScratchRegister, target, &barrier);
- Q_UNUSED(barrier);
as->store64(TargetPlatform::ReturnValueRegister, ptr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, ptr);
}
static void storeReturnValue(JITAssembler *as, FPRegisterID dest)
@@ -426,8 +537,9 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void storeReturnValue(JITAssembler *as, const Pointer &dest, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->store64(TargetPlatform::ReturnValueRegister, dest);
+ if (WriteBarrier::isRequired<WriteBarrier::Unknown>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, dest);
}
static void setFunctionReturnValueFromTemp(JITAssembler *as, IR::Temp *t)
@@ -479,8 +591,9 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void storeValue(JITAssembler *as, QV4::Primitive value, Address destination, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->store64(TrustedImm64(value.rawValue()), destination);
+ if (WriteBarrier::isRequired<WriteBarrier::Unknown>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, destination);
}
template <typename Source, typename Destination>
@@ -636,10 +749,11 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
WriteBarrier::Type barrier;
Pointer targetAddr = as->loadAddressForWriting(TargetPlatform::ScratchRegister, target, &barrier);
- Q_UNUSED(barrier);
as->store32(TargetPlatform::ReturnValueRegister, targetAddr);
targetAddr.offset += 4;
as->store32(TrustedImm32(Value::Integer_Type_Internal), targetAddr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, targetAddr);
} else {
as->storeInt32(TargetPlatform::ReturnValueRegister, target);
}
@@ -647,8 +761,9 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo
static void loadManagedPointer(JITAssembler *as, RegisterID registerWithPtr, Pointer destAddr, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
as->store64(registerWithPtr, destAddr);
+ if (WriteBarrier::isRequired<WriteBarrier::Object>() && barrier == WriteBarrier::Barrier)
+ emitWriteBarrier(as, destAddr);
}
static Jump generateIsDoubleCheck(JITAssembler *as, RegisterID tagOrValueRegister)
@@ -1261,8 +1376,9 @@ public:
void storeRawValue(FPRegisterID source, Pointer dest, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
TargetConfiguration::MacroAssembler::storeDouble(source, dest);
+ if (WriteBarrier::isRequired<WriteBarrier::Unknown>() && barrier == WriteBarrier::Barrier)
+ RegisterSizeDependentOps::emitWriteBarrier(this, dest);
}
void storeValue(QV4::Primitive value, RegisterID destination)
@@ -1279,6 +1395,11 @@ public:
void storeValue(QV4::Primitive value, IR::Expr* temp);
+ void emitWriteBarrier(Address addr, WriteBarrier::Type barrier) {
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ RegisterSizeDependentOps::emitWriteBarrier(this, addr);
+ }
+
void enterStandardStackFrame(const RegisterInformation &regularRegistersToSave,
const RegisterInformation &fpRegistersToSave);
void leaveStandardStackFrame(const RegisterInformation &regularRegistersToSave,
@@ -1471,10 +1592,11 @@ public:
void storeBool(RegisterID reg, Pointer addr, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
store32(reg, addr);
addr.offset += 4;
store32(TrustedImm32(QV4::Primitive::fromBoolean(0).tag()), addr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ RegisterSizeDependentOps::emitWriteBarrier(this, addr);
}
void storeBool(RegisterID src, RegisterID dest)
@@ -1517,10 +1639,11 @@ public:
void storeInt32(RegisterID reg, Pointer addr, WriteBarrier::Type barrier)
{
- Q_UNUSED(barrier);
store32(reg, addr);
addr.offset += 4;
store32(TrustedImm32(QV4::Primitive::fromInt32(0).tag()), addr);
+ if (WriteBarrier::isRequired<WriteBarrier::Primitive>() && barrier == WriteBarrier::Barrier)
+ RegisterSizeDependentOps::emitWriteBarrier(this, addr);
}
void storeInt32(RegisterID reg, IR::Expr *target)
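The two emitSetGrayBit variants above encode identical pointer
arithmetic, differing only in 32- vs 64-bit operations and 4- vs 8-byte
bitmap words. A plain C++ rendering of what the generated code computes
(a sketch; setGrayBitFor is illustrative, and it relies on grayBitmap
sitting at offset 0 of Chunk, which the Q_STATIC_ASSERT above checks):

    static void setGrayBitFor(quintptr slotAddr)
    {
        // chunk base: clear the low ChunkShift bits of the address
        quintptr chunk = (slotAddr >> Chunk::ChunkShift) << Chunk::ChunkShift;
        // slot index of the written object within the chunk
        quintptr slot = (slotAddr - chunk) >> Chunk::SlotSizeShift;
        // one bitmap word of Chunk::Bits bits covers Chunk::Bits slots
        quintptr *word = reinterpret_cast<quintptr *>(chunk)
                         + (slot >> Chunk::BitShift);
        *word |= quintptr(1) << (slot & (Chunk::Bits - 1));
    }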
diff --git a/src/qml/jit/qv4isel_masm.cpp b/src/qml/jit/qv4isel_masm.cpp
index f4bbedad43..5b57d29bca 100644
--- a/src/qml/jit/qv4isel_masm.cpp
+++ b/src/qml/jit/qv4isel_masm.cpp
@@ -791,6 +791,7 @@ void InstructionSelection<JITAssembler>::swapValues(IR::Expr *source, IR::Expr *
Q_UNREACHABLE();
}
_as->store32(TrustedImm32(tag), addr);
+ _as->emitWriteBarrier(addr, barrier);
}
_as->move(JITTargetPlatform::ScratchRegister, (RegisterID) regTemp->index);
}
diff --git a/src/qml/jsruntime/qv4context.cpp b/src/qml/jsruntime/qv4context.cpp
index a9b0d67630..7a0df5dbf3 100644
--- a/src/qml/jsruntime/qv4context.cpp
+++ b/src/qml/jsruntime/qv4context.cpp
@@ -176,6 +176,17 @@ void Heap::CatchContext::init(ExecutionContext *outerContext, String *exceptionV
this->exceptionValue.set(engine, exceptionValue);
}
+void Heap::WithContext::init(ExecutionContext *outerContext, Object *with)
+{
+ Heap::ExecutionContext::init(outerContext->engine, Heap::ExecutionContext::Type_WithContext);
+ outer.set(engine, outerContext);
+ callData = outer->callData;
+ lookups = outer->lookups;
+ constantTable = outer->constantTable;
+ compilationUnit = outer->compilationUnit;
+
+ withObject.set(engine, with);
+}
Identifier * const *SimpleCallContext::formals() const
{
diff --git a/src/qml/jsruntime/qv4context_p.h b/src/qml/jsruntime/qv4context_p.h
index a405da0f57..79e5f74d8f 100644
--- a/src/qml/jsruntime/qv4context_p.h
+++ b/src/qml/jsruntime/qv4context_p.h
@@ -186,17 +186,7 @@ V4_ASSERT_IS_TRIVIAL(CatchContext)
DECLARE_HEAP_OBJECT(WithContext, ExecutionContext) {
DECLARE_MARK_TABLE(WithContext);
- void init(ExecutionContext *outerContext, Object *with)
- {
- Heap::ExecutionContext::init(outerContext->engine, Heap::ExecutionContext::Type_WithContext);
- outer.set(engine, outerContext);
- callData = outer->callData;
- lookups = outer->lookups;
- constantTable = outer->constantTable;
- compilationUnit = outer->compilationUnit;
-
- withObject.set(engine, with);
- }
+ void init(ExecutionContext *outerContext, Object *with);
};
V4_ASSERT_IS_TRIVIAL(WithContext)
diff --git a/src/qml/jsruntime/qv4engine.cpp b/src/qml/jsruntime/qv4engine.cpp
index 02cd19008a..ab5ced9f58 100644
--- a/src/qml/jsruntime/qv4engine.cpp
+++ b/src/qml/jsruntime/qv4engine.cpp
@@ -130,9 +130,7 @@ QQmlEngine *ExecutionEngine::qmlEngine() const
qint32 ExecutionEngine::maxCallDepth = -1;
ExecutionEngine::ExecutionEngine(EvalISelFactory *factory)
- : current(0)
- , hasException(false)
- , callDepth(0)
+ : callDepth(0)
, memoryManager(new QV4::MemoryManager(this))
, executableAllocator(new QV4::ExecutableAllocator)
, regExpAllocator(new QV4::ExecutableAllocator)
@@ -151,6 +149,8 @@ ExecutionEngine::ExecutionEngine(EvalISelFactory *factory)
, m_profiler(0)
#endif
{
+ writeBarrierActive = true;
+
if (maxCallDepth == -1) {
bool ok = false;
maxCallDepth = qEnvironmentVariableIntValue("QV4_MAX_CALL_DEPTH", &ok);
diff --git a/src/qml/jsruntime/qv4engine_p.h b/src/qml/jsruntime/qv4engine_p.h
index 2ac3a77e29..6de087a4e2 100644
--- a/src/qml/jsruntime/qv4engine_p.h
+++ b/src/qml/jsruntime/qv4engine_p.h
@@ -88,7 +88,7 @@ struct CompilationUnit;
struct InternalClass;
struct InternalClassPool;
-struct Q_QML_EXPORT ExecutionEngine
+struct Q_QML_EXPORT ExecutionEngine : public EngineBase
{
private:
static qint32 maxCallDepth;
@@ -97,12 +97,6 @@ private:
friend struct ExecutionContext;
friend struct Heap::ExecutionContext;
public:
- Heap::ExecutionContext *current;
-
- Value *jsStackTop;
- quint8 hasException;
- quint8 writeBarrierActive = false;
- quint16 unused = 0;
qint32 callDepth;
MemoryManager *memoryManager;
diff --git a/src/qml/jsruntime/qv4lookup.cpp b/src/qml/jsruntime/qv4lookup.cpp
index 9fc11a2d2d..11d7767e05 100644
--- a/src/qml/jsruntime/qv4lookup.cpp
+++ b/src/qml/jsruntime/qv4lookup.cpp
@@ -772,7 +772,7 @@ void Lookup::setter0(Lookup *l, ExecutionEngine *engine, Value &object, const Va
{
Object *o = object.as<Object>();
if (o && o->internalClass() == l->classList[0]) {
- o->setProperty(l->index, value);
+ o->setProperty(engine, l->index, value);
return;
}
diff --git a/src/qml/jsruntime/qv4object_p.h b/src/qml/jsruntime/qv4object_p.h
index c0169ed035..df9d68525d 100644
--- a/src/qml/jsruntime/qv4object_p.h
+++ b/src/qml/jsruntime/qv4object_p.h
@@ -204,6 +204,8 @@ struct Q_QML_EXPORT Object: Managed {
void setProperty(uint index, const Property *p);
void setProperty(uint index, Value v) const { d()->setProperty(engine(), index, v); }
void setProperty(uint index, Heap::Base *b) const { d()->setProperty(engine(), index, b); }
+ void setProperty(ExecutionEngine *engine, uint index, Value v) const { d()->setProperty(engine, index, v); }
+ void setProperty(ExecutionEngine *engine, uint index, Heap::Base *b) const { d()->setProperty(engine, index, b); }
const ObjectVTable *vtable() const { return reinterpret_cast<const ObjectVTable *>(d()->vtable()); }
Heap::Object *prototype() const { return d()->prototype; }
diff --git a/src/qml/memory/qv4heap_p.h b/src/qml/memory/qv4heap_p.h
index bcd1af7705..7bedd705f9 100644
--- a/src/qml/memory/qv4heap_p.h
+++ b/src/qml/memory/qv4heap_p.h
@@ -113,6 +113,12 @@ struct Q_QML_EXPORT Base {
Q_ASSERT(!Chunk::testBit(c->extendsBitmap, h - c->realBase()));
return Chunk::setBit(c->blackBitmap, h - c->realBase());
}
+ inline void setGrayBit() {
+ const HeapItem *h = reinterpret_cast<const HeapItem *>(this);
+ Chunk *c = h->chunk();
+ Q_ASSERT(!Chunk::testBit(c->extendsBitmap, h - c->realBase()));
+ return Chunk::setBit(c->grayBitmap, h - c->realBase());
+ }
inline bool inUse() const {
const HeapItem *h = reinterpret_cast<const HeapItem *>(this);
diff --git a/src/qml/memory/qv4mm.cpp b/src/qml/memory/qv4mm.cpp
index f42d509942..8f4f2f4aa8 100644
--- a/src/qml/memory/qv4mm.cpp
+++ b/src/qml/memory/qv4mm.cpp
@@ -251,7 +251,9 @@ void Chunk::sweep()
// DEBUG << "sweeping chunk" << this << (*freeList);
HeapItem *o = realBase();
for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
+#if WRITEBARRIER(none)
Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
+#endif
quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
quintptr e = extendsBitmap[i];
diff --git a/src/qml/memory/qv4mm_p.h b/src/qml/memory/qv4mm_p.h
index 6e9303acb6..3e542b0aa3 100644
--- a/src/qml/memory/qv4mm_p.h
+++ b/src/qml/memory/qv4mm_p.h
@@ -80,27 +80,28 @@ struct StackAllocator {
StackAllocator(ChunkAllocator *chunkAlloc);
T *allocate() {
- T *m = nextFree->as<T>();
+ HeapItem *m = nextFree;
if (Q_UNLIKELY(nextFree == lastInChunk)) {
nextChunk();
} else {
nextFree += requiredSlots;
}
-#if MM_DEBUG
+#if !defined(QT_NO_DEBUG) || defined(MM_DEBUG)
Chunk *c = m->chunk();
Chunk::setBit(c->objectBitmap, m - c->realBase());
#endif
- return m;
+ return m->as<T>();
}
void free() {
-#if MM_DEBUG
- Chunk::clearBit(item->chunk()->objectBitmap, item - item->chunk()->realBase());
-#endif
if (Q_UNLIKELY(nextFree == firstInChunk)) {
prevChunk();
} else {
nextFree -= requiredSlots;
}
+#if !defined(QT_NO_DEBUG) || defined(MM_DEBUG)
+ Chunk *c = nextFree->chunk();
+ Chunk::clearBit(c->objectBitmap, nextFree - c->realBase());
+#endif
}
void nextChunk();
diff --git a/src/qml/memory/qv4mmdefs_p.h b/src/qml/memory/qv4mmdefs_p.h
index 3f65e97d86..987e669040 100644
--- a/src/qml/memory/qv4mmdefs_p.h
+++ b/src/qml/memory/qv4mmdefs_p.h
@@ -255,6 +255,17 @@ Q_STATIC_ASSERT(sizeof(HeapItem) == Chunk::SlotSize);
Q_STATIC_ASSERT(QT_POINTER_SIZE*8 == Chunk::Bits);
Q_STATIC_ASSERT((1 << Chunk::BitShift) == Chunk::Bits);
+// Base class for the execution engine
+
+struct EngineBase {
+ Heap::ExecutionContext *current = 0;
+
+ Value *jsStackTop = 0;
+ quint8 hasException = false;
+ quint8 writeBarrierActive = false;
+ quint16 unused = 0;
+};
+
// Some helper classes and macros to automate the generation of our
// tables used for marking objects
diff --git a/src/qml/memory/qv4writebarrier_p.h b/src/qml/memory/qv4writebarrier_p.h
index c0f508f962..838ed7a456 100644
--- a/src/qml/memory/qv4writebarrier_p.h
+++ b/src/qml/memory/qv4writebarrier_p.h
@@ -55,6 +55,11 @@
QT_BEGIN_NAMESPACE
+#define WRITEBARRIER_steele -1
+#define WRITEBARRIER_none 1
+
+#define WRITEBARRIER(x) (1/WRITEBARRIER_##x == 1)
+
namespace QV4 {
namespace WriteBarrier {
@@ -64,20 +69,79 @@ enum Type {
Barrier
};
-inline void write(QV4::ExecutionEngine *engine, QV4::Heap::Base *base, QV4::Value *slot, QV4::Value value)
+enum NewValueType {
+ Primitive,
+ Object,
+ Unknown
+};
+
+// ### this needs to be filled with a real memory fence once marking is concurrent
+Q_ALWAYS_INLINE void fence() {}
+
+#if WRITEBARRIER(steele)
+
+template <NewValueType type>
+static Q_CONSTEXPR inline bool isRequired() {
+ return type != Primitive;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Value value)
+{
+ *slot = value;
+ if (engine->writeBarrierActive && isRequired<Unknown>()) {
+ fence();
+ base->setGrayBit();
+ }
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Heap::Base *value)
+{
+ *slot = value;
+ if (engine->writeBarrierActive && isRequired<Object>()) {
+ fence();
+ base->setGrayBit();
+ }
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Heap::Base **slot, Heap::Base *value)
+{
+ *slot = value;
+ if (engine->writeBarrierActive) {
+ fence();
+ base->setGrayBit();
+ }
+}
+
+#elif WRITEBARRIER(none)
+
+template <NewValueType type>
+static Q_CONSTEXPR inline bool isRequired() {
+ return false;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Value value)
+{
+ Q_UNUSED(engine);
+ Q_UNUSED(base);
+ *slot = value;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Heap::Base *value)
{
Q_UNUSED(engine);
Q_UNUSED(base);
*slot = value;
}
-inline void write(QV4::ExecutionEngine *engine, QV4::Heap::Base *base, QV4::Heap::Base **slot, QV4::Heap::Base *value)
+inline void write(EngineBase *engine, Heap::Base *base, Heap::Base **slot, Heap::Base *value)
{
Q_UNUSED(engine);
Q_UNUSED(base);
*slot = value;
}
+#endif
+
}
namespace Heap {
@@ -88,9 +152,14 @@ struct Pointer {
T operator->() const { return ptr; }
operator T () const { return ptr; }
+ Heap::Base *base() {
+ Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+ Q_ASSERT(base->inUse());
+ return base;
+ }
+
void set(ExecutionEngine *e, T newVal) {
- Q_UNUSED(e);
- ptr = newVal;
+ WriteBarrier::write(e, base(), reinterpret_cast<Heap::Base **>(&ptr), reinterpret_cast<Heap::Base *>(newVal));
}
template <typename Type>
@@ -106,9 +175,14 @@ V4_ASSERT_IS_TRIVIAL(V4PointerCheck)
template <size_t offset>
struct HeapValue : Value {
+ Heap::Base *base() {
+ Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+ Q_ASSERT(base->inUse());
+ return base;
+ }
+
void set(ExecutionEngine *e, const Value &newVal) {
- Q_UNUSED(e);
- setRawValue(newVal.rawValue());
+ WriteBarrier::write(e, base(), this, newVal);
}
};
@@ -118,15 +192,17 @@ struct ValueArray {
uint alloc;
Value values[1];
+ Heap::Base *base() {
+ Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+ Q_ASSERT(base->inUse());
+ return base;
+ }
+
void set(ExecutionEngine *e, uint index, Value v) {
- Q_UNUSED(e);
- Q_ASSERT(index < alloc);
- values[index] = v;
+ WriteBarrier::write(e, base(), values + index, v);
}
void set(ExecutionEngine *e, uint index, Heap::Base *b) {
- Q_UNUSED(e);
- Q_ASSERT(index < alloc);
- values[index] = b;
+ WriteBarrier::write(e, base(), values + index, b);
}
inline const Value &operator[] (uint index) const {
Q_ASSERT(index < alloc);
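A closing note on the configuration scheme introduced here:
WRITEBARRIER(x) expands to (1/WRITEBARRIER_##x == 1), so with the
values in this patch WRITEBARRIER(none) is the active branch (the
barrier is compiled out by default) while WRITEBARRIER(steele)
evaluates to false. The division is deliberate: a misspelled barrier
name expands to an undefined macro, which the preprocessor treats as 0
inside #if, so the typo fails with a division by zero instead of
silently disabling the barrier. Flipping the default would look like
this (illustrative only, not part of the patch):

    // Make the Steele barrier the compiled-in default by swapping the values:
    #define WRITEBARRIER_steele 1
    #define WRITEBARRIER_none  -1

    // #if WRITEBARRIER(steel)  // typo: 1/0 == 1 -> preprocessor error, not 'false'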