author    Laszlo Agocs <laszlo.agocs@qt.io>  2017-04-03 13:35:26 +0200
committer Laszlo Agocs <laszlo.agocs@qt.io>  2017-04-03 13:35:30 +0200
commit    0eeb7ada04cc81d0ab1b61747bdf92fd7c33e1ec (patch)
tree      be4d0201b81b098a2976e857b5c6642f9c96e6ac /src/qml/memory
parent    349d3400c11c0ad1c9aaec01c44b174dbb6ebf9a (diff)
parent    e4894fe13d178b6aa8b5580b402df2d1b4f2615c (diff)
Merge remote-tracking branch 'origin/dev' into wip/scenegraphng
Diffstat (limited to 'src/qml/memory')
-rw-r--r--  src/qml/memory/memory.pri          |   3
-rw-r--r--  src/qml/memory/qv4heap_p.h         |  31
-rw-r--r--  src/qml/memory/qv4mm.cpp           | 331
-rw-r--r--  src/qml/memory/qv4mm_p.h           |  40
-rw-r--r--  src/qml/memory/qv4mmdefs_p.h       | 118
-rw-r--r--  src/qml/memory/qv4writebarrier_p.h | 239
6 files changed, 670 insertions(+), 92 deletions(-)
diff --git a/src/qml/memory/memory.pri b/src/qml/memory/memory.pri
index 38fadbf23f..7956e4a9a1 100644
--- a/src/qml/memory/memory.pri
+++ b/src/qml/memory/memory.pri
@@ -7,7 +7,8 @@ SOURCES += \
HEADERS += \
$$PWD/qv4mm_p.h \
- $$PWD/qv4mmdefs_p.h
+ $$PWD/qv4mmdefs_p.h \
+ $$PWD/qv4writebarrier_p.h
}
HEADERS += \
diff --git a/src/qml/memory/qv4heap_p.h b/src/qml/memory/qv4heap_p.h
index 8285ef4de7..1347a9bd6e 100644
--- a/src/qml/memory/qv4heap_p.h
+++ b/src/qml/memory/qv4heap_p.h
@@ -72,6 +72,7 @@ namespace QV4 {
struct VTable
{
const VTable * const parent;
+ const quint64 markTable;
uint isExecutionContext : 1;
uint isString : 1;
uint isObject : 1;
@@ -91,6 +92,8 @@ namespace Heap {
struct Q_QML_EXPORT Base {
void *operator new(size_t) = delete;
+ static Q_CONSTEXPR quint64 markTable = 0;
+
const VTable *vt;
inline ReturnedValue asReturnedValue() const;
@@ -110,6 +113,12 @@ struct Q_QML_EXPORT Base {
Q_ASSERT(!Chunk::testBit(c->extendsBitmap, h - c->realBase()));
return Chunk::setBit(c->blackBitmap, h - c->realBase());
}
+ inline void setGrayBit() {
+ const HeapItem *h = reinterpret_cast<const HeapItem *>(this);
+ Chunk *c = h->chunk();
+ Q_ASSERT(!Chunk::testBit(c->extendsBitmap, h - c->realBase()));
+ return Chunk::setBit(c->grayBitmap, h - c->realBase());
+ }
inline bool inUse() const {
const HeapItem *h = reinterpret_cast<const HeapItem *>(this);
@@ -133,7 +142,7 @@ struct Q_QML_EXPORT Base {
else if (_livenessStatus == Destroyed)
fprintf(stderr, "ERROR: use of object '%s' after call to destroy() !!\n",
vtable()->className);
- Q_ASSERT(_livenessStatus = Initialized);
+ Q_ASSERT(_livenessStatus == Initialized);
}
void _checkIsDestroyed() {
if (_livenessStatus == Initialized)
@@ -160,20 +169,12 @@ struct Q_QML_EXPORT Base {
#endif
};
V4_ASSERT_IS_TRIVIAL(Base)
-
-template <typename T>
-struct Pointer {
- T *operator->() const { return ptr; }
- operator T *() const { return ptr; }
-
- Pointer &operator =(T *t) { ptr = t; return *this; }
-
- template <typename Type>
- Type *cast() { return static_cast<Type *>(ptr); }
-
- T *ptr;
-};
-V4_ASSERT_IS_TRIVIAL(Pointer<void>)
+// This class needs to consist only of pointer sized members to allow
+// for a size/offset translation when cross-compiling between 32- and
+// 64-bit.
+Q_STATIC_ASSERT(std::is_standard_layout<Base>::value);
+Q_STATIC_ASSERT(offsetof(Base, vt) == 0);
+Q_STATIC_ASSERT(sizeof(Base) == QT_POINTER_SIZE);
}
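
The markTable added to the VTable above packs the GC layout of a heap object into one quint64: two bits per pointer-sized slot, consumed by the mark loop in the qv4mm.cpp hunk further down. A minimal standalone sketch of decoding such a table; the flag values match the MarkFlags enum introduced in qv4mmdefs_p.h, everything else is illustrative:

    #include <cstdint>
    #include <cstdio>

    // Same 2-bit flag values as the MarkFlags enum added in qv4mmdefs_p.h.
    enum MarkFlags { Mark_NoMark = 0, Mark_Value = 1, Mark_Pointer = 2, Mark_ValueArray = 3 };

    // Walk a mark table: slot i of the object is described by bits 2*i and 2*i+1.
    void describeMarkTable(std::uint64_t m)
    {
        for (unsigned slot = 0; m; m >>= 2, ++slot) {
            switch (static_cast<MarkFlags>(m & 3)) {
            case Mark_NoMark:     break; // nothing to mark in this slot
            case Mark_Value:      std::printf("slot %u: Value\n", slot); break;
            case Mark_Pointer:    std::printf("slot %u: Heap::Base pointer\n", slot); break;
            case Mark_ValueArray: std::printf("slot %u: ValueArray\n", slot); break;
            }
        }
    }
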
diff --git a/src/qml/memory/qv4mm.cpp b/src/qml/memory/qv4mm.cpp
index a829e902fb..c025dd09a4 100644
--- a/src/qml/memory/qv4mm.cpp
+++ b/src/qml/memory/qv4mm.cpp
@@ -60,7 +60,11 @@
#include "qv4alloca_p.h"
#include "qv4profiling_p.h"
-#define MM_DEBUG 0
+//#define MM_STATS
+
+#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
+#define MM_STATS
+#endif
#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
@@ -113,14 +117,16 @@ struct MemorySegment {
pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
nChunks = NumChunks;
- if (base != pageReservation.base())
+ availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
+ if (availableBytes < SegmentSize)
--nChunks;
}
MemorySegment(MemorySegment &&other) {
qSwap(pageReservation, other.pageReservation);
qSwap(base, other.base);
- qSwap(nChunks, other.nChunks);
qSwap(allocatedMap, other.allocatedMap);
+ qSwap(availableBytes, other.availableBytes);
+ qSwap(nChunks, other.nChunks);
}
~MemorySegment() {
@@ -150,7 +156,7 @@ struct MemorySegment {
void free(Chunk *chunk, size_t size) {
DEBUG << "freeing chunk" << chunk;
size_t index = static_cast<size_t>(chunk - base);
- size_t end = index + (size - 1)/Chunk::ChunkSize + 1;
+ size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
while (index < end) {
Q_ASSERT(testBit(index));
clearBit(index);
@@ -169,11 +175,19 @@ struct MemorySegment {
PageReservation pageReservation;
Chunk *base = 0;
quint64 allocatedMap = 0;
+ size_t availableBytes = 0;
uint nChunks = 0;
};
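
A note on the availableBytes bookkeeping introduced above: the reservation's base is rounded up to a ChunkSize boundary, so up to ChunkSize - 1 bytes at the front can be unusable and the segment may hold one chunk fewer than expected. A sketch of the arithmetic, assuming an illustrative 64 KiB chunk:

    #include <cstddef>
    #include <cstdint>

    constexpr std::uintptr_t ChunkSize = 64 * 1024; // illustrative; must be a power of two

    // Round p up to the next ChunkSize boundary (same expression as above).
    constexpr std::uintptr_t alignUp(std::uintptr_t p)
    {
        return (p + ChunkSize - 1) & ~(ChunkSize - 1);
    }

    std::size_t usableBytes(std::uintptr_t reservedBase, std::size_t reservedSize)
    {
        std::uintptr_t base = alignUp(reservedBase);
        // Aligning can consume part of the reservation; the remainder decides
        // whether the last chunk of the segment still fits.
        return reservedSize - static_cast<std::size_t>(base - reservedBase);
    }
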
Chunk *MemorySegment::allocate(size_t size)
{
+ if (!allocatedMap && size >= SegmentSize) {
+ // chunk allocated for one huge allocation
+ Q_ASSERT(availableBytes >= size);
+ pageReservation.commit(base, size);
+ allocatedMap = ~static_cast<quintptr>(0);
+ return base;
+ }
size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
uint sequence = 0;
Chunk *candidate = 0;
@@ -251,7 +265,9 @@ void Chunk::sweep()
// DEBUG << "sweeping chunk" << this << (*freeList);
HeapItem *o = realBase();
for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
+#if WRITEBARRIER(none)
Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
+#endif
quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
quintptr e = extendsBitmap[i];
@@ -280,7 +296,7 @@ void Chunk::sweep()
}
}
objectBitmap[i] = blackBitmap[i];
- blackBitmap[i] = 0;
+ grayBitmap[i] = 0;
extendsBitmap[i] = e;
o += Chunk::Bits;
}
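
The sweep loop above works one bitmap word at a time: a slot whose object bit is set but whose black (mark) bit is clear is garbage. A self-contained sketch of that algebra:

    #include <cassert>
    #include <cstdint>

    // Allocated-but-unmarked slots in one bitmap word are garbage.
    std::uintptr_t garbageBits(std::uintptr_t objectBits, std::uintptr_t blackBits)
    {
        // Invariant checked by the Q_ASSERT above: black implies allocated,
        // so the XOR only ever clears object bits, it cannot invent new ones.
        assert((blackBits & objectBits) == blackBits);
        return objectBits ^ blackBits;
    }
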
@@ -319,13 +335,56 @@ void Chunk::freeAll()
}
}
objectBitmap[i] = 0;
- blackBitmap[i] = 0;
+ grayBitmap[i] = 0;
extendsBitmap[i] = e;
o += Chunk::Bits;
}
// DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}
+void Chunk::resetBlackBits()
+{
+ memset(blackBitmap, 0, sizeof(blackBitmap));
+}
+
+#ifdef MM_STATS
+static uint nGrayItems = 0;
+#endif
+
+void Chunk::collectGrayItems(ExecutionEngine *engine)
+{
+ // DEBUG << "sweeping chunk" << this << (*freeList);
+ HeapItem *o = realBase();
+ for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
+#if WRITEBARRIER(none)
+ Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
+#endif
+ quintptr toMark = blackBitmap[i] & grayBitmap[i]; // correct for a Steele type barrier
+ Q_ASSERT((toMark & objectBitmap[i]) == toMark); // check all black objects are marked as being used
+ // DEBUG << hex << " index=" << i << toFree;
+ while (toMark) {
+ uint index = qCountTrailingZeroBits(toMark);
+ quintptr bit = (static_cast<quintptr>(1) << index);
+
+ toMark ^= bit; // mask out marked slot
+ // DEBUG << " index" << hex << index << toFree;
+
+ HeapItem *itemToFree = o + index;
+ Heap::Base *b = *itemToFree;
+ Q_ASSERT(b->inUse());
+ engine->pushForGC(b);
+#ifdef MM_STATS
+ ++nGrayItems;
+// qDebug() << "adding gray item" << b << "to mark stack";
+#endif
+ }
+ grayBitmap[i] = 0;
+ o += Chunk::Bits;
+ }
+ // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
+
+}
+
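
collectGrayItems above re-pushes every object that is both black and gray; under the Steele barrier introduced in this merge that combination means "already scanned, then written to". The inner loop is the classic count-trailing-zeros bit scan; a generic sketch, assuming a GCC/Clang builtin where the real code uses qCountTrailingZeroBits:

    #include <cstdint>

    // Visit the index of every set bit in a word, lowest first.
    template <typename Visitor>
    void forEachSetBit(std::uintptr_t word, Visitor visit)
    {
        while (word) {
            unsigned index = __builtin_ctzll(word);  // lowest set bit
            word ^= std::uintptr_t(1) << index;      // clear it
            visit(index);
        }
    }
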
void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
// qDebug() << "sortIntoBins:";
@@ -335,7 +394,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
#else
const int start = 1;
#endif
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
uint freeSlots = 0;
uint allocatedSlots = 0;
#endif
@@ -345,7 +404,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
if (!i)
usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
allocatedSlots += qPopulationCount(usedSlots);
// qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
@@ -362,7 +421,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
break;
}
usedSlots = (objectBitmap[i]|extendsBitmap[i]);
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
allocatedSlots += qPopulationCount(usedSlots);
// qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
@@ -373,7 +432,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
usedSlots |= (quintptr(1) << index) - 1;
uint freeEnd = i*Bits + index;
uint nSlots = freeEnd - freeStart;
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
// qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
freeSlots += nSlots;
#endif
@@ -384,7 +443,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
bins[bin] = freeItem;
}
}
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}
@@ -555,6 +614,19 @@ void BlockAllocator::freeAll()
}
}
+void BlockAllocator::resetBlackBits()
+{
+ for (auto c : chunks)
+ c->resetBlackBits();
+}
+
+void BlockAllocator::collectGrayItems(ExecutionEngine *engine)
+{
+ for (auto c : chunks)
+ c->collectGrayItems(engine);
+
+}
+
#if MM_DEBUG
void BlockAllocator::stats() {
DEBUG << "MM stats:";
@@ -607,7 +679,6 @@ static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocato
void HugeItemAllocator::sweep() {
auto isBlack = [this] (const HugeChunk &c) {
bool b = c.chunk->first()->isBlack();
- Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
if (!b)
freeHugeChunk(chunkAllocator, c);
return !b;
@@ -617,6 +688,24 @@ void HugeItemAllocator::sweep() {
chunks.erase(newEnd, chunks.end());
}
+void HugeItemAllocator::resetBlackBits()
+{
+ for (auto c : chunks)
+ Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
+}
+
+void HugeItemAllocator::collectGrayItems(ExecutionEngine *engine)
+{
+ for (auto c : chunks)
+ // Correct for a Steele type barrier
+ if (Chunk::testBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase()) &&
+ Chunk::testBit(c.chunk->grayBitmap, c.chunk->first() - c.chunk->realBase())) {
+ HeapItem *i = c.chunk->first();
+ Heap::Base *b = *i;
+ b->mark(engine);
+ }
+}
+
void HugeItemAllocator::freeAll()
{
for (auto &c : chunks) {
@@ -642,15 +731,17 @@ MemoryManager::MemoryManager(ExecutionEngine *engine)
#endif
}
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
+static int allocationCount = 0;
static size_t lastAllocRequestedSlots = 0;
#endif
Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
const size_t stringSize = align(sizeof(Heap::String));
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
+ ++allocationCount;
#endif
bool didGCRun = false;
@@ -661,7 +752,8 @@ Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
unmanagedHeapSize += unmanagedSize;
if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
- runGC();
+ if (!didGCRun)
+ runGC();
if (3*unmanagedHeapSizeGCLimit <= 4*unmanagedHeapSize)
// more than 75% full, raise limit
@@ -679,14 +771,16 @@ Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
m = blockAllocator.allocate(stringSize, true);
}
+// qDebug() << "allocated string" << m;
memset(m, 0, stringSize);
return *m;
}
Heap::Base *MemoryManager::allocData(std::size_t size)
{
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
+ ++allocationCount;
#endif
bool didRunGC = false;
@@ -703,8 +797,11 @@ Heap::Base *MemoryManager::allocData(std::size_t size)
// qDebug() << "unmanagedHeapSize:" << unmanagedHeapSize << "limit:" << unmanagedHeapSizeGCLimit << "unmanagedSize:" << unmanagedSize;
- if (size > Chunk::DataSize)
- return *hugeItemAllocator.allocate(size);
+ if (size > Chunk::DataSize) {
+ HeapItem *h = hugeItemAllocator.allocate(size);
+// qDebug() << "allocating huge item" << h;
+ return *h;
+ }
HeapItem *m = blockAllocator.allocate(size);
if (!m) {
@@ -714,39 +811,92 @@ Heap::Base *MemoryManager::allocData(std::size_t size)
}
memset(m, 0, size);
+// qDebug() << "allocating data" << m;
return *m;
}
Heap::Object *MemoryManager::allocObjectWithMemberData(std::size_t size, uint nMembers)
{
- Heap::Object *o = static_cast<Heap::Object *>(allocData(size));
-
- // ### Could optimize this and allocate both in one go through the block allocator
- if (nMembers) {
+ Heap::Object *o;
+ if (!nMembers) {
+ o = static_cast<Heap::Object *>(allocData(size));
+ } else {
+ // Allocate both in one go through the block allocator
std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
-// qDebug() << "allocating member data for" << o << nMembers << memberSize;
- Heap::Base *m;
- if (memberSize > Chunk::DataSize)
- m = *hugeItemAllocator.allocate(memberSize);
- else
- m = *blockAllocator.allocate(memberSize, true);
- memset(m, 0, memberSize);
- o->memberData = static_cast<Heap::MemberData *>(m);
- o->memberData->setVtable(MemberData::staticVTable());
- o->memberData->size = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
- o->memberData->init();
+ size_t totalSize = size + memberSize;
+ Heap::MemberData *m;
+ if (totalSize > Chunk::DataSize) {
+ o = static_cast<Heap::Object *>(allocData(size));
+ m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
+ } else {
+ HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
+ Heap::Base *b = *mh;
+ o = static_cast<Heap::Object *>(b);
+ mh += (size >> Chunk::SlotSizeShift);
+ m = mh->as<Heap::MemberData>();
+ Chunk *c = mh->chunk();
+ size_t index = mh - c->realBase();
+ Chunk::setBit(c->objectBitmap, index);
+ Chunk::clearBit(c->extendsBitmap, index);
+ }
+ o->memberData.set(engine, m);
+ m->setVtable(MemberData::staticVTable());
+ m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
+ m->values.size = o->memberData->values.alloc;
+ m->init();
// qDebug() << " got" << o->memberData << o->memberData->size;
}
+// qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
return o;
}
-static void drainMarkStack(QV4::ExecutionEngine *engine, Value *markBase)
+static uint markStackSize = 0;
+
+void MemoryManager::drainMarkStack(Value *markBase)
{
while (engine->jsStackTop > markBase) {
Heap::Base *h = engine->popForGC();
+ ++markStackSize;
Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
- Q_ASSERT (h->vtable()->markObjects);
- h->vtable()->markObjects(h, engine);
+ if (h->vtable()->markObjects)
+ h->vtable()->markObjects(h, engine);
+ if (quint64 m = h->vtable()->markTable) {
+// qDebug() << "using mark table:" << hex << m << "for" << h;
+ void **mem = reinterpret_cast<void **>(h);
+ while (m) {
+ MarkFlags mark = static_cast<MarkFlags>(m & 3);
+ switch (mark) {
+ case Mark_NoMark:
+ break;
+ case Mark_Value:
+// qDebug() << "marking value at " << mem;
+ reinterpret_cast<Value *>(mem)->mark(engine);
+ break;
+ case Mark_Pointer: {
+// qDebug() << "marking pointer at " << mem;
+ Heap::Base *p = *reinterpret_cast<Heap::Base **>(mem);
+ if (p)
+ p->mark(engine);
+ break;
+ }
+ case Mark_ValueArray: {
+ Q_ASSERT(m == Mark_ValueArray);
+// qDebug() << "marking Value Array at offset" << hex << (mem - reinterpret_cast<void **>(h));
+ ValueArray<0> *a = reinterpret_cast<ValueArray<0> *>(mem);
+ Value *v = a->values;
+ const Value *end = v + a->alloc;
+ while (v < end) {
+ v->mark(engine);
+ ++v;
+ }
+ break;
+ }
+ }
+
+ m >>= 2;
+ ++mem;
+ }
+ }
}
}
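
The allocObjectWithMemberData rewrite above carves the object and its member data out of a single block, so the member-data header lands a whole number of slots after the object and its object/extends bits are fixed up by hand. A sketch of the slot arithmetic, assuming a 32-byte slot:

    #include <cstddef>

    struct Slot { unsigned char bytes[32]; };  // stand-in for HeapItem
    constexpr std::size_t SlotSizeShift = 5;   // log2(sizeof(Slot))

    // The combined block is addressed in slots; the member-data header starts
    // objectSize/SlotSize slots past the object, as in 'mh += (size >> SlotSizeShift)'.
    Slot *memberDataSlot(Slot *combined, std::size_t objectSize)
    {
        return combined + (objectSize >> SlotSizeShift);
    }
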
@@ -754,12 +904,28 @@ void MemoryManager::mark()
{
Value *markBase = engine->jsStackTop;
- engine->markObjects();
+ markStackSize = 0;
+
+ if (nextGCIsIncremental) {
+ // need to collect all gray items and push them onto the mark stack
+ blockAllocator.collectGrayItems(engine);
+ hugeItemAllocator.collectGrayItems(engine);
+ }
+
+// qDebug() << ">>>> Mark phase:";
+// qDebug() << " mark stack after gray items" << (engine->jsStackTop - markBase);
+
+ engine->markObjects(nextGCIsIncremental);
+
+// qDebug() << " mark stack after engine->mark" << (engine->jsStackTop - markBase);
collectFromJSStack();
+// qDebug() << " mark stack after js stack collect" << (engine->jsStackTop - markBase);
m_persistentValues->mark(engine);
+// qDebug() << " mark stack after persistants" << (engine->jsStackTop - markBase);
+
// Preserve QObject ownership rules within JavaScript: A parent with c++ ownership
// keeps all of its children alive in JavaScript.
@@ -788,14 +954,20 @@ void MemoryManager::mark()
qobjectWrapper->mark(engine);
if (engine->jsStackTop >= engine->jsStackLimit)
- drainMarkStack(engine, markBase);
+ drainMarkStack(markBase);
}
- drainMarkStack(engine, markBase);
+ drainMarkStack(markBase);
}
void MemoryManager::sweep(bool lastSweep)
{
+ if (lastSweep && nextGCIsIncremental) {
+ // ensure we properly clean up on destruction even if the GC is in incremental mode
+ blockAllocator.resetBlackBits();
+ hugeItemAllocator.resetBlackBits();
+ }
+
for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
Managed *m = (*it).managed();
if (!m || m->markBit())
@@ -848,41 +1020,48 @@ void MemoryManager::sweep(bool lastSweep)
bool MemoryManager::shouldRunGC() const
{
size_t total = blockAllocator.totalSlots();
- size_t usedSlots = blockAllocator.usedSlotsAfterLastSweep;
- if (total > MinSlotsGCLimit && usedSlots * GCOverallocation < total * 100)
+ if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
return true;
return false;
}
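
shouldRunGC now compares capacity against the live count recorded after the last full sweep; an incremental sweep leaves previously blackened objects in place, so its count would understate reclaimable memory. A sketch of the predicate under assumed constants:

    #include <cstddef>

    constexpr std::size_t MinSlotsGCLimit = 1024; // assumed threshold
    constexpr std::size_t GCOverallocation = 200; // assumed percent

    bool shouldRunGC(std::size_t totalSlots, std::size_t usedSlotsAfterLastFullSweep)
    {
        // Collect once total capacity has grown well past the live set
        // observed at the last full sweep.
        return totalSlots > MinSlotsGCLimit
            && usedSlotsAfterLastFullSweep * GCOverallocation < totalSlots * 100;
    }
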
size_t dumpBins(BlockAllocator *b, bool printOutput = true)
{
- size_t totalFragmentedSlots = 0;
+ size_t totalSlotMem = 0;
if (printOutput)
- qDebug() << "Fragmentation map:";
+ qDebug() << "Slot map:";
for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
uint nEntries = 0;
HeapItem *h = b->freeBins[i];
while (h) {
++nEntries;
- totalFragmentedSlots += h->freeData.availableSlots;
+ totalSlotMem += h->freeData.availableSlots;
h = h->freeData.next;
}
if (printOutput)
qDebug() << " number of entries in slot" << i << ":" << nEntries;
}
if (printOutput)
- qDebug() << " total mem in bins" << totalFragmentedSlots*Chunk::SlotSize;
- return totalFragmentedSlots*Chunk::SlotSize;
+ qDebug() << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
+ return totalSlotMem*Chunk::SlotSize;
}
-void MemoryManager::runGC()
+void MemoryManager::runGC(bool forceFullCollection)
{
if (gcBlocked) {
// qDebug() << "Not running GC.";
return;
}
+ if (forceFullCollection) {
+ // do a full GC
+ blockAllocator.resetBlackBits();
+ hugeItemAllocator.resetBlackBits();
+ nextGCIsIncremental = false;
+ }
+
QScopedValueRollback<bool> gcBlocker(gcBlocked, true);
+// qDebug() << "runGC";
if (!gcStats) {
// uint oldUsed = allocator.usedMem();
@@ -897,21 +1076,29 @@ void MemoryManager::runGC()
const size_t largeItemsBefore = getLargeItemsMem();
qDebug() << "========== GC ==========";
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
qDebug() << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
+ qDebug() << " Allocations since last GC" << allocationCount;
+ allocationCount = 0;
#endif
+ qDebug() << "Incremental:" << nextGCIsIncremental;
qDebug() << "Allocated" << totalMem << "bytes in" << blockAllocator.chunks.size() << "chunks";
qDebug() << "Fragmented memory before GC" << (totalMem - usedBefore);
dumpBins(&blockAllocator);
+#ifdef MM_STATS
+ nGrayItems = 0;
+#endif
+
QElapsedTimer t;
t.start();
mark();
- qint64 markTime = t.restart();
+ qint64 markTime = t.nsecsElapsed()/1000;
+ t.restart();
sweep();
const size_t usedAfter = getUsedMem();
const size_t largeItemsAfter = getLargeItemsMem();
- qint64 sweepTime = t.elapsed();
+ qint64 sweepTime = t.nsecsElapsed()/1000;
if (triggeredByUnmanagedHeap) {
qDebug() << "triggered by unmanaged heap:";
@@ -920,11 +1107,16 @@ void MemoryManager::runGC()
qDebug() << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
}
size_t memInBins = dumpBins(&blockAllocator);
- qDebug() << "Marked object in" << markTime << "ms.";
- qDebug() << "Sweeped object in" << sweepTime << "ms.";
+#ifdef MM_STATS
+ if (nextGCIsIncremental)
+ qDebug() << " number of gray items:" << nGrayItems;
+#endif
+ qDebug() << "Marked object in" << markTime << "us.";
+ qDebug() << " " << markStackSize << "objects marked";
+ qDebug() << "Sweeped object in" << sweepTime << "us.";
qDebug() << "Used memory before GC:" << usedBefore;
- qDebug() << "Used memory after GC:" << usedAfter;
- qDebug() << "Freed up bytes:" << (usedBefore - usedAfter);
+ qDebug() << "Used memory after GC :" << usedAfter;
+ qDebug() << "Freed up bytes :" << (usedBefore - usedAfter);
size_t lost = blockAllocator.allocatedMem() - memInBins - usedAfter;
if (lost)
qDebug() << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
@@ -940,6 +1132,37 @@ void MemoryManager::runGC()
// ensure we don't 'lose' any memory
Q_ASSERT(blockAllocator.allocatedMem() == getUsedMem() + dumpBins(&blockAllocator, false));
}
+
+ if (!nextGCIsIncremental)
+ usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep;
+
+#if WRITEBARRIER(steele)
+ static int count = 0;
+ ++count;
+ if (aggressiveGC) {
+ nextGCIsIncremental = (count % 256);
+ } else {
+ size_t total = blockAllocator.totalSlots();
+ size_t usedSlots = blockAllocator.usedSlotsAfterLastSweep;
+ if (!nextGCIsIncremental) {
+            // always try an incremental GC after a full one, unless there is lots of memory pressure anyway
+ nextGCIsIncremental = usedSlots * 4 < total * 3;
+ count = 0;
+ } else {
+ if (count > 16)
+ nextGCIsIncremental = false;
+ else
+ nextGCIsIncremental = usedSlots * 4 < total * 3; // less than 75% full
+ }
+ }
+#else
+ nextGCIsIncremental = false;
+#endif
+ if (!nextGCIsIncremental) {
+ // do a full GC
+ blockAllocator.resetBlackBits();
+ hugeItemAllocator.resetBlackBits();
+ }
}
size_t MemoryManager::getUsedMem() const
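
The scheduling block above decides whether the next collection may be incremental: after a full GC an incremental one is attempted while the heap is under 75% full, and a full collection is forced again after 16 incremental rounds. A compact sketch of that policy:

    #include <cstddef>

    // Returns true if the next GC may be incremental; mirrors the thresholds
    // in the hunk above (75% occupancy, at most 16 incremental rounds).
    bool nextGCIncremental(bool lastWasIncremental, int roundsSinceFull,
                           std::size_t usedSlots, std::size_t totalSlots)
    {
        if (lastWasIncremental && roundsSinceFull > 16)
            return false;                       // force a full collection
        return usedSlots * 4 < totalSlots * 3;  // less than 75% full
    }
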
diff --git a/src/qml/memory/qv4mm_p.h b/src/qml/memory/qv4mm_p.h
index 00daf8a622..7f02a4f929 100644
--- a/src/qml/memory/qv4mm_p.h
+++ b/src/qml/memory/qv4mm_p.h
@@ -80,27 +80,28 @@ struct StackAllocator {
StackAllocator(ChunkAllocator *chunkAlloc);
T *allocate() {
- T *m = nextFree->as<T>();
+ HeapItem *m = nextFree;
if (Q_UNLIKELY(nextFree == lastInChunk)) {
nextChunk();
} else {
nextFree += requiredSlots;
}
-#if MM_DEBUG
+#if MM_DEBUG || !defined(QT_NO_DEBUG)
Chunk *c = m->chunk();
Chunk::setBit(c->objectBitmap, m - c->realBase());
#endif
- return m;
+ return m->as<T>();
}
void free() {
-#if MM_DEBUG
- Chunk::clearBit(item->chunk()->objectBitmap, item - item->chunk()->realBase());
-#endif
if (Q_UNLIKELY(nextFree == firstInChunk)) {
prevChunk();
} else {
nextFree -= requiredSlots;
}
+#if MM_DEBUG || !defined(QT_NO_DEBUG)
+ Chunk *c = nextFree->chunk();
+ Chunk::clearBit(c->objectBitmap, nextFree - c->realBase());
+#endif
}
void nextChunk();
@@ -154,6 +155,8 @@ struct BlockAllocator {
void sweep();
void freeAll();
+ void resetBlackBits();
+ void collectGrayItems(ExecutionEngine *engine);
// bump allocations
HeapItem *nextFree = 0;
@@ -175,6 +178,8 @@ struct HugeItemAllocator {
HeapItem *allocate(size_t size);
void sweep();
void freeAll();
+ void resetBlackBits();
+ void collectGrayItems(ExecutionEngine *engine);
size_t usedMem() const {
size_t used = 0;
@@ -206,11 +211,11 @@ public:
Q_DECL_CONSTEXPR static inline std::size_t align(std::size_t size)
{ return (size + Chunk::SlotSize - 1) & ~(Chunk::SlotSize - 1); }
- QV4::Heap::CallContext *allocSimpleCallContext(QV4::ExecutionEngine *v4)
+ QV4::Heap::SimpleCallContext *allocSimpleCallContext(QV4::ExecutionEngine *v4)
{
Heap::CallContext *ctxt = stackAllocator.allocate();
- memset(ctxt, 0, sizeof(Heap::CallContext));
- ctxt->setVtable(QV4::CallContext::staticVTable());
+ memset(ctxt, 0, sizeof(Heap::SimpleCallContext));
+ ctxt->setVtable(QV4::SimpleCallContext::staticVTable());
ctxt->init(v4);
return ctxt;
@@ -245,7 +250,7 @@ public:
o->setVtable(ObjectType::staticVTable());
Object *prototype = ObjectType::defaultPrototype(engine);
o->internalClass = ic;
- o->prototype = prototype->d();
+ o->prototype.set(engine, prototype->d());
return static_cast<typename ObjectType::Data *>(o);
}
@@ -272,7 +277,7 @@ public:
{
Scope scope(engine);
Scoped<ObjectType> t(scope, allocateObject<ObjectType>(ic));
- t->d_unchecked()->prototype = prototype->d();
+ t->d_unchecked()->prototype.set(engine, prototype->d());
t->d_unchecked()->init();
return t->d();
}
@@ -282,7 +287,7 @@ public:
{
Scope scope(engine);
Scoped<ObjectType> t(scope, allocateObject<ObjectType>(ic));
- t->d_unchecked()->prototype = prototype->d();
+ t->d_unchecked()->prototype.set(engine, prototype->d());
t->d_unchecked()->init(arg1);
return t->d();
}
@@ -292,7 +297,7 @@ public:
{
Scope scope(engine);
Scoped<ObjectType> t(scope, allocateObject<ObjectType>(ic));
- t->d_unchecked()->prototype = prototype->d();
+ t->d_unchecked()->prototype.set(engine, prototype->d());
t->d_unchecked()->init(arg1, arg2);
return t->d();
}
@@ -302,7 +307,7 @@ public:
{
Scope scope(engine);
Scoped<ObjectType> t(scope, allocateObject<ObjectType>(ic));
- t->d_unchecked()->prototype = prototype->d();
+ t->d_unchecked()->prototype.set(engine, prototype->d());
t->d_unchecked()->init(arg1, arg2, arg3);
return t->d();
}
@@ -312,7 +317,7 @@ public:
{
Scope scope(engine);
Scoped<ObjectType> t(scope, allocateObject<ObjectType>(ic));
- t->d_unchecked()->prototype = prototype->d();
+ t->d_unchecked()->prototype.set(engine, prototype->d());
t->d_unchecked()->init(arg1, arg2, arg3, arg4);
return t->d();
}
@@ -417,7 +422,7 @@ public:
return t->d();
}
- void runGC();
+ void runGC(bool forceFullCollection = false);
void dumpStats() const;
@@ -427,6 +432,7 @@ public:
// called when a JS object grows itself. Specifically: Heap::String::append
void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }
+ void drainMarkStack(Value *markBase);
protected:
@@ -457,10 +463,12 @@ public:
std::size_t unmanagedHeapSize = 0; // the amount of bytes of heap that is not managed by the memory manager, but which is held onto by managed items.
std::size_t unmanagedHeapSizeGCLimit;
+ std::size_t usedSlotsAfterLastFullSweep = 0;
bool gcBlocked = false;
bool aggressiveGC = false;
bool gcStats = false;
+ bool nextGCIsIncremental = false;
};
}
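
Throughout this header, raw stores such as "o->prototype = prototype->d()" become "o->prototype.set(engine, ...)": every heap-pointer store has to funnel through the write barrier so the collector notices mutations between incremental mark phases. A toy sketch of the pattern; the names are illustrative, not the QV4 API:

    struct Engine;
    struct Obj;

    // Stand-in for WriteBarrier::write(): record that the owner was mutated.
    inline void writeBarrier(Engine *, void *slot) { (void)slot; }

    struct BarrieredPtr {
        Obj *ptr = nullptr;
        void set(Engine *e, Obj *v)
        {
            ptr = v;               // the store itself
            writeBarrier(e, &ptr); // barrier after the store, as in the Steele scheme
        }
        // Raw assignment is deliberately not provided, so callers cannot
        // bypass the barrier by accident.
    };
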
diff --git a/src/qml/memory/qv4mmdefs_p.h b/src/qml/memory/qv4mmdefs_p.h
index 588ae21ee0..1fc7b6a527 100644
--- a/src/qml/memory/qv4mmdefs_p.h
+++ b/src/qml/memory/qv4mmdefs_p.h
@@ -51,6 +51,7 @@
//
#include <private/qv4global_p.h>
+#include <private/qv4runtimeapi_p.h>
#include <QtCore/qalgorithms.h>
#include <qdebug.h>
@@ -111,22 +112,29 @@ struct Chunk {
HeapItem *realBase();
HeapItem *first();
+ static Q_ALWAYS_INLINE size_t bitmapIndex(size_t index) {
+ return index >> BitShift;
+ }
+ static Q_ALWAYS_INLINE quintptr bitForIndex(size_t index) {
+ return static_cast<quintptr>(1) << (index & (Bits - 1));
+ }
+
static void setBit(quintptr *bitmap, size_t index) {
// Q_ASSERT(index >= HeaderSize/SlotSize && index < ChunkSize/SlotSize);
- bitmap += index >> BitShift;
- quintptr bit = static_cast<quintptr>(1) << (index & (Bits - 1));
+ bitmap += bitmapIndex(index);
+ quintptr bit = bitForIndex(index);
*bitmap |= bit;
}
static void clearBit(quintptr *bitmap, size_t index) {
// Q_ASSERT(index >= HeaderSize/SlotSize && index < ChunkSize/SlotSize);
- bitmap += index >> BitShift;
- quintptr bit = static_cast<quintptr>(1) << (index & (Bits - 1));
+ bitmap += bitmapIndex(index);
+ quintptr bit = bitForIndex(index);
*bitmap &= ~bit;
}
static bool testBit(quintptr *bitmap, size_t index) {
// Q_ASSERT(index >= HeaderSize/SlotSize && index < ChunkSize/SlotSize);
- bitmap += index >> BitShift;
- quintptr bit = static_cast<quintptr>(1) << (index & (Bits - 1));
+ bitmap += bitmapIndex(index);
+ quintptr bit = bitForIndex(index);
return (*bitmap & bit);
}
static void setBits(quintptr *bitmap, size_t index, size_t nBits) {
@@ -176,6 +184,8 @@ struct Chunk {
void sweep();
void freeAll();
+ void resetBlackBits();
+ void collectGrayItems(ExecutionEngine *engine);
void sortIntoBins(HeapItem **bins, uint nBins);
};
@@ -255,6 +265,102 @@ Q_STATIC_ASSERT(sizeof(HeapItem) == Chunk::SlotSize);
Q_STATIC_ASSERT(QT_POINTER_SIZE*8 == Chunk::Bits);
Q_STATIC_ASSERT((1 << Chunk::BitShift) == Chunk::Bits);
+// Base class for the execution engine
+
+#if defined(Q_CC_MSVC) || defined(Q_CC_GNU)
+#pragma pack(push, 1)
+#endif
+struct EngineBase {
+ Heap::ExecutionContext *current = 0;
+
+ Value *jsStackTop = 0;
+ quint8 hasException = false;
+ quint8 writeBarrierActive = false;
+ quint16 unused = 0;
+#if QT_POINTER_SIZE == 8
+ quint8 padding[4];
+#endif
+ MemoryManager *memoryManager = 0;
+ Runtime runtime;
+};
+#if defined(Q_CC_MSVC) || defined(Q_CC_GNU)
+#pragma pack(pop)
+#endif
+
+Q_STATIC_ASSERT(std::is_standard_layout<EngineBase>::value);
+Q_STATIC_ASSERT(offsetof(EngineBase, current) == 0);
+Q_STATIC_ASSERT(offsetof(EngineBase, jsStackTop) == offsetof(EngineBase, current) + QT_POINTER_SIZE);
+Q_STATIC_ASSERT(offsetof(EngineBase, hasException) == offsetof(EngineBase, jsStackTop) + QT_POINTER_SIZE);
+Q_STATIC_ASSERT(offsetof(EngineBase, memoryManager) == offsetof(EngineBase, hasException) + QT_POINTER_SIZE);
+Q_STATIC_ASSERT(offsetof(EngineBase, runtime) == offsetof(EngineBase, memoryManager) + QT_POINTER_SIZE);
+
+// Some helper classes and macros to automate the generation of our
+// tables used for marking objects
+
+enum MarkFlags {
+ Mark_NoMark = 0,
+ Mark_Value = 1,
+ Mark_Pointer = 2,
+ Mark_ValueArray = 3
+};
+
+template <typename T>
+struct MarkFlagEvaluator {
+ static Q_CONSTEXPR quint64 value = 0;
+};
+template <typename T, size_t o>
+struct MarkFlagEvaluator<Heap::Pointer<T, o>> {
+ static Q_CONSTEXPR quint64 value = static_cast<quint64>(Mark_Pointer) << (2*o / sizeof(quintptr));
+};
+template <size_t o>
+struct MarkFlagEvaluator<ValueArray<o>> {
+ static Q_CONSTEXPR quint64 value = static_cast<quint64>(Mark_ValueArray) << (2*o / sizeof(quintptr));
+};
+template <size_t o>
+struct MarkFlagEvaluator<HeapValue<o>> {
+ static Q_CONSTEXPR quint64 value = static_cast<quint64>(Mark_Value) << (2 *o / sizeof(quintptr));
+};
+
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION(c, gcType, type, name) \
+ HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_##gcType(c, type, name)
+
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_Pointer(c, type, name) Pointer<type, 0> name;
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_NoMark(c, type, name) type name;
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_HeapValue(c, type, name) HeapValue<0> name;
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_ValueArray(c, type, name) type<0> name;
+
+#define HEAP_OBJECT_MEMBER_EXPANSION(c, gcType, type, name) \
+ HEAP_OBJECT_MEMBER_EXPANSION_##gcType(c, type, name)
+
+#define HEAP_OBJECT_MEMBER_EXPANSION_Pointer(c, type, name) \
+ Pointer<type, offsetof(c##OffsetStruct, name) + baseOffset> name;
+#define HEAP_OBJECT_MEMBER_EXPANSION_NoMark(c, type, name) \
+ type name;
+#define HEAP_OBJECT_MEMBER_EXPANSION_HeapValue(c, type, name) \
+ HeapValue<offsetof(c##OffsetStruct, name) + baseOffset> name;
+#define HEAP_OBJECT_MEMBER_EXPANSION_ValueArray(c, type, name) \
+ type<offsetof(c##OffsetStruct, name) + baseOffset> name;
+
+#define HEAP_OBJECT_MARK_EXPANSION(class, gcType, type, name) \
+ MarkFlagEvaluator<decltype(class::name)>::value |
+
+#define DECLARE_HEAP_OBJECT(name, base) \
+struct name##OffsetStruct { \
+ name##Members(name, HEAP_OBJECT_OFFSET_MEMBER_EXPANSION) \
+}; \
+struct name##SizeStruct : base, name##OffsetStruct {}; \
+struct name##Data { \
+ static Q_CONSTEXPR size_t baseOffset = sizeof(name##SizeStruct) - sizeof(name##OffsetStruct); \
+ name##Members(name, HEAP_OBJECT_MEMBER_EXPANSION) \
+}; \
+Q_STATIC_ASSERT(sizeof(name##SizeStruct) == sizeof(name##Data) + name##Data::baseOffset); \
+static Q_CONSTEXPR quint64 name##_markTable = \
+ (name##Members(name##Data, HEAP_OBJECT_MARK_EXPANSION) 0) | QV4::Heap::base::markTable; \
+ \
+struct name : base, name##Data
+
+#define DECLARE_MARK_TABLE(class) static Q_CONSTEXPR quint64 markTable = class##_markTable
+
}
QT_END_NAMESPACE
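
The DECLARE_HEAP_OBJECT machinery above derives, at compile time, the 2-bits-per-slot mark table from the declared member types. A worked example of the encoding itself (values only, macros elided): a hypothetical object with its vtable in slot 0, a Pointer in slot 1 and a HeapValue in slot 2 yields 0x18:

    #include <cstdint>

    enum MarkFlags { Mark_NoMark = 0, Mark_Value = 1, Mark_Pointer = 2, Mark_ValueArray = 3 };

    // Each pointer-sized slot contributes a 2-bit flag at bit position 2*slot,
    // matching MarkFlagEvaluator's shift of (2 * offset / sizeof(quintptr)).
    constexpr std::uint64_t flagAt(MarkFlags f, unsigned slot)
    {
        return static_cast<std::uint64_t>(f) << (2 * slot);
    }

    constexpr std::uint64_t exampleMarkTable =
            flagAt(Mark_Pointer, 1) | flagAt(Mark_Value, 2);

    static_assert(exampleMarkTable == 0x18, "0b01'10'00: Value@2, Pointer@1, vtable@0");
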
diff --git a/src/qml/memory/qv4writebarrier_p.h b/src/qml/memory/qv4writebarrier_p.h
new file mode 100644
index 0000000000..a2f85822ca
--- /dev/null
+++ b/src/qml/memory/qv4writebarrier_p.h
@@ -0,0 +1,239 @@
+/****************************************************************************
+**
+** Copyright (C) 2016 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the QtQml module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4WRITEBARRIER_P_H
+#define QV4WRITEBARRIER_P_H
+
+//
+// W A R N I N G
+// -------------
+//
+// This file is not part of the Qt API. It exists purely as an
+// implementation detail. This header file may change from version to
+// version without notice, or even be removed.
+//
+// We mean it.
+//
+
+#include <private/qv4global_p.h>
+#include <private/qv4value_p.h>
+
+QT_BEGIN_NAMESPACE
+
+#define WRITEBARRIER_steele -1
+#define WRITEBARRIER_none 1
+
+#define WRITEBARRIER(x) (1/WRITEBARRIER_##x == 1)
+
+namespace QV4 {
+
+namespace WriteBarrier {
+
+enum Type {
+ NoBarrier,
+ Barrier
+};
+
+enum NewValueType {
+ Primitive,
+ Object,
+ Unknown
+};
+
+// ### this needs to be filled with a real memory fence once marking is concurrent
+Q_ALWAYS_INLINE void fence() {}
+
+#if WRITEBARRIER(steele)
+
+template <NewValueType type>
+static Q_CONSTEXPR inline bool isRequired() {
+ return type != Primitive;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Value value)
+{
+ Q_UNUSED(engine);
+ *slot = value;
+ if (isRequired<Unknown>()) {
+ fence();
+ base->setGrayBit();
+ }
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Heap::Base *value)
+{
+ Q_UNUSED(engine);
+ *slot = value;
+ if (isRequired<Object>()) {
+ fence();
+ base->setGrayBit();
+ }
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Heap::Base **slot, Heap::Base *value)
+{
+ Q_UNUSED(engine);
+ *slot = value;
+ fence();
+ base->setGrayBit();
+}
+
+#elif WRITEBARRIER(none)
+
+template <NewValueType type>
+static Q_CONSTEXPR inline bool isRequired() {
+ return false;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Value value)
+{
+ Q_UNUSED(engine);
+ Q_UNUSED(base);
+ *slot = value;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Heap::Base *value)
+{
+ Q_UNUSED(engine);
+ Q_UNUSED(base);
+ *slot = value;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Heap::Base **slot, Heap::Base *value)
+{
+ Q_UNUSED(engine);
+ Q_UNUSED(base);
+ *slot = value;
+}
+
+#endif
+
+}
+
+namespace Heap {
+
+template <typename T, size_t o>
+struct Pointer {
+ static Q_CONSTEXPR size_t offset = o;
+ T operator->() const { return ptr; }
+ operator T () const { return ptr; }
+
+ Heap::Base *base() {
+ Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+ Q_ASSERT(base->inUse());
+ return base;
+ }
+
+ void set(ExecutionEngine *e, T newVal) {
+ WriteBarrier::write(e, base(), reinterpret_cast<Heap::Base **>(&ptr), reinterpret_cast<Heap::Base *>(newVal));
+ }
+
+ template <typename Type>
+ Type *cast() { return static_cast<Type *>(ptr); }
+
+private:
+ T ptr;
+};
+typedef Pointer<char *, 0> V4PointerCheck;
+V4_ASSERT_IS_TRIVIAL(V4PointerCheck)
+
+}
+
+template <size_t offset>
+struct HeapValue : Value {
+ Heap::Base *base() {
+ Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+ Q_ASSERT(base->inUse());
+ return base;
+ }
+
+ void set(ExecutionEngine *e, const Value &newVal) {
+ WriteBarrier::write(e, base(), this, newVal);
+ }
+};
+
+template <size_t offset>
+struct ValueArray {
+ uint size;
+ uint alloc;
+ Value values[1];
+
+ Heap::Base *base() {
+ Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+ Q_ASSERT(base->inUse());
+ return base;
+ }
+
+ void set(ExecutionEngine *e, uint index, Value v) {
+ WriteBarrier::write(e, base(), values + index, v);
+ }
+ void set(ExecutionEngine *e, uint index, Heap::Base *b) {
+ WriteBarrier::write(e, base(), values + index, b);
+ }
+ inline const Value &operator[] (uint index) const {
+ Q_ASSERT(index < alloc);
+ return values[index];
+ }
+ inline const Value *data() const {
+ return values;
+ }
+
+ void insertData(ExecutionEngine *e, uint index, Value v) {
+ for (uint i = size - 1; i > index; --i) {
+ values[i] = values[i - 1];
+ }
+ set(e, index, v);
+ }
+ void removeData(ExecutionEngine *e, uint index, int n = 1) {
+ Q_UNUSED(e);
+ for (uint i = index; i < size - n; ++i) {
+ values[i] = values[i + n];
+ }
+ }
+};
+
+// It's really important that the offset of values in this structure is
+// constant across all architectures, otherwise JIT cross-compiled code will
+// have wrong offsets between host and target.
+Q_STATIC_ASSERT(offsetof(ValueArray<0>, values) == 8);
+
+}
+
+QT_END_NAMESPACE
+
+#endif
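
A closing note on the WRITEBARRIER(x) selection macro in this new file: the selected barrier's token is defined to 1 and every other known barrier's to -1, so (1/WRITEBARRIER_##x == 1) is true only for the selected name yet stays well defined for all known names, while a misspelled name leaves the token undefined (0 inside #if) and the division by zero fails loudly at preprocessing time. A minimal reproduction of the trick:

    #define MYBARRIER_steele -1
    #define MYBARRIER_none    1
    #define MYBARRIER(x) (1/MYBARRIER_##x == 1)

    #if MYBARRIER(none)
    // selected: 1/1 == 1 holds
    #endif
    #if MYBARRIER(steele)
    // not selected: 1/-1 == 1 is false, but still a valid expression
    #endif
    // A typo like MYBARRIER(nonne) would make the token expand to 0 inside
    // #if, and the resulting division by zero aborts the build.
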