Diffstat (limited to 'src/qml/memory')
-rw-r--r--  src/qml/memory/memory.pri          |   3
-rw-r--r--  src/qml/memory/qv4heap_p.h         |  29
-rw-r--r--  src/qml/memory/qv4mm.cpp           | 373
-rw-r--r--  src/qml/memory/qv4mm_p.h           |  30
-rw-r--r--  src/qml/memory/qv4mmdefs_p.h       | 108
-rw-r--r--  src/qml/memory/qv4writebarrier_p.h | 206
6 files changed, 644 insertions(+), 105 deletions(-)
diff --git a/src/qml/memory/memory.pri b/src/qml/memory/memory.pri
index 38fadbf23f..7956e4a9a1 100644
--- a/src/qml/memory/memory.pri
+++ b/src/qml/memory/memory.pri
@@ -7,7 +7,8 @@ SOURCES += \
HEADERS += \
$$PWD/qv4mm_p.h \
- $$PWD/qv4mmdefs_p.h
+ $$PWD/qv4mmdefs_p.h \
+ $$PWD/qv4writebarrier_p.h
}
HEADERS += \
diff --git a/src/qml/memory/qv4heap_p.h b/src/qml/memory/qv4heap_p.h
index a4e96b4c84..f00ce4283c 100644
--- a/src/qml/memory/qv4heap_p.h
+++ b/src/qml/memory/qv4heap_p.h
@@ -75,6 +75,7 @@ struct InternalClass;
struct VTable
{
const VTable * const parent;
+ const quint64 markTable;
uint inlinePropertyOffset : 16;
uint nInlineProperties : 16;
uint isExecutionContext : 1;
@@ -87,7 +88,7 @@ struct VTable
uint type : 8;
const char *className;
void (*destroy)(Heap::Base *);
- void (*markObjects)(Heap::Base *, ExecutionEngine *e);
+ void (*markObjects)(Heap::Base *, MarkStack *markStack);
bool (*isEqualTo)(Managed *m, Managed *other);
};
@@ -96,10 +97,12 @@ namespace Heap {
struct Q_QML_EXPORT Base {
void *operator new(size_t) = delete;
+ static Q_CONSTEXPR quint64 markTable = 0;
+
InternalClass *internalClass;
inline ReturnedValue asReturnedValue() const;
- inline void mark(QV4::ExecutionEngine *engine);
+ inline void mark(QV4::MarkStack *markStack);
const VTable *vtable() const { return internalClass->vtable; }
inline bool isMarked() const {
@@ -114,6 +117,12 @@ struct Q_QML_EXPORT Base {
Q_ASSERT(!Chunk::testBit(c->extendsBitmap, h - c->realBase()));
return Chunk::setBit(c->blackBitmap, h - c->realBase());
}
+ inline void setGrayBit() {
+ const HeapItem *h = reinterpret_cast<const HeapItem *>(this);
+ Chunk *c = h->chunk();
+ Q_ASSERT(!Chunk::testBit(c->extendsBitmap, h - c->realBase()));
+ return Chunk::setBit(c->grayBitmap, h - c->realBase());
+ }
inline bool inUse() const {
const HeapItem *h = reinterpret_cast<const HeapItem *>(this);
@@ -122,6 +131,8 @@ struct Q_QML_EXPORT Base {
return Chunk::testBit(c->objectBitmap, h - c->realBase());
}
+ inline void markChildren(MarkStack *markStack);
+
void *operator new(size_t, Managed *m) { return m; }
void *operator new(size_t, Heap::Base *m) { return m; }
void operator delete(void *, Heap::Base *) {}
@@ -171,20 +182,6 @@ Q_STATIC_ASSERT(std::is_standard_layout<Base>::value);
Q_STATIC_ASSERT(offsetof(Base, internalClass) == 0);
Q_STATIC_ASSERT(sizeof(Base) == QT_POINTER_SIZE);
-template <typename T>
-struct Pointer {
- T *operator->() const { return ptr; }
- operator T *() const { return ptr; }
-
- Pointer &operator =(T *t) { ptr = t; return *this; }
-
- template <typename Type>
- Type *cast() { return static_cast<Type *>(ptr); }
-
- T *ptr;
-};
-V4_ASSERT_IS_TRIVIAL(Pointer<void>)
-
}
#ifdef QT_NO_QOBJECT
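The markTable introduced above is a compact per-class layout description: two bits for every pointer-sized word of the heap object, telling the collector how to treat that word (the MarkFlags values are defined in qv4mmdefs_p.h further down in this patch). A minimal standalone sketch of composing such a table by hand; the flag names and encoding follow the patch, while the member layout and exampleMarkTable are hypothetical:

    #include <cstdint>

    // Two bits per pointer-sized word, values as in QV4::MarkFlags.
    enum : uint64_t {
        Mark_NoMark     = 0, // plain data, nothing to mark
        Mark_Value      = 1, // word holds a QV4::Value
        Mark_Pointer    = 2, // word holds a pointer to a heap object
        Mark_ValueArray = 3  // word starts a ValueArray, must come last
    };

    // Hypothetical layout: word 0 is the internalClass (not marked),
    // word 1 a heap pointer, word 2 a Value. Each flag sits at bit
    // position 2 * wordOffset, which is what MarkFlagEvaluator derives
    // from offsetof in qv4mmdefs_p.h.
    constexpr uint64_t exampleMarkTable =
            (uint64_t(Mark_Pointer) << (2 * 1)) |
            (uint64_t(Mark_Value)   << (2 * 2));

    static_assert(exampleMarkTable == 0x18, "pointer at word 1, value at word 2");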
diff --git a/src/qml/memory/qv4mm.cpp b/src/qml/memory/qv4mm.cpp
index 4f4124d505..27b3c756c5 100644
--- a/src/qml/memory/qv4mm.cpp
+++ b/src/qml/memory/qv4mm.cpp
@@ -61,7 +61,11 @@
#include "qv4alloca_p.h"
#include "qv4profiling_p.h"
-#define MM_DEBUG 0
+//#define MM_STATS
+
+#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
+#define MM_STATS
+#endif
#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
@@ -281,6 +285,76 @@ QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif
+void Heap::Base::markChildren(MarkStack *markStack)
+{
+    if (vtable()->markObjects)
+        vtable()->markObjects(this, markStack);
+    if (quint64 m = vtable()->markTable) {
+//        qDebug() << "using mark table:" << hex << m << "for" << this;
+        void **mem = reinterpret_cast<void **>(this);
+        while (m) {
+            MarkFlags mark = static_cast<MarkFlags>(m & 3);
+            switch (mark) {
+            case Mark_NoMark:
+                break;
+            case Mark_Value:
+//                qDebug() << "marking value at " << mem;
+                reinterpret_cast<Value *>(mem)->mark(markStack);
+                break;
+            case Mark_Pointer: {
+//                qDebug() << "marking pointer at " << mem;
+                Heap::Base *p = *reinterpret_cast<Heap::Base **>(mem);
+                if (p)
+                    p->mark(markStack);
+                break;
+            }
+            case Mark_ValueArray: {
+                Q_ASSERT(m == Mark_ValueArray); // a ValueArray must be the last member described by the table
+//                qDebug() << "marking Value Array at offset" << hex << (mem - reinterpret_cast<void **>(this));
+                ValueArray<0> *a = reinterpret_cast<ValueArray<0> *>(mem);
+                Value *v = a->values;
+                const Value *end = v + a->alloc;
+                if (a->alloc > 32*1024) {
+                    // drain from time to time to avoid overflowing the mark stack
+                    Heap::Base **currentBase = markStack->top;
+                    while (v < end) {
+                        v->mark(markStack);
+                        ++v;
+                        if (markStack->top >= currentBase + 32*1024) {
+                            Heap::Base **oldBase = markStack->base;
+                            markStack->base = currentBase;
+                            markStack->drain();
+                            markStack->base = oldBase;
+                        }
+                    }
+                } else {
+                    while (v < end) {
+                        v->mark(markStack);
+                        ++v;
+                    }
+                }
+                break;
+            }
+            }
+
+            m >>= 2;
+            ++mem;
+        }
+    }
+}
+
+// Stores a classname -> freed count mapping.
+typedef QHash<const char*, int> MMStatsHash;
+Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)
+
+// This indirection avoids sticking QHash code in each of the call sites, which
+// shaves off some instructions in the case that it's unused.
+static void increaseFreedCountForClass(const char *className)
+{
+    (*freedObjectStatsGlobal())[className]++;
+}
+
+//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
bool Chunk::sweep(ExecutionEngine *engine)
{
bool hasUsedSlots = false;
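For the decoding side, Heap::Base::markChildren() above consumes the table two bits at a time while stepping one pointer-sized word through the object. A condensed standalone sketch of that loop; the function name and printf output are illustrative only, the real code marks instead of printing:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the control flow of Heap::Base::markChildren. A ValueArray
    // entry ends the walk: the array is variable-length, so it has to be
    // the last member described by the table.
    void decodeMarkTable(uint64_t m)
    {
        for (int word = 0; m; ++word, m >>= 2) {
            switch (m & 3) {
            case 0: /* Mark_NoMark: plain data */               break;
            case 1: std::printf("word %d: Value\n", word);      break;
            case 2: std::printf("word %d: Pointer\n", word);    break;
            case 3: std::printf("word %d: ValueArray\n", word); return;
            }
        }
    }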
@@ -288,7 +362,9 @@ bool Chunk::sweep(ExecutionEngine *engine)
HeapItem *o = realBase();
bool lastSlotFree = false;
for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
+#if WRITEBARRIER(none)
Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
+#endif
quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
quintptr e = extendsBitmap[i];
@@ -317,8 +393,11 @@ bool Chunk::sweep(ExecutionEngine *engine)
HeapItem *itemToFree = o + index;
Heap::Base *b = *itemToFree;
- if (b->vtable()->destroy) {
- b->vtable()->destroy(b);
+ const VTable *v = b->vtable();
+// if (Q_UNLIKELY(classCountPtr))
+// classCountPtr(v->className);
+ if (v->destroy) {
+ v->destroy(b);
b->_checkIsDestroyed();
}
}
@@ -326,8 +405,8 @@ bool Chunk::sweep(ExecutionEngine *engine)
- (blackBitmap[i] | e)) * Chunk::SlotSize,
Profiling::SmallItem);
objectBitmap[i] = blackBitmap[i];
+ grayBitmap[i] = 0;
hasUsedSlots |= (blackBitmap[i] != 0);
- blackBitmap[i] = 0;
extendsBitmap[i] = e;
lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
SDUMP() << " new extends =" << binary(e);
@@ -373,13 +452,56 @@ void Chunk::freeAll(ExecutionEngine *engine)
Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
- qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
objectBitmap[i] = 0;
- blackBitmap[i] = 0;
+ grayBitmap[i] = 0;
extendsBitmap[i] = e;
o += Chunk::Bits;
}
// DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}
+void Chunk::resetBlackBits()
+{
+    memset(blackBitmap, 0, sizeof(blackBitmap));
+}
+
+#ifdef MM_STATS
+static uint nGrayItems = 0;
+#endif
+
+void Chunk::collectGrayItems(MarkStack *markStack)
+{
+    HeapItem *o = realBase();
+    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
+#if WRITEBARRIER(none)
+        Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
+#endif
+        quintptr toMark = blackBitmap[i] & grayBitmap[i]; // correct for a Steele-type barrier
+        Q_ASSERT((toMark & objectBitmap[i]) == toMark); // check all black objects are marked as being used
+        while (toMark) {
+            uint index = qCountTrailingZeroBits(toMark);
+            quintptr bit = (static_cast<quintptr>(1) << index);
+
+            toMark ^= bit; // mask out marked slot
+
+            HeapItem *item = o + index;
+            Heap::Base *b = *item;
+            Q_ASSERT(b->inUse());
+            markStack->push(b);
+#ifdef MM_STATS
+            ++nGrayItems;
+//            qDebug() << "adding gray item" << b << "to mark stack";
+#endif
+        }
+        grayBitmap[i] = 0;
+        o += Chunk::Bits;
+    }
+}
+
void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
// qDebug() << "sortIntoBins:";
@@ -389,7 +511,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
#else
const int start = 1;
#endif
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
uint freeSlots = 0;
uint allocatedSlots = 0;
#endif
@@ -399,7 +521,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
if (!i)
usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
allocatedSlots += qPopulationCount(usedSlots);
// qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
@@ -416,7 +538,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
break;
}
usedSlots = (objectBitmap[i]|extendsBitmap[i]);
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
allocatedSlots += qPopulationCount(usedSlots);
// qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
@@ -427,7 +549,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
usedSlots |= (quintptr(1) << index) - 1;
uint freeEnd = i*Bits + index;
uint nSlots = freeEnd - freeStart;
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
// qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
freeSlots += nSlots;
#endif
@@ -438,7 +560,7 @@ void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
bins[bin] = freeItem;
}
}
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}
@@ -622,6 +744,18 @@ void BlockAllocator::freeAll()
}
}
+void BlockAllocator::resetBlackBits()
+{
+    for (auto c : chunks)
+        c->resetBlackBits();
+}
+
+void BlockAllocator::collectGrayItems(MarkStack *markStack)
+{
+    for (auto c : chunks)
+        c->collectGrayItems(markStack);
+}
HeapItem *HugeItemAllocator::allocate(size_t size) {
Chunk *c = chunkAllocator->allocate(size);
@@ -631,24 +765,29 @@ HeapItem *HugeItemAllocator::allocate(size_t size) {
return c->first();
}
-static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c)
+static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
HeapItem *itemToFree = c.chunk->first();
Heap::Base *b = *itemToFree;
- if (b->vtable()->destroy) {
- b->vtable()->destroy(b);
+ const VTable *v = b->vtable();
+ if (Q_UNLIKELY(classCountPtr))
+ classCountPtr(v->className);
+
+ if (v->destroy) {
+ v->destroy(b);
b->_checkIsDestroyed();
}
chunkAllocator->free(c.chunk, c.size);
}
-void HugeItemAllocator::sweep() {
- auto isBlack = [this] (const HugeChunk &c) {
+void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
+{
+ auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
bool b = c.chunk->first()->isBlack();
Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
if (!b) {
Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
- freeHugeChunk(chunkAllocator, c);
+ freeHugeChunk(chunkAllocator, c, classCountPtr);
}
return !b;
};
@@ -657,11 +796,29 @@ void HugeItemAllocator::sweep() {
chunks.erase(newEnd, chunks.end());
}
+void HugeItemAllocator::resetBlackBits()
+{
+    for (auto c : chunks)
+        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
+}
+
+void HugeItemAllocator::collectGrayItems(MarkStack *markStack)
+{
+    for (auto c : chunks)
+        // correct for a Steele-type barrier
+        if (Chunk::testBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase()) &&
+                Chunk::testBit(c.chunk->grayBitmap, c.chunk->first() - c.chunk->realBase())) {
+            HeapItem *i = c.chunk->first();
+            Heap::Base *b = *i;
+            b->mark(markStack);
+        }
+}
+
void HugeItemAllocator::freeAll()
{
for (auto &c : chunks) {
Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
- freeHugeChunk(chunkAllocator, c);
+ freeHugeChunk(chunkAllocator, c, nullptr);
}
}
@@ -687,15 +844,17 @@ MemoryManager::MemoryManager(ExecutionEngine *engine)
blockAllocator.allocationStats = statistics.allocations;
}
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
+static int allocationCount = 0;
static size_t lastAllocRequestedSlots = 0;
#endif
Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
const size_t stringSize = align(sizeof(Heap::String));
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
+ ++allocationCount;
#endif
bool didGCRun = false;
@@ -706,7 +865,8 @@ Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
unmanagedHeapSize += unmanagedSize;
if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
- runGC();
+ if (!didGCRun)
+ runGC();
if (3*unmanagedHeapSizeGCLimit <= 4*unmanagedHeapSize)
// more than 75% full, raise limit
@@ -724,14 +884,16 @@ Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
m = blockAllocator.allocate(stringSize, true);
}
+// qDebug() << "allocated string" << m;
memset(m, 0, stringSize);
return *m;
}
Heap::Base *MemoryManager::allocData(std::size_t size)
{
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
+ ++allocationCount;
#endif
bool didRunGC = false;
@@ -745,8 +907,11 @@ Heap::Base *MemoryManager::allocData(std::size_t size)
// qDebug() << "unmanagedHeapSize:" << unmanagedHeapSize << "limit:" << unmanagedHeapSizeGCLimit << "unmanagedSize:" << unmanagedSize;
- if (size > Chunk::DataSize)
- return *hugeItemAllocator.allocate(size);
+ if (size > Chunk::DataSize) {
+ HeapItem *h = hugeItemAllocator.allocate(size);
+// qDebug() << "allocating huge item" << h;
+ return *h;
+ }
HeapItem *m = blockAllocator.allocate(size);
if (!m) {
@@ -756,6 +921,7 @@ Heap::Base *MemoryManager::allocData(std::size_t size)
}
memset(m, 0, size);
+// qDebug() << "allocating data" << m;
return *m;
}
@@ -763,48 +929,74 @@ Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable
{
    uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));
-    Heap::Object *o = static_cast<Heap::Object *>(allocData(size));
-    // ### Could optimize this and allocate both in one go through the block allocator
-    if (nMembers > vtable->nInlineProperties) {
+    Heap::Object *o;
+    if (nMembers <= vtable->nInlineProperties) {
+        o = static_cast<Heap::Object *>(allocData(size));
+    } else {
+        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
-//        qDebug() << "allocating member data for" << o << nMembers << memberSize;
-        Heap::Base *m;
-        if (memberSize > Chunk::DataSize)
-            m = *hugeItemAllocator.allocate(memberSize);
-        else
-            m = *blockAllocator.allocate(memberSize, true);
-        memset(m, 0, memberSize);
-        o->memberData = static_cast<Heap::MemberData *>(m);
-        o->memberData->internalClass = engine->internalClasses[EngineBase::Class_MemberData];
+        size_t totalSize = size + memberSize;
+        Heap::MemberData *m;
+        if (totalSize > Chunk::DataSize) {
+            o = static_cast<Heap::Object *>(allocData(size));
+            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
+        } else {
+            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
+            Heap::Base *b = *mh;
+            o = static_cast<Heap::Object *>(b);
+            mh += (size >> Chunk::SlotSizeShift);
+            m = mh->as<Heap::MemberData>();
+            Chunk *c = mh->chunk();
+            size_t index = mh - c->realBase();
+            Chunk::setBit(c->objectBitmap, index);
+            Chunk::clearBit(c->extendsBitmap, index);
+        }
+        o->memberData.set(engine, m);
+        m->internalClass = engine->internalClasses[EngineBase::Class_MemberData];
        Q_ASSERT(o->memberData->internalClass);
-        o->memberData->size = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
-        o->memberData->init();
+        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
+        m->values.size = o->memberData->values.alloc;
+        m->init();
//        qDebug() << " got" << o->memberData << o->memberData->size;
    }
+//    qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
    return o;
}
-static void drainMarkStack(QV4::ExecutionEngine *engine, Value *markBase)
+static uint markStackSize = 0;
+
+MarkStack::MarkStack(ExecutionEngine *engine)
+    : engine(engine)
+{
+    base = (Heap::Base **)engine->gcStack->base();
+    top = base;
+    limit = base + ExecutionEngine::GCStackLimit/sizeof(Heap::Base)*3/4;
+}
+
+void MarkStack::drain()
+{
-    while (engine->jsStackTop > markBase) {
-        Heap::Base *h = engine->popForGC();
+    while (top > base) {
+        Heap::Base *h = pop();
+        ++markStackSize;
        Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
-        Q_ASSERT (h->vtable()->markObjects);
-        h->vtable()->markObjects(h, engine);
+        h->markChildren(this);
    }
}
-void MemoryManager::mark()
+void MemoryManager::collectRoots(MarkStack *markStack)
{
-    Value *markBase = engine->jsStackTop;
-    engine->markObjects();
+    engine->markObjects(markStack);
-    collectFromJSStack();
+    collectFromJSStack(markStack);
-    m_persistentValues->mark(engine);
+    m_persistentValues->mark(markStack);
// Preserve QObject ownership rules within JavaScript: A parent with c++ ownership
// keeps all of its children alive in JavaScript.
@@ -831,16 +1023,24 @@ void MemoryManager::mark()
}
if (keepAlive)
- qobjectWrapper->mark(engine);
+ qobjectWrapper->mark(markStack);
- if (engine->jsStackTop >= engine->jsStackLimit)
- drainMarkStack(engine, markBase);
+ if (markStack->top >= markStack->limit)
+ markStack->drain();
}
+}
+
+void MemoryManager::mark()
+{
+    markStackSize = 0;
+
+    MarkStack markStack(engine);
+    collectRoots(&markStack);
-    drainMarkStack(engine, markBase);
+    markStack.drain();
}
-void MemoryManager::sweep(bool lastSweep)
+void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{
for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
Managed *m = (*it).managed();
@@ -888,14 +1088,13 @@ void MemoryManager::sweep(bool lastSweep)
}
blockAllocator.sweep();
- hugeItemAllocator.sweep();
+ hugeItemAllocator.sweep(classCountPtr);
}
bool MemoryManager::shouldRunGC() const
{
size_t total = blockAllocator.totalSlots();
- size_t usedSlots = blockAllocator.usedSlotsAfterLastSweep;
- if (total > MinSlotsGCLimit && usedSlots * GCOverallocation < total * 100)
+ if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
return true;
return false;
}
@@ -903,15 +1102,15 @@ bool MemoryManager::shouldRunGC() const
size_t dumpBins(BlockAllocator *b, bool printOutput = true)
{
const QLoggingCategory &stats = lcGcAllocatorStats();
- size_t totalFragmentedSlots = 0;
+ size_t totalSlotMem = 0;
if (printOutput)
- qDebug(stats) << "Fragmentation map:";
+ qDebug(stats) << "Slot map:";
for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
uint nEntries = 0;
HeapItem *h = b->freeBins[i];
while (h) {
++nEntries;
- totalFragmentedSlots += h->freeData.availableSlots;
+ totalSlotMem += h->freeData.availableSlots;
h = h->freeData.next;
}
if (printOutput)
@@ -925,8 +1124,8 @@ size_t dumpBins(BlockAllocator *b, bool printOutput = true)
}
if (printOutput)
- qDebug(stats) << " total mem in bins" << totalFragmentedSlots*Chunk::SlotSize;
- return totalFragmentedSlots*Chunk::SlotSize;
+ qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
+ return totalSlotMem*Chunk::SlotSize;
}
void MemoryManager::runGC()
@@ -937,6 +1136,7 @@ void MemoryManager::runGC()
}
QScopedValueRollback<bool> gcBlocker(gcBlocked, true);
+// qDebug() << "runGC";
if (gcStats) {
statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
@@ -956,22 +1156,29 @@ void MemoryManager::runGC()
const QLoggingCategory &stats = lcGcAllocatorStats();
qDebug(stats) << "========== GC ==========";
-#ifndef QT_NO_DEBUG
+#ifdef MM_STATS
qDebug(stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
+ qDebug(stats) << " Allocations since last GC" << allocationCount;
+ allocationCount = 0;
#endif
size_t oldChunks = blockAllocator.chunks.size();
qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
dumpBins(&blockAllocator);
+#ifdef MM_STATS
+ nGrayItems = 0;
+#endif
+
QElapsedTimer t;
t.start();
mark();
- qint64 markTime = t.restart();
- sweep();
+ qint64 markTime = t.nsecsElapsed()/1000;
+ t.restart();
+ sweep(false, increaseFreedCountForClass);
const size_t usedAfter = getUsedMem();
const size_t largeItemsAfter = getLargeItemsMem();
- qint64 sweepTime = t.elapsed();
+ qint64 sweepTime = t.nsecsElapsed()/1000;
if (triggeredByUnmanagedHeap) {
qDebug(stats) << "triggered by unmanaged heap:";
@@ -980,12 +1187,27 @@ void MemoryManager::runGC()
qDebug(stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
}
size_t memInBins = dumpBins(&blockAllocator);
- qDebug(stats) << "Marked object in" << markTime << "ms.";
- qDebug(stats) << "Sweeped object in" << sweepTime << "ms.";
+ qDebug(stats) << "Marked object in" << markTime << "us.";
+ qDebug(stats) << " " << markStackSize << "objects marked";
+ qDebug(stats) << "Sweeped object in" << sweepTime << "us.";
+
+ // sort our object types by number of freed instances
+ MMStatsHash freedObjectStats;
+ std::swap(freedObjectStats, *freedObjectStatsGlobal());
+ typedef std::pair<const char*, int> ObjectStatInfo;
+ std::vector<ObjectStatInfo> freedObjectsSorted;
+ freedObjectsSorted.reserve(freedObjectStats.count());
+ for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
+ freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
+ }
+ std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
+        return a.second != b.second ? a.second > b.second : strcmp(a.first, b.first) < 0;
+ });
+
qDebug(stats) << "Used memory before GC:" << usedBefore;
qDebug(stats) << "Used memory after GC:" << usedAfter;
- qDebug(stats) << "Freed up bytes:" << (usedBefore - usedAfter);
- qDebug(stats) << "Freed up chunks:" << (oldChunks - blockAllocator.chunks.size());
+ qDebug(stats) << "Freed up bytes :" << (usedBefore - usedAfter);
+ qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
size_t lost = blockAllocator.allocatedMem() - memInBins - usedAfter;
if (lost)
qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
@@ -994,6 +1216,11 @@ void MemoryManager::runGC()
qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
}
+
+ for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
+ qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
+ }
+
qDebug(stats) << "======== End GC ========";
}
@@ -1004,6 +1231,12 @@ void MemoryManager::runGC()
    // ensure we don't 'lose' any memory
Q_ASSERT(blockAllocator.allocatedMem() == getUsedMem() + dumpBins(&blockAllocator, false));
}
+
+ usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep;
+
+ // reset all black bits
+ blockAllocator.resetBlackBits();
+ hugeItemAllocator.resetBlackBits();
}
size_t MemoryManager::getUsedMem() const
@@ -1056,7 +1289,7 @@ void MemoryManager::dumpStats() const
qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
}
-void MemoryManager::collectFromJSStack() const
+void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
Value *v = engine->jsStackBase;
Value *top = engine->jsStackTop;
@@ -1064,7 +1297,7 @@ void MemoryManager::collectFromJSStack() const
Managed *m = v->managed();
if (m && m->inUse())
// Skip pointers to already freed objects, they are bogus as well
- m->mark(engine);
+ m->mark(markStack);
++v;
}
}
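One behavioural change in this file is easy to miss: shouldRunGC() now keys off usedSlotsAfterLastFullSweep, which runGC() records after sweeping, instead of the counter that every sweep updates. A standalone sketch of the heuristic; the constants are assumptions for illustration, the real MinSlotsGCLimit and GCOverallocation live in qv4mm_p.h:

    #include <cstddef>

    // Sketch of MemoryManager::shouldRunGC() with hypothetical tuning values.
    bool shouldRunGC(std::size_t totalSlots, std::size_t usedAfterLastFullSweep)
    {
        const std::size_t MinSlotsGCLimit  = 4096; // hypothetical
        const std::size_t GCOverallocation = 200;  // percent, hypothetical
        // With 200 percent, GC triggers once the heap holds more than twice
        // the data that was live after the previous full sweep.
        return totalSlots > MinSlotsGCLimit
                && usedAfterLastFullSweep * GCOverallocation < totalSlots * 100;
    }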
diff --git a/src/qml/memory/qv4mm_p.h b/src/qml/memory/qv4mm_p.h
index aa8d13de8c..fb21481d5b 100644
--- a/src/qml/memory/qv4mm_p.h
+++ b/src/qml/memory/qv4mm_p.h
@@ -78,27 +78,28 @@ struct StackAllocator {
StackAllocator(ChunkAllocator *chunkAlloc);
T *allocate() {
- T *m = nextFree->as<T>();
+ HeapItem *m = nextFree;
if (Q_UNLIKELY(nextFree == lastInChunk)) {
nextChunk();
} else {
nextFree += requiredSlots;
}
-#if MM_DEBUG
+#if MM_DEBUG || !defined(QT_NO_DEBUG) || defined(QT_FORCE_ASSERTS)
Chunk *c = m->chunk();
Chunk::setBit(c->objectBitmap, m - c->realBase());
#endif
- return m;
+ return m->as<T>();
}
void free() {
-#if MM_DEBUG
- Chunk::clearBit(item->chunk()->objectBitmap, item - item->chunk()->realBase());
-#endif
if (Q_UNLIKELY(nextFree == firstInChunk)) {
prevChunk();
} else {
nextFree -= requiredSlots;
}
+#if MM_DEBUG || !defined(QT_NO_DEBUG) || defined(QT_FORCE_ASSERTS)
+ Chunk *c = nextFree->chunk();
+ Chunk::clearBit(c->objectBitmap, nextFree - c->realBase());
+#endif
}
void nextChunk();
@@ -145,6 +146,8 @@ struct BlockAllocator {
void sweep();
void freeAll();
+ void resetBlackBits();
+ void collectGrayItems(MarkStack *markStack);
// bump allocations
HeapItem *nextFree = 0;
@@ -163,8 +166,10 @@ struct HugeItemAllocator {
{}
HeapItem *allocate(size_t size);
- void sweep();
+ void sweep(ClassDestroyStatsCallback classCountPtr);
void freeAll();
+ void resetBlackBits();
+ void collectGrayItems(MarkStack *markStack);
size_t usedMem() const {
size_t used = 0;
@@ -200,8 +205,8 @@ public:
QV4::Heap::CallContext *allocSimpleCallContext()
{
Heap::CallContext *ctxt = stackAllocator.allocate();
- memset(ctxt, 0, sizeof(Heap::CallContext));
- ctxt->internalClass = CallContext::defaultInternalClass(engine);
+ memset(ctxt, 0, sizeof(Heap::SimpleCallContext));
+ ctxt->internalClass = SimpleCallContext::defaultInternalClass(engine);
Q_ASSERT(ctxt->internalClass && ctxt->internalClass->vtable);
ctxt->init();
return ctxt;
@@ -431,7 +436,6 @@ public:
// called when a JS object grows itself. Specifically: Heap::String::append
void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }
-
protected:
/// expects size to be aligned
Heap::Base *allocString(std::size_t unmanagedSize);
@@ -439,10 +443,11 @@ protected:
Heap::Object *allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers);
private:
- void collectFromJSStack() const;
+ void collectFromJSStack(MarkStack *markStack) const;
void mark();
- void sweep(bool lastSweep = false);
+ void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
bool shouldRunGC() const;
+ void collectRoots(MarkStack *markStack);
public:
QV4::ExecutionEngine *engine;
@@ -456,6 +461,7 @@ public:
std::size_t unmanagedHeapSize = 0; // the amount of bytes of heap that is not managed by the memory manager, but which is held onto by managed items.
std::size_t unmanagedHeapSizeGCLimit;
+ std::size_t usedSlotsAfterLastFullSweep = 0;
bool gcBlocked = false;
bool aggressiveGC = false;
diff --git a/src/qml/memory/qv4mmdefs_p.h b/src/qml/memory/qv4mmdefs_p.h
index 7c82fef056..df69f928ef 100644
--- a/src/qml/memory/qv4mmdefs_p.h
+++ b/src/qml/memory/qv4mmdefs_p.h
@@ -116,22 +116,29 @@ struct Chunk {
HeapItem *realBase();
HeapItem *first();
+ static Q_ALWAYS_INLINE size_t bitmapIndex(size_t index) {
+ return index >> BitShift;
+ }
+ static Q_ALWAYS_INLINE quintptr bitForIndex(size_t index) {
+ return static_cast<quintptr>(1) << (index & (Bits - 1));
+ }
+
static void setBit(quintptr *bitmap, size_t index) {
// Q_ASSERT(index >= HeaderSize/SlotSize && index < ChunkSize/SlotSize);
- bitmap += index >> BitShift;
- quintptr bit = static_cast<quintptr>(1) << (index & (Bits - 1));
+ bitmap += bitmapIndex(index);
+ quintptr bit = bitForIndex(index);
*bitmap |= bit;
}
static void clearBit(quintptr *bitmap, size_t index) {
// Q_ASSERT(index >= HeaderSize/SlotSize && index < ChunkSize/SlotSize);
- bitmap += index >> BitShift;
- quintptr bit = static_cast<quintptr>(1) << (index & (Bits - 1));
+ bitmap += bitmapIndex(index);
+ quintptr bit = bitForIndex(index);
*bitmap &= ~bit;
}
static bool testBit(quintptr *bitmap, size_t index) {
// Q_ASSERT(index >= HeaderSize/SlotSize && index < ChunkSize/SlotSize);
- bitmap += index >> BitShift;
- quintptr bit = static_cast<quintptr>(1) << (index & (Bits - 1));
+ bitmap += bitmapIndex(index);
+ quintptr bit = bitForIndex(index);
return (*bitmap & bit);
}
static void setBits(quintptr *bitmap, size_t index, size_t nBits) {
@@ -179,6 +186,10 @@ struct Chunk {
return usedSlots;
}
+ bool sweep(ClassDestroyStatsCallback classCountPtr);
+ void freeAll();
+ void resetBlackBits();
+ void collectGrayItems(QV4::MarkStack *markStack);
bool sweep(ExecutionEngine *engine);
void freeAll(ExecutionEngine *engine);
@@ -260,6 +271,91 @@ Q_STATIC_ASSERT(sizeof(HeapItem) == Chunk::SlotSize);
Q_STATIC_ASSERT(QT_POINTER_SIZE*8 == Chunk::Bits);
Q_STATIC_ASSERT((1 << Chunk::BitShift) == Chunk::Bits);
+struct MarkStack {
+    MarkStack(ExecutionEngine *engine);
+    Heap::Base **top = 0;
+    Heap::Base **base = 0;
+    Heap::Base **limit = 0;
+    ExecutionEngine *engine;
+    void push(Heap::Base *m) {
+        *top = m;
+        ++top;
+    }
+    Heap::Base *pop() {
+        --top;
+        return *top;
+    }
+    void drain();
+};
+
+// Some helper classes and macros to automate the generation of our
+// tables used for marking objects
+
+enum MarkFlags {
+ Mark_NoMark = 0,
+ Mark_Value = 1,
+ Mark_Pointer = 2,
+ Mark_ValueArray = 3
+};
+
+template <typename T>
+struct MarkFlagEvaluator {
+    static Q_CONSTEXPR quint64 value = 0;
+};
+template <typename T, size_t o>
+struct MarkFlagEvaluator<Heap::Pointer<T, o>> {
+    static Q_CONSTEXPR quint64 value = static_cast<quint64>(Mark_Pointer) << (2*o / sizeof(quintptr));
+};
+template <size_t o>
+struct MarkFlagEvaluator<ValueArray<o>> {
+    static Q_CONSTEXPR quint64 value = static_cast<quint64>(Mark_ValueArray) << (2*o / sizeof(quintptr));
+};
+template <size_t o>
+struct MarkFlagEvaluator<HeapValue<o>> {
+    static Q_CONSTEXPR quint64 value = static_cast<quint64>(Mark_Value) << (2*o / sizeof(quintptr));
+};
+
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION(c, gcType, type, name) \
+    HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_##gcType(c, type, name)
+
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_Pointer(c, type, name) Pointer<type, 0> name;
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_NoMark(c, type, name) type name;
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_HeapValue(c, type, name) HeapValue<0> name;
+#define HEAP_OBJECT_OFFSET_MEMBER_EXPANSION_ValueArray(c, type, name) type<0> name;
+
+#define HEAP_OBJECT_MEMBER_EXPANSION(c, gcType, type, name) \
+    HEAP_OBJECT_MEMBER_EXPANSION_##gcType(c, type, name)
+
+#define HEAP_OBJECT_MEMBER_EXPANSION_Pointer(c, type, name) \
+    Pointer<type, offsetof(c##OffsetStruct, name) + baseOffset> name;
+#define HEAP_OBJECT_MEMBER_EXPANSION_NoMark(c, type, name) \
+    type name;
+#define HEAP_OBJECT_MEMBER_EXPANSION_HeapValue(c, type, name) \
+    HeapValue<offsetof(c##OffsetStruct, name) + baseOffset> name;
+#define HEAP_OBJECT_MEMBER_EXPANSION_ValueArray(c, type, name) \
+    type<offsetof(c##OffsetStruct, name) + baseOffset> name;
+
+#define HEAP_OBJECT_MARK_EXPANSION(class, gcType, type, name) \
+    MarkFlagEvaluator<decltype(class::name)>::value |
+
+#define DECLARE_HEAP_OBJECT(name, base) \
+struct name##OffsetStruct { \
+    name##Members(name, HEAP_OBJECT_OFFSET_MEMBER_EXPANSION) \
+}; \
+struct name##SizeStruct : base, name##OffsetStruct {}; \
+struct name##Data { \
+    static Q_CONSTEXPR size_t baseOffset = sizeof(name##SizeStruct) - sizeof(name##OffsetStruct); \
+    name##Members(name, HEAP_OBJECT_MEMBER_EXPANSION) \
+}; \
+Q_STATIC_ASSERT(sizeof(name##SizeStruct) == sizeof(name##Data) + name##Data::baseOffset); \
+static Q_CONSTEXPR quint64 name##_markTable = \
+    (name##Members(name##Data, HEAP_OBJECT_MARK_EXPANSION) 0) | QV4::Heap::base::markTable; \
+    \
+struct name : base, name##Data
+
+#define DECLARE_MARK_TABLE(class) static Q_CONSTEXPR quint64 markTable = class##_markTable
+
}
QT_END_NAMESPACE
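To see the macros together, here is a sketch of how a heap class would declare itself; the shape follows how classes such as Heap::MemberData are adapted elsewhere in this patch series, so the class name and member list are assumptions:

    #include <private/qv4mmdefs_p.h> // the macros and MarkFlagEvaluator above

    namespace QV4 { namespace Heap {

    struct MemberData; // assumed to exist, as in qv4memberdata_p.h

    // One X-macro member list expands three ways: into an offset struct
    // (so offsetof can compute each member's position), into the real
    // member declarations, and into MyObject_markTable, the OR of every
    // member's MarkFlagEvaluator value at its word offset.
    #define MyObjectMembers(class, Member) \
        Member(class, Pointer, MemberData *, memberData) \
        Member(class, ValueArray, ValueArray, values)

    DECLARE_HEAP_OBJECT(MyObject, Base) {
        DECLARE_MARK_TABLE(MyObject); // exposed through the class's VTable
    };

    } } // namespace QV4::Heap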
diff --git a/src/qml/memory/qv4writebarrier_p.h b/src/qml/memory/qv4writebarrier_p.h
new file mode 100644
index 0000000000..86fd28000d
--- /dev/null
+++ b/src/qml/memory/qv4writebarrier_p.h
@@ -0,0 +1,206 @@
+/****************************************************************************
+**
+** Copyright (C) 2016 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the QtQml module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4WRITEBARRIER_P_H
+#define QV4WRITEBARRIER_P_H
+
+//
+// W A R N I N G
+// -------------
+//
+// This file is not part of the Qt API. It exists purely as an
+// implementation detail. This header file may change from version to
+// version without notice, or even be removed.
+//
+// We mean it.
+//
+
+#include <private/qv4global_p.h>
+#include <private/qv4value_p.h>
+
+QT_BEGIN_NAMESPACE
+
+#define WRITEBARRIER_none 1
+
+#define WRITEBARRIER(x) (1/WRITEBARRIER_##x == 1)
+
+namespace QV4 {
+
+namespace WriteBarrier {
+
+enum Type {
+ NoBarrier,
+ Barrier
+};
+
+enum NewValueType {
+ Primitive,
+ Object,
+ Unknown
+};
+
+// ### this needs to be filled with a real memory fence once marking is concurrent
+Q_ALWAYS_INLINE void fence() {}
+
+#if WRITEBARRIER(none)
+
+template <NewValueType type>
+static Q_CONSTEXPR inline bool isRequired() {
+    return false;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Value value)
+{
+    Q_UNUSED(engine);
+    Q_UNUSED(base);
+    *slot = value;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Value *slot, Heap::Base *value)
+{
+    Q_UNUSED(engine);
+    Q_UNUSED(base);
+    *slot = value;
+}
+
+inline void write(EngineBase *engine, Heap::Base *base, Heap::Base **slot, Heap::Base *value)
+{
+    Q_UNUSED(engine);
+    Q_UNUSED(base);
+    *slot = value;
+}
+
+#endif
+
+}
+
+namespace Heap {
+
+template <typename T, size_t o>
+struct Pointer {
+    static Q_CONSTEXPR size_t offset = o;
+    T operator->() const { return ptr; }
+    operator T () const { return ptr; }
+
+    Heap::Base *base() {
+        Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+        Q_ASSERT(base->inUse());
+        return base;
+    }
+
+    void set(EngineBase *e, T newVal) {
+        WriteBarrier::write(e, base(), reinterpret_cast<Heap::Base **>(&ptr), reinterpret_cast<Heap::Base *>(newVal));
+    }
+
+    template <typename Type>
+    Type *cast() { return static_cast<Type *>(ptr); }
+
+private:
+    T ptr;
+};
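+// A note on base() above: a Pointer<T, offset> member recovers its owning
+// heap object by stepping back its own compile-time offset. Heap::Base is
+// exactly one pointer in size (asserted in qv4heap_p.h), so subtracting
+// offset/sizeof(Heap::Base) from a Heap::Base pointer steps back offset
+// bytes, from the member's address to the start of the enclosing object.
+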
+typedef Pointer<char *, 0> V4PointerCheck;
+V4_ASSERT_IS_TRIVIAL(V4PointerCheck)
+
+}
+
+template <size_t offset>
+struct HeapValue : Value {
+    Heap::Base *base() {
+        Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+        Q_ASSERT(base->inUse());
+        return base;
+    }
+
+    void set(EngineBase *e, const Value &newVal) {
+        WriteBarrier::write(e, base(), this, newVal);
+    }
+    void set(EngineBase *e, Heap::Base *b) {
+        WriteBarrier::write(e, base(), this, b);
+    }
+};
+
+template <size_t offset>
+struct ValueArray {
+    uint size;
+    uint alloc;
+    Value values[1];
+
+    Heap::Base *base() {
+        Heap::Base *base = reinterpret_cast<Heap::Base *>(this) - (offset/sizeof(Heap::Base));
+        Q_ASSERT(base->inUse());
+        return base;
+    }
+
+    void set(EngineBase *e, uint index, Value v) {
+        WriteBarrier::write(e, base(), values + index, v);
+    }
+    void set(EngineBase *e, uint index, Heap::Base *b) {
+        WriteBarrier::write(e, base(), values + index, b);
+    }
+    inline const Value &operator[] (uint index) const {
+        Q_ASSERT(index < alloc);
+        return values[index];
+    }
+    inline const Value *data() const {
+        return values;
+    }
+
+    void insertData(EngineBase *e, uint index, Value v) {
+        for (uint i = size - 1; i > index; --i) {
+            values[i] = values[i - 1];
+        }
+        set(e, index, v);
+    }
+    void removeData(EngineBase *e, uint index, int n = 1) {
+        Q_UNUSED(e);
+        for (uint i = index; i < size - n; ++i) {
+            values[i] = values[i + n];
+        }
+    }
+};
+
+// It's really important that the offset of 'values' in this structure is
+// constant across all architectures, otherwise JIT cross-compiled code
+// would have wrong offsets between host and target.
+Q_STATIC_ASSERT(offsetof(ValueArray<0>, values) == 8);
+
+}
+
+QT_END_NAMESPACE
+
+#endif
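A closing note on the WRITEBARRIER(x) selector defined near the top of this header: token-pasting the argument and dividing by the result makes a misspelled or unconfigured barrier name a preprocessor error instead of a silently false condition. In isolation; the 'steele' line is a hypothetical future configuration, not part of this patch:

    // The selected barrier is defined to 1, so 1/1 == 1 and the #if holds.
    #define WRITEBARRIER_none 1
    // A known but unselected barrier would be defined to -1; 1/-1 is -1,
    // so the condition is false yet still well-formed:
    //#define WRITEBARRIER_steele -1

    #define WRITEBARRIER(x) (1/WRITEBARRIER_##x == 1)

    #if WRITEBARRIER(none)
    // compiled: the no-op write() overloads above
    #endif

    // #if WRITEBARRIER(nnoe) would expand to 1/0, which the preprocessor
    // rejects at compile time instead of quietly evaluating to false.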