path: root/src/qml/memory/qv4mm_p.h
Diffstat (limited to 'src/qml/memory/qv4mm_p.h')
-rw-r--r--  src/qml/memory/qv4mm_p.h  242
1 file changed, 180 insertions, 62 deletions
diff --git a/src/qml/memory/qv4mm_p.h b/src/qml/memory/qv4mm_p.h
index 881f4fd19d..ef0cd0c36c 100644
--- a/src/qml/memory/qv4mm_p.h
+++ b/src/qml/memory/qv4mm_p.h
@@ -1,41 +1,5 @@
-/****************************************************************************
-**
-** Copyright (C) 2016 The Qt Company Ltd.
-** Contact: https://www.qt.io/licensing/
-**
-** This file is part of the QtQml module of the Qt Toolkit.
-**
-** $QT_BEGIN_LICENSE:LGPL$
-** Commercial License Usage
-** Licensees holding valid commercial Qt licenses may use this file in
-** accordance with the commercial license agreement provided with the
-** Software or, alternatively, in accordance with the terms contained in
-** a written agreement between you and The Qt Company. For licensing terms
-** and conditions see https://www.qt.io/terms-conditions. For further
-** information use the contact form at https://www.qt.io/contact-us.
-**
-** GNU Lesser General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU Lesser
-** General Public License version 3 as published by the Free Software
-** Foundation and appearing in the file LICENSE.LGPL3 included in the
-** packaging of this file. Please review the following information to
-** ensure the GNU Lesser General Public License version 3 requirements
-** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
-**
-** GNU General Public License Usage
-** Alternatively, this file may be used under the terms of the GNU
-** General Public License version 2.0 or (at your option) the GNU General
-** Public license version 3 or any later version approved by the KDE Free
-** Qt Foundation. The licenses are as published by the Free Software
-** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
-** included in the packaging of this file. Please review the following
-** information to ensure the GNU General Public License requirements will
-** be met: https://www.gnu.org/licenses/gpl-2.0.html and
-** https://www.gnu.org/licenses/gpl-3.0.html.
-**
-** $QT_END_LICENSE$
-**
-****************************************************************************/
+// Copyright (C) 2016 The Qt Company Ltd.
+// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
#ifndef QV4GC_H
#define QV4GC_H
@@ -58,16 +22,79 @@
#include <private/qv4mmdefs_p.h>
#include <QVector>
-#define QV4_MM_MAXBLOCK_SHIFT "QV4_MM_MAXBLOCK_SHIFT"
-#define QV4_MM_MAX_CHUNK_SIZE "QV4_MM_MAX_CHUNK_SIZE"
-#define QV4_MM_STATS "QV4_MM_STATS"
-
#define MM_DEBUG 0
QT_BEGIN_NAMESPACE
namespace QV4 {
+enum GCState {
+ MarkStart = 0,
+ MarkGlobalObject,
+ MarkJSStack,
+ InitMarkPersistentValues,
+ MarkPersistentValues,
+ InitMarkWeakValues,
+ MarkWeakValues,
+ MarkDrain,
+ MarkReady,
+ InitCallDestroyObjects,
+ CallDestroyObjects,
+ FreeWeakMaps,
+ FreeWeakSets,
+ HandleQObjectWrappers,
+ DoSweep,
+ Invalid,
+ Count,
+};
+
+struct GCData { virtual ~GCData(){};};
+
+struct GCIteratorStorage {
+ PersistentValueStorage::Iterator it{nullptr, 0};
+};
+struct GCStateMachine;
+
+struct GCStateInfo {
+ using ExtraData = std::variant<std::monostate, GCIteratorStorage>;
+ GCState (*execute)(GCStateMachine *, ExtraData &) = nullptr; // function to execute for this state; returns the next state to transition to
+ bool breakAfter{false};
+};
+
+struct GCStateMachine {
+ using ExtraData = GCStateInfo::ExtraData;
+ GCState state{GCState::Invalid};
+ std::chrono::microseconds timeLimit{};
+ QDeadlineTimer deadline;
+ std::array<GCStateInfo, GCState::Count> stateInfoMap;
+ MemoryManager *mm = nullptr;
+ ExtraData stateData; // extra data for specific states
+
+ GCStateMachine();
+
+ inline void step() {
+ if (!inProgress()) {
+ reset();
+ }
+ transition();
+ }
+
+ inline bool inProgress() {
+ return state != GCState::Invalid;
+ }
+
+ inline void reset() {
+ state = GCState::MarkStart;
+ }
+
+ Q_QML_EXPORT void transition();
+
+ inline void handleTimeout(GCState state) {
+ Q_UNUSED(state);
+ }
+};
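
For orientation, a minimal sketch of how these declarations could be driven. The names exampleMarkStep and driveOneSlice are invented, and the real transition() implemented in qv4mm.cpp may well differ; this only illustrates the shape the declared types suggest:

    // hypothetical state function matching GCStateInfo::execute's signature:
    // do one slice of work, then return the next state to run
    GCState exampleMarkStep(GCStateMachine *sm, GCStateInfo::ExtraData &)
    {
        sm->mm->collectFromJSStack(sm->mm->markStack()); // public hooks declared further down
        return GCState::MarkDrain;
    }

    // hypothetical driver approximating what transition() might do: execute state
    // functions until the deadline derived from timeLimit expires or a state asks
    // to break, then yield so step() can resume on a later call (e.g. from onEventLoop)
    void driveOneSlice(GCStateMachine &sm)
    {
        sm.deadline = QDeadlineTimer(sm.timeLimit);
        while (sm.state != GCState::Invalid && sm.state != GCState::Count) {
            const GCStateInfo &info = sm.stateInfoMap[size_t(sm.state)];
            GCState next = info.execute(&sm, sm.stateData);
            const bool timedOut = sm.deadline.hasExpired();
            sm.state = next;
            if (info.breakAfter || timedOut)
                break; // incremental: hand control back to the engine
        }
    }
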
+
+
struct ChunkAllocator;
struct MemorySegment;
@@ -103,7 +130,6 @@ struct BlockAllocator {
void sweep();
void freeAll();
void resetBlackBits();
- void collectGrayItems(MarkStack *markStack);
// bump allocations
HeapItem *nextFree = nullptr;
@@ -125,7 +151,6 @@ struct HugeItemAllocator {
void sweep(ClassDestroyStatsCallback classCountPtr);
void freeAll();
void resetBlackBits();
- void collectGrayItems(MarkStack *markStack);
size_t usedMem() const {
size_t used = 0;
@@ -154,15 +179,26 @@ public:
MemoryManager(ExecutionEngine *engine);
~MemoryManager();
+ template <typename ToBeMarked>
+ friend struct GCCriticalSection;
+
// TODO: this is only for 64bit (and x86 with SSE/AVX), so extend it for other architectures to be slightly more efficient (meaning, align on 8-byte boundaries).
// Note: all occurrences of "16" in alloc/dealloc are also due to the alignment.
constexpr static inline std::size_t align(std::size_t size)
{ return (size + Chunk::SlotSize - 1) & ~(Chunk::SlotSize - 1); }
+ /* NOTE: allocManaged comes in various overloads. If size is not passed explicitly,
+ sizeof(ManagedType::Data) is used. However, there are quite a few cases
+ where we allocate more than sizeof(ManagedType::Data); that's generally the case
+ when the Object has a ValueArray member.
+ If no internal class pointer is provided, ManagedType::defaultInternalClass(engine)
+ will be used as the internal class.
+ */
+
template<typename ManagedType>
inline typename ManagedType::Data *allocManaged(std::size_t size, Heap::InternalClass *ic)
{
- Q_STATIC_ASSERT(std::is_trivial< typename ManagedType::Data >::value);
+ Q_STATIC_ASSERT(std::is_trivial_v<typename ManagedType::Data>);
size = align(size);
typename ManagedType::Data *d = static_cast<typename ManagedType::Data *>(allocData(size));
d->internalClass.set(engine, ic);
@@ -172,12 +208,24 @@ public:
}
template<typename ManagedType>
+ inline typename ManagedType::Data *allocManaged(Heap::InternalClass *ic)
+ {
+ return allocManaged<ManagedType>(sizeof(typename ManagedType::Data), ic);
+ }
+
+ template<typename ManagedType>
inline typename ManagedType::Data *allocManaged(std::size_t size, InternalClass *ic)
{
return allocManaged<ManagedType>(size, ic->d());
}
template<typename ManagedType>
+ inline typename ManagedType::Data *allocManaged(InternalClass *ic)
+ {
+ return allocManaged<ManagedType>(sizeof(typename ManagedType::Data), ic);
+ }
+
+ template<typename ManagedType>
inline typename ManagedType::Data *allocManaged(std::size_t size)
{
Scope scope(engine);
@@ -185,6 +233,15 @@ public:
return allocManaged<ManagedType>(size, ic);
}
+ template<typename ManagedType>
+ inline typename ManagedType::Data *allocManaged()
+ {
+ auto constexpr size = sizeof(typename ManagedType::Data);
+ Scope scope(engine);
+ Scoped<InternalClass> ic(scope, ManagedType::defaultInternalClass(engine));
+ return allocManaged<ManagedType>(size, ic);
+ }
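
As a usage sketch of the overload set above, given a MemoryManager *mm. HypotheticalString, HypotheticalListObject and the payload size are invented for illustration and are not types from this header:

    // default size (sizeof(ManagedType::Data)) and default internal class
    auto *s = mm->allocManaged<HypotheticalString>();

    // explicit over-allocation, e.g. for a trailing ValueArray payload,
    // still using ManagedType::defaultInternalClass(engine)
    const std::size_t payload = 8 * sizeof(QV4::Value);          // assumed extra space
    auto *list = mm->allocManaged<HypotheticalListObject>(
            sizeof(HypotheticalListObject::Data) + payload);

In both cases the returned pointer is uninitialised ManagedType::Data; callers are expected to run the matching init(), as the variadic alloc() helper further down does.
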
+
template <typename ObjectType>
typename ObjectType::Data *allocateObject(Heap::InternalClass *ic)
{
@@ -250,12 +307,14 @@ public:
typename ManagedType::Data *alloc(Args&&... args)
{
Scope scope(engine);
- Scoped<ManagedType> t(scope, allocManaged<ManagedType>(sizeof(typename ManagedType::Data)));
+ Scoped<ManagedType> t(scope, allocManaged<ManagedType>());
t->d_unchecked()->init(std::forward<Args>(args)...);
return t->d();
}
void runGC();
+ bool tryForceGCCompletion();
+ void runFullGC();
void dumpStats() const;
@@ -267,6 +326,9 @@ public:
// and InternalClassDataPrivate<PropertyAttributes>.
void changeUnmanagedHeapSizeUsage(qptrdiff delta) { unmanagedHeapSize += delta; }
+ // called at the end of a gc cycle
+ void updateUnmanagedHeapSizeGCLimit();
+
template<typename ManagedType>
typename ManagedType::Data *allocIC()
{
@@ -277,6 +339,12 @@ public:
void registerWeakMap(Heap::MapObject *map);
void registerWeakSet(Heap::SetObject *set);
+ void onEventLoop();
+
+ // GC-related methods
+ void setGCTimeLimit(int timeMs);
+ MarkStack* markStack() { return m_markStack.get(); }
+
protected:
/// expects size to be aligned
Heap::Base *allocString(std::size_t unmanagedSize);
@@ -288,33 +356,34 @@ private:
MinUnmanagedHeapSizeGCLimit = 128 * 1024
};
+public:
void collectFromJSStack(MarkStack *markStack) const;
- void mark();
void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
+ void cleanupDeletedQObjectWrappersInSweep();
+ bool isAboveUnmanagedHeapLimit()
+ {
+ const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
+ const bool aboveUnmanagedHeapLimit = incrementalGCIsAlreadyRunning
+ ? unmanagedHeapSize > 3 * unmanagedHeapSizeGCLimit / 2
+ : unmanagedHeapSize > unmanagedHeapSizeGCLimit;
+ return aboveUnmanagedHeapLimit;
+ }
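+ // Worked example (assuming unmanagedHeapSizeGCLimit sits at the
+ // MinUnmanagedHeapSizeGCLimit of 128 * 1024 bytes): with no gc in flight,
+ // exceeding 128 KiB of unmanaged heap reports "above limit"; while an
+ // incremental gc is already running (m_markStack non-null) the threshold
+ // is relaxed to 3/2 of the limit, i.e. 192 KiB, so the running cycle is
+ // only force-completed when usage keeps growing.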
+private:
bool shouldRunGC() const;
- void collectRoots(MarkStack *markStack);
HeapItem *allocate(BlockAllocator *allocator, std::size_t size)
{
+ const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
+
bool didGCRun = false;
if (aggressiveGC) {
- runGC();
+ runFullGC();
didGCRun = true;
}
- if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
+ if (isAboveUnmanagedHeapLimit()) {
if (!didGCRun)
- runGC();
-
- if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
- // more than 75% full, raise limit
- unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
- unmanagedHeapSize) * 2;
- } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
- // less than 25% full, lower limit
- unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
- unmanagedHeapSizeGCLimit/2);
- }
+ incrementalGCIsAlreadyRunning ? (void) tryForceGCCompletion() : runGC();
didGCRun = true;
}
@@ -342,11 +411,15 @@ public:
Heap::MapObject *weakMaps = nullptr;
Heap::SetObject *weakSets = nullptr;
+ std::unique_ptr<GCStateMachine> gcStateMachine{nullptr};
+ std::unique_ptr<MarkStack> m_markStack{nullptr};
+
std::size_t unmanagedHeapSize = 0; // the number of bytes of heap memory that is not managed by the memory manager, but which is held onto by managed items.
std::size_t unmanagedHeapSizeGCLimit;
std::size_t usedSlotsAfterLastFullSweep = 0;
- bool gcBlocked = false;
+ enum Blockness : quint8 {Unblocked, NormalBlocked, InCriticalSection };
+ Blockness gcBlocked = Unblocked;
bool aggressiveGC = false;
bool gcStats = false;
bool gcCollectorStats = false;
@@ -362,6 +435,51 @@ public:
} statistics;
};
+/*!
+ \internal
+ GCCriticalSection prevents the gc from running until it is destroyed.
+ In its dtor, it checks whether we've reached the unmanaged heap limit,
+ and triggers a gc run if necessary.
+ Lastly, it can optionally mark an object passed to it before running the gc.
+ */
+template <typename ToBeMarked = void>
+struct GCCriticalSection {
+ Q_DISABLE_COPY_MOVE(GCCriticalSection)
+
+ Q_NODISCARD_CTOR GCCriticalSection(QV4::ExecutionEngine *engine, ToBeMarked *toBeMarked = nullptr)
+ : m_engine(engine)
+ , m_oldState(std::exchange(engine->memoryManager->gcBlocked, MemoryManager::InCriticalSection))
+ , m_toBeMarked(toBeMarked)
+ {
+ // disallow nested critical sections
+ Q_ASSERT(m_oldState != MemoryManager::InCriticalSection);
+ }
+ ~GCCriticalSection()
+ {
+ m_engine->memoryManager->gcBlocked = m_oldState;
+ if (m_oldState != MemoryManager::Unblocked)
+ if constexpr (!std::is_same_v<ToBeMarked, void>)
+ if (m_toBeMarked)
+ m_toBeMarked->markObjects(m_engine->memoryManager->markStack());
+ /* because we blocked the gc, we might be using too much memory on the unmanaged heap
+ and did not run the normal fixup logic. So recheck now, and trigger a gc run
+ if necessary */
+ if (!m_engine->memoryManager->isAboveUnmanagedHeapLimit())
+ return;
+ if (!m_engine->isGCOngoing) {
+ m_engine->memoryManager->runGC();
+ } else {
+ [[maybe_unused]] bool gcFinished = m_engine->memoryManager->tryForceGCCompletion();
+ Q_ASSERT(gcFinished);
+ }
+ }
+
+private:
+ QV4::ExecutionEngine *m_engine;
+ MemoryManager::Blockness m_oldState;
+ ToBeMarked *m_toBeMarked;
+};
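
A usage sketch (the function and SomeMarkable are invented; SomeMarkable is assumed to provide the markObjects(MarkStack *) member that the dtor calls):

    void hypotheticalReentrantWork(QV4::ExecutionEngine *engine, SomeMarkable *obj)
    {
        // blocks the gc for this scope; nesting critical sections would assert
        QV4::GCCriticalSection<SomeMarkable> section(engine, obj);
        // ... touch heap-referencing state that must not race with the collector ...
    }   // dtor: restores the previous block state, re-marks obj if the gc was not
        // unblocked before, and finishes or starts a gc run if the unmanaged heap
        // limit was crossed while blocked
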
+
}
QT_END_NAMESPACE