author		Fabian Kosmale <fabian.kosmale@qt.io>	2024-01-15 16:36:50 +0100
committer	Fabian Kosmale <fabian.kosmale@qt.io>	2024-03-05 14:06:29 +0100
commit		e44cd366eefbec33cc0387344a84f594636ec84b (patch)
tree		40f3d256200286726e23ca6da5cca5abcfb63bd7 /src/qml/memory
parent		14f549fdc5596c7ad0d9e5b516ec369f78ca57cf (diff)
qv4mm: Handle running out of native heap memory
With the current setup, we would have risked running out of native heap
space if the incremental garbage collection takes too long. Remedy this
by forcing the current gc run to complete immediately once we overshoot
the limit by a factor of 3/2.

We still need to respect that the gc must not run in a critical section:
handle this by rechecking the condition when leaving the critical
section, and only forcing a gc run at that point if necessary.

Note that we currently update the gc limits before the gc cycle
finishes, so the effective limit might actually not be 3/2 in that case.
This will be remedied in an upcoming patch.

Change-Id: I19c367bd1ff6ffc129293fc79e39b101f0728135
Reviewed-by: Ulf Hermann <ulf.hermann@qt.io>
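The triggering policy this describes can be reduced to a few lines. The
sketch below is illustrative only, not the actual implementation;
heapSize, limit, gcRunning and the two hooks are hypothetical stand-ins
for the MemoryManager state:

    #include <cstddef>

    void startIncrementalGC();  // assumed hooks, declared only for the sketch
    void forceGCCompletion();

    void checkUnmanagedHeap(std::size_t heapSize, std::size_t limit, bool gcRunning)
    {
        if (!gcRunning) {
            if (heapSize > limit)
                startIncrementalGC();       // normal incremental trigger
        } else if (heapSize > 3 * limit / 2) {
            forceGCCompletion();            // overshot the limit by 3/2: finish now
        }
    }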
Diffstat (limited to 'src/qml/memory')
-rw-r--r--	src/qml/memory/qv4mm.cpp	22
-rw-r--r--	src/qml/memory/qv4mm_p.h	62
2 files changed, 81 insertions, 3 deletions
diff --git a/src/qml/memory/qv4mm.cpp b/src/qml/memory/qv4mm.cpp
index c553aae17e..efa6a95fb4 100644
--- a/src/qml/memory/qv4mm.cpp
+++ b/src/qml/memory/qv4mm.cpp
@@ -1216,6 +1216,28 @@ static size_t dumpBins(BlockAllocator *b, const char *title)
     return totalSlotMem*Chunk::SlotSize;
 }
 
+/*!
+    \internal
+    Precondition: incremental garbage collection must currently be active.
+    Finishes incremental garbage collection, unless in a critical section.
+    Code entering a critical section is expected to check whether we need
+    to force a gc completion, and to trigger the gc again if necessary
+    when exiting the critical section.
+    Returns \c true if the gc cycle completed, \c false otherwise.
+ */
+bool MemoryManager::tryForceGCCompletion()
+{
+    if (gcBlocked == InCriticalSection)
+        return false;
+    const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
+    Q_ASSERT(incrementalGCIsAlreadyRunning);
+    auto oldTimeLimit = std::exchange(gcStateMachine->timeLimit, std::chrono::microseconds::max());
+    while (gcStateMachine->inProgress())
+        gcStateMachine->step();
+    gcStateMachine->timeLimit = oldTimeLimit;
+    return true;
+}
+
 void MemoryManager::runGC()
 {
     if (gcBlocked != Unblocked) {
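For illustration, the caller-side pattern that the doc comment of
tryForceGCCompletion asks for might look as follows. This is a sketch
mirroring the GCCriticalSection destructor added in qv4mm_p.h below;
engine is a hypothetical QV4::ExecutionEngine pointer:

    // Recheck the limit when leaving a critical section and either start
    // a fresh gc cycle or force the in-flight one to complete.
    if (engine->memoryManager->isAboveUnmanagedHeapLimit()) {
        if (!engine->isGCOngoing)
            engine->memoryManager->runGC();                // no cycle active: start one
        else
            engine->memoryManager->tryForceGCCompletion(); // finish the active cycle
    }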
diff --git a/src/qml/memory/qv4mm_p.h b/src/qml/memory/qv4mm_p.h
index 1d9b03600b..4f7df449c5 100644
--- a/src/qml/memory/qv4mm_p.h
+++ b/src/qml/memory/qv4mm_p.h
@@ -179,6 +179,9 @@ public:
     MemoryManager(ExecutionEngine *engine);
     ~MemoryManager();
 
+    template <typename ToBeMarked>
+    friend struct GCCriticalSection;
+
     // TODO: this is only for 64bit (and x86 with SSE/AVX), so extend it for other architectures to be slightly more efficient (meaning, align on 8-byte boundaries).
     // Note: all occurrences of "16" in alloc/dealloc are also due to the alignment.
     constexpr static inline std::size_t align(std::size_t size)
@@ -310,6 +313,7 @@ public:
     }
 
     void runGC();
+    bool tryForceGCCompletion();
 
     void dumpStats() const;
@@ -352,6 +356,14 @@ public:
     void collectFromJSStack(MarkStack *markStack) const;
     void sweep(bool lastSweep = false, ClassDestroyStatsCallback classCountPtr = nullptr);
     void cleanupDeletedQObjectWrappersInSweep();
+    bool isAboveUnmanagedHeapLimit()
+    {
+        const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
+        const bool aboveUnmanagedHeapLimit = incrementalGCIsAlreadyRunning
+            ? unmanagedHeapSize > 3 * unmanagedHeapSizeGCLimit / 2
+            : unmanagedHeapSize > unmanagedHeapSizeGCLimit;
+        return aboveUnmanagedHeapLimit;
+    }
 
 private:
     bool shouldRunGC() const;
@@ -367,9 +379,9 @@ private:
         didGCRun = true;
     }
 
-    if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) {
+    if (isAboveUnmanagedHeapLimit()) {
         if (!didGCRun)
-            runGC();
+            incrementalGCIsAlreadyRunning ? (void) tryForceGCCompletion() : runGC();
 
         if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
             // more than 75% full, raise limit
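As a concrete reading of the thresholds above (illustrative numbers, not
taken from the source): with unmanagedHeapSizeGCLimit at 128 KiB, a gc
is triggered once the unmanaged heap exceeds 128 KiB, but an already
running incremental gc is only forced to completion beyond 192 KiB; the
3*limit <= 4*size comparison is just size >= 75% of the limit, written
without fractional division:

    // Illustrative numbers only (assumed limit of 128 KiB):
    //   gc idle:    trigger once unmanagedHeapSize > 128 KiB
    //   gc running: force completion once unmanagedHeapSize > 3/2 * 128 KiB
    static_assert(3 * (128 * 1024) / 2 == 192 * 1024, "3/2 overshoot factor");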
@@ -415,7 +427,6 @@ public:
     std::size_t usedSlotsAfterLastFullSweep = 0;
 
     enum Blockness : quint8 {Unblocked, NormalBlocked, InCriticalSection };
-
     Blockness gcBlocked = Unblocked;
     bool aggressiveGC = false;
     bool gcStats = false;
@@ -432,6 +443,51 @@ public:
     } statistics;
 };
+/*!
+    \internal
+    GCCriticalSection prevents the gc from running until it is destroyed.
+    In its dtor, it checks whether we've reached the unmanaged heap limit,
+    and triggers a gc run if necessary.
+    Lastly, it can optionally mark an object passed to it before running the gc.
+ */
+template <typename ToBeMarked = void>
+struct GCCriticalSection {
+    Q_DISABLE_COPY_MOVE(GCCriticalSection)
+
+    Q_NODISCARD_CTOR GCCriticalSection(QV4::ExecutionEngine *engine, ToBeMarked *toBeMarked = nullptr)
+        : m_engine(engine)
+        , m_oldState(std::exchange(engine->memoryManager->gcBlocked, MemoryManager::InCriticalSection))
+        , m_toBeMarked(toBeMarked)
+    {
+        // disallow nested critical sections
+        Q_ASSERT(m_oldState != MemoryManager::InCriticalSection);
+    }
+    ~GCCriticalSection()
+    {
+        m_engine->memoryManager->gcBlocked = m_oldState;
+        if (m_oldState != MemoryManager::Unblocked)
+            if constexpr (!std::is_same_v<ToBeMarked, void>)
+                if (m_toBeMarked)
+                    m_toBeMarked->markObjects(m_engine->memoryManager->markStack());
+        /* because we blocked the gc, we might be using too much memory on the
+           unmanaged heap and did not run the normal fixup logic. So recheck
+           again, and trigger a gc run if necessary */
+        if (!m_engine->memoryManager->isAboveUnmanagedHeapLimit())
+            return;
+        if (!m_engine->isGCOngoing) {
+            m_engine->memoryManager->runGC();
+        } else {
+            [[maybe_unused]] bool gcFinished = m_engine->memoryManager->tryForceGCCompletion();
+            Q_ASSERT(gcFinished);
+        }
+    }
+
+private:
+    QV4::ExecutionEngine *m_engine;
+    MemoryManager::Blockness m_oldState;
+    ToBeMarked *m_toBeMarked;
+};
+
}
QT_END_NAMESPACE
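For context, a usage sketch of the new RAII guard. MyMarkable is a
hypothetical type exposing markObjects(QV4::MarkStack *), and the
engine/obj arguments are assumed to exist; this is not code from the
patch:

    // Keep the gc out of a critical region; the destructor restores the
    // previous block state, re-marks obj if a gc cycle is in flight, and
    // starts or completes a gc run if the unmanaged heap overshot its
    // limit while we were blocked.
    void updateWithoutGCInterference(QV4::ExecutionEngine *engine, MyMarkable *obj)
    {
        QV4::GCCriticalSection<MyMarkable> guard(engine, obj);
        // ... mutate state the gc must not observe halfway ...
    }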