Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp | 248
1 file changed, 233 insertions(+), 15 deletions(-)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp
index c855a41a4e..afb0220b6d 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp
@@ -1,6 +1,6 @@
// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -89,12 +89,20 @@
#endif
#endif
-#if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
+#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
#define FORCE_SYSTEM_MALLOC 0
#else
#define FORCE_SYSTEM_MALLOC 1
#endif
+// Use a background thread to periodically scavenge memory and release it back to the system
+// https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
+#if defined(BUILDING_ON_TIGER)
+#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
+#else
+#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
+#endif
+
#ifndef NDEBUG
namespace WTF {
@@ -170,10 +178,10 @@ void* fastZeroedMalloc(size_t n)
return result;
}
-void* tryFastZeroedMalloc(size_t n)
+TryMallocReturnValue tryFastZeroedMalloc(size_t n)
{
- void* result = tryFastMalloc(n);
- if (!result)
+ void* result;
+ if (!tryFastMalloc(n).getValue(result))
return 0;
memset(result, 0, n);
return result;
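
The hunks above and below change the try* allocators to return a TryMallocReturnValue, which callers unwrap with getValue() instead of null-checking a raw pointer. The wrapper is declared in wtf/FastMalloc.h rather than in this file; a minimal sketch of the shape these call sites rely on (everything here beyond the class name and getValue() is an assumption):

// Sketch only; the real declaration lives in wtf/FastMalloc.h.
class TryMallocReturnValue {
public:
    TryMallocReturnValue(void* data) : m_data(data) { }
    // Copies the pointer out and reports whether the allocation succeeded,
    // so a failed allocation cannot be dereferenced by accident.
    bool getValue(void*& data)
    {
        data = m_data;
        return !!m_data;
    }
private:
    void* m_data;
};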
@@ -192,7 +200,7 @@ void* tryFastZeroedMalloc(size_t n)
namespace WTF {
-void* tryFastMalloc(size_t n)
+TryMallocReturnValue tryFastMalloc(size_t n)
{
ASSERT(!isForbidden());
@@ -218,7 +226,9 @@ void* fastMalloc(size_t n)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- void* result = tryFastMalloc(n);
+ TryMallocReturnValue returnValue = tryFastMalloc(n);
+ void* result;
+ returnValue.getValue(result);
#else
void* result = malloc(n);
#endif
@@ -228,7 +238,7 @@ void* fastMalloc(size_t n)
return result;
}
-void* tryFastCalloc(size_t n_elements, size_t element_size)
+TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
{
ASSERT(!isForbidden());
@@ -256,7 +266,9 @@ void* fastCalloc(size_t n_elements, size_t element_size)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- void* result = tryFastCalloc(n_elements, element_size);
+ TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
+ void* result;
+ returnValue.getValue(result);
#else
void* result = calloc(n_elements, element_size);
#endif
@@ -283,7 +295,7 @@ void fastFree(void* p)
#endif
}
-void* tryFastRealloc(void* p, size_t n)
+TryMallocReturnValue tryFastRealloc(void* p, size_t n)
{
ASSERT(!isForbidden());
@@ -315,7 +327,9 @@ void* fastRealloc(void* p, size_t n)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- void* result = tryFastRealloc(p, n);
+ TryMallocReturnValue returnValue = tryFastRealloc(p, n);
+ void* result;
+ returnValue.getValue(result);
#else
void* result = realloc(p, n);
#endif
@@ -531,7 +545,7 @@ static const size_t kNumClasses = 68;
static const size_t kPageMapBigAllocationThreshold = 128 << 20;
// Minimum number of pages to fetch from system at a time. Must be
-// significantly bigger than kBlockSize to amortize system-call
+// significantly bigger than kPageSize to amortize system-call
// overhead, and also to reduce external fragmentation. Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
@@ -1187,6 +1201,32 @@ template <> class MapSelector<32> {
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+// The central page heap collects spans of memory that have been deleted but are still committed until they are released
+// back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
+// system. Every 5 seconds, the background thread wakes up and does the following:
+// - Check whether we had to commit memory in the last 5 seconds. If so, skip this scavenge pass, because it is a sign that we
+//   are short of free committed pages and should not release any back to the system yet.
+// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed
+//   pages back to the system.
+// - Once the number of free committed pages drops to kMinimumFreeCommittedPageCount, stop scavenging and block the
+//   scavenger thread until the count rises above kMinimumFreeCommittedPageCount again.
+
+// The background thread wakes up every 5 seconds to scavenge, as long as there is memory available to return to the system.
+static const int kScavengeTimerDelayInSeconds = 5;
+
+// Number of free committed pages that we want to keep around.
+static const size_t kMinimumFreeCommittedPageCount = 512;
+
+// During a scavenge, we'll release up to a fraction of the free committed pages.
+#if PLATFORM(WIN)
+// We are slightly less aggressive in releasing memory on Windows for performance reasons.
+static const int kMaxScavengeAmountFactor = 3;
+#else
+static const int kMaxScavengeAmountFactor = 2;
+#endif
+#endif
+
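To make these constants concrete, assume the usual TCMalloc page size of 4 KB (kPageShift = 12 is an assumption here; the diff does not show kPageShift):

// Illustrative sizing under an assumed kPageShift of 12 (4 KB pages).
static const size_t kAssumedPageShift = 12;
static const size_t kMinimumResidentBytes =
    kMinimumFreeCommittedPageCount << kAssumedPageShift; // 512 * 4 KB = 2 MB
// A single scavenge pass may release at most
// free_committed_pages_ / kMaxScavengeAmountFactor pages: half of the
// committed free pages on most platforms, a third on Windows.
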
class TCMalloc_PageHeap {
public:
void init();
@@ -1286,6 +1326,14 @@ class TCMalloc_PageHeap {
// Bytes allocated from system
uint64_t system_bytes_;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ // Number of pages kept in free lists that are still committed.
+ Length free_committed_pages_;
+
+ // Number of pages that we committed in the last scavenge wait interval.
+ Length pages_committed_since_last_scavenge_;
+#endif
+
bool GrowHeap(Length n);
// REQUIRES span->length >= n
@@ -1308,9 +1356,11 @@ class TCMalloc_PageHeap {
// span of exactly the specified length. Else, returns NULL.
Span* AllocLarge(Length n);
+#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
// Incrementally release some memory to the system.
// IncrementalScavenge(n) is called whenever n pages are freed.
void IncrementalScavenge(Length n);
+#endif
// Number of pages to deallocate before doing more scavenging
int64_t scavenge_counter_;
@@ -1321,6 +1371,24 @@ class TCMalloc_PageHeap {
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
friend class FastMallocZone;
#endif
+
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ static NO_RETURN void* runScavengerThread(void*);
+
+ NO_RETURN void scavengerThread();
+
+ void scavenge();
+
+ inline bool shouldContinueScavenging() const;
+
+ pthread_mutex_t m_scavengeMutex;
+
+ pthread_cond_t m_scavengeCondition;
+
+ // Tracks whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds or
+ // is blocked waiting for more pages to be deleted.
+ bool m_scavengeThreadActive;
+#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
};
void TCMalloc_PageHeap::init()
@@ -1329,6 +1397,12 @@ void TCMalloc_PageHeap::init()
pagemap_cache_ = PageMapCache(0);
free_pages_ = 0;
system_bytes_ = 0;
+
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ free_committed_pages_ = 0;
+ pages_committed_since_last_scavenge_ = 0;
+#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+
scavenge_counter_ = 0;
// Start scavenging at kMaxPages list
scavenge_index_ = kMaxPages-1;
@@ -1339,8 +1413,68 @@ void TCMalloc_PageHeap::init()
DLL_Init(&free_[i].normal);
DLL_Init(&free_[i].returned);
}
+
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ pthread_mutex_init(&m_scavengeMutex, 0);
+ pthread_cond_init(&m_scavengeCondition, 0);
+ m_scavengeThreadActive = true;
+ pthread_t thread;
+ pthread_create(&thread, 0, runScavengerThread, this);
+#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+}
+
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+void* TCMalloc_PageHeap::runScavengerThread(void* context)
+{
+ static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
+#if COMPILER(MSVC)
+ // Without this, Visual Studio will complain that this method does not return a value.
+ return 0;
+#endif
}
+void TCMalloc_PageHeap::scavenge()
+{
+ // If we had to commit memory in the last 5 seconds, we don't have enough free committed pages for the current
+ // allocation rate, so hold off on releasing memory back to the system.
+ if (pages_committed_since_last_scavenge_ > 0) {
+ pages_committed_since_last_scavenge_ = 0;
+ return;
+ }
+ Length pagesDecommitted = 0;
+ for (int i = kMaxPages; i >= 0; i--) {
+ SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
+ if (!DLL_IsEmpty(&slist->normal)) {
+ // Release the last span on the normal portion of this list
+ Span* s = slist->normal.prev;
+ // Release at most a 1/kMaxScavengeAmountFactor fraction of the free committed pages in a single pass.
+ if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
+ continue;
+ DLL_Remove(s);
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ if (!s->decommitted) {
+ pagesDecommitted += s->length;
+ s->decommitted = true;
+ }
+ DLL_Prepend(&slist->returned, s);
+ // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
+ if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
+ break;
+ }
+ }
+ pages_committed_since_last_scavenge_ = 0;
+ ASSERT(free_committed_pages_ >= pagesDecommitted);
+ free_committed_pages_ -= pagesDecommitted;
+}
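
The continue inside the loop is the budget check that implements "up to a fraction": a span is released only if the running decommit total stays within free_committed_pages_ / kMaxScavengeAmountFactor. Restated as a standalone predicate with sample values (the helper name is invented for illustration):

// Invented helper restating the budget check in scavenge() above.
static bool withinScavengeBudget(size_t pagesDecommitted, size_t spanLength,
                                 size_t freeCommittedPages, size_t factor)
{
    return (pagesDecommitted + spanLength) * factor <= freeCommittedPages;
}
// With freeCommittedPages = 3000 and factor = 2:
//   withinScavengeBudget(0, 1400, 3000, 2)   -> true  (2800 <= 3000)
//   withinScavengeBudget(1400, 200, 3000, 2) -> false (3200 >  3000)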
+
+inline bool TCMalloc_PageHeap::shouldContinueScavenging() const
+{
+ return free_committed_pages_ > kMinimumFreeCommittedPageCount;
+}
+
+#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+
inline Span* TCMalloc_PageHeap::New(Length n) {
ASSERT(Check());
ASSERT(n > 0);
@@ -1366,7 +1500,18 @@ inline Span* TCMalloc_PageHeap::New(Length n) {
if (result->decommitted) {
TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
result->decommitted = false;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ pages_committed_since_last_scavenge_ += n;
+#endif
+ }
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ else {
+ // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
+ // free committed pages count.
+ ASSERT(free_committed_pages_ >= n);
+ free_committed_pages_ -= n;
}
+#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(Check());
free_pages_ -= n;
return result;
@@ -1426,7 +1571,18 @@ Span* TCMalloc_PageHeap::AllocLarge(Length n) {
if (best->decommitted) {
TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
best->decommitted = false;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ pages_committed_since_last_scavenge_ += n;
+#endif
+ }
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ else {
+ // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
+ // free committed pages count.
+ ASSERT(free_committed_pages_ >= n);
+ free_committed_pages_ -= n;
}
+#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(Check());
free_pages_ -= n;
return best;
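
New() and AllocLarge() now share the same bookkeeping rule when handing out a span: pages taken from the committed ("normal") free list come off free_committed_pages_, while pages that had to be recommitted are recorded in pages_committed_since_last_scavenge_ so the next scavenge pass can detect recent memory pressure. A compact restatement (hypothetical helper, with size_t standing in for Length):

// Hypothetical helper capturing the accounting shared by New()/AllocLarge().
static void accountForSpanAllocation(bool spanWasDecommitted, size_t n,
                                     size_t& freeCommittedPages,
                                     size_t& pagesCommittedSinceLastScavenge)
{
    if (spanWasDecommitted)
        pagesCommittedSinceLastScavenge += n; // fresh commit: note the pressure
    else
        freeCommittedPages -= n;              // consumed already-committed pages
}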
@@ -1508,6 +1664,10 @@ inline void TCMalloc_PageHeap::Delete(Span* span) {
// necessary. We do not bother resetting the stale pagemap
// entries for the pieces we are merging together because we only
// care about the pagemap entries for the boundaries.
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ // Track the total size of the neighboring free spans that are committed.
+ Length neighboringCommittedSpansLength = 0;
+#endif
const PageID p = span->start;
const Length n = span->length;
Span* prev = GetDescriptor(p-1);
@@ -1515,6 +1675,10 @@ inline void TCMalloc_PageHeap::Delete(Span* span) {
// Merge preceding span into this span
ASSERT(prev->start + prev->length == p);
const Length len = prev->length;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ if (!prev->decommitted)
+ neighboringCommittedSpansLength += len;
+#endif
mergeDecommittedStates(span, prev);
DLL_Remove(prev);
DeleteSpan(prev);
@@ -1528,6 +1692,10 @@ inline void TCMalloc_PageHeap::Delete(Span* span) {
// Merge next span into this span
ASSERT(next->start == p+n);
const Length len = next->length;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ if (!next->decommitted)
+ neighboringCommittedSpansLength += len;
+#endif
mergeDecommittedStates(span, next);
DLL_Remove(next);
DeleteSpan(next);
@@ -1551,10 +1719,27 @@ inline void TCMalloc_PageHeap::Delete(Span* span) {
}
free_pages_ += n;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ if (span->decommitted) {
+ // If the merged span is decommitted, that means we decommitted any neighboring spans that were
+ // committed. Update the free committed pages count.
+ free_committed_pages_ -= neighboringCommittedSpansLength;
+ } else {
+ // If the merged span remains committed, add the deleted span's size to the free committed pages count.
+ free_committed_pages_ += n;
+ }
+
+ // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
+ if (!m_scavengeThreadActive && shouldContinueScavenging())
+ pthread_cond_signal(&m_scavengeCondition);
+#else
IncrementalScavenge(n);
+#endif
+
ASSERT(Check());
}
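
A worked example of the merge accounting: deleting an in-use 10-page span whose committed 4-page predecessor and decommitted 6-page successor both merge into it yields one 20-page free span with neighboringCommittedSpansLength == 4. If mergeDecommittedStates() leaves the merged span decommitted, the 4 neighbor pages that had been counted as committed-and-free are subtracted from free_committed_pages_; if it stays committed, only the 10 newly freed pages are added, since the neighbors were already counted.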
+#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
// Fast path; not yet time to release memory
scavenge_counter_ -= n;
@@ -1592,6 +1777,7 @@ void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
// Nothing to scavenge, delay for a while
scavenge_counter_ = kDefaultReleaseDelay;
}
+#endif
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
// Associate span object with all interior pages as well
@@ -1703,6 +1889,10 @@ bool TCMalloc_PageHeap::GrowHeap(Length n) {
}
ask = actual_size >> kPageShift;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ pages_committed_since_last_scavenge_ += ask;
+#endif
+
uint64_t old_system_bytes = system_bytes_;
system_bytes_ += (ask << kPageShift);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
@@ -2083,6 +2273,34 @@ static inline TCMalloc_PageHeap* getPageHeap()
#define pageheap getPageHeap()
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+#if PLATFORM(WIN)
+static void sleep(unsigned seconds)
+{
+ ::Sleep(seconds * 1000);
+}
+#endif
+
+void TCMalloc_PageHeap::scavengerThread()
+{
+ while (1) {
+ if (!shouldContinueScavenging()) {
+ pthread_mutex_lock(&m_scavengeMutex);
+ m_scavengeThreadActive = false;
+ // Block until there are enough freed pages to release back to the system.
+ pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
+ m_scavengeThreadActive = true;
+ pthread_mutex_unlock(&m_scavengeMutex);
+ }
+ sleep(kScavengeTimerDelayInSeconds);
+ {
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->scavenge();
+ }
+ }
+}
+#endif
+
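For contrast with the wait above: the textbook pthread handshake re-tests its predicate in a loop, because pthread_cond_wait() may wake spuriously, and it flips the predicate under the mutex on the signaling side so a wakeup cannot slip between the waiter's test and its wait. A self-contained sketch of that general pattern (names are illustrative, not from this patch):

#include <pthread.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t condition = PTHREAD_COND_INITIALIZER;
static bool workAvailable = false;

// Waiting side: loop on the predicate to tolerate spurious wakeups.
static void waitForWork()
{
    pthread_mutex_lock(&mutex);
    while (!workAvailable)
        pthread_cond_wait(&condition, &mutex);
    workAvailable = false;
    pthread_mutex_unlock(&mutex);
}

// Signaling side: update the predicate while holding the mutex before
// signaling, so the wakeup cannot be lost.
static void postWork()
{
    pthread_mutex_lock(&mutex);
    workAvailable = true;
    pthread_cond_signal(&condition);
    pthread_mutex_unlock(&mutex);
}
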
// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
@@ -3364,7 +3582,7 @@ void* fastMalloc(size_t size)
return malloc<true>(size);
}
-void* tryFastMalloc(size_t size)
+TryMallocReturnValue tryFastMalloc(size_t size)
{
return malloc<false>(size);
}
@@ -3425,7 +3643,7 @@ void* fastCalloc(size_t n, size_t elem_size)
return calloc<true>(n, elem_size);
}
-void* tryFastCalloc(size_t n, size_t elem_size)
+TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
{
return calloc<false>(n, elem_size);
}
@@ -3489,7 +3707,7 @@ void* fastRealloc(void* old_ptr, size_t new_size)
return realloc<true>(old_ptr, new_size);
}
-void* tryFastRealloc(void* old_ptr, size_t new_size)
+TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
{
return realloc<false>(old_ptr, new_size);
}
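
Taken together, the signature changes mean an out-of-line caller consumes the try* allocators like this (hedged example: caller-side code, not part of this patch):

#include <wtf/FastMalloc.h>

bool growBuffer(size_t size)
{
    void* buffer;
    if (!WTF::tryFastMalloc(size).getValue(buffer))
        return false; // allocation failed; report instead of crashing
    // ... use buffer ...
    WTF::fastFree(buffer);
    return true;
}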