Diffstat (limited to 'chromium/third_party/WebKit/Source/wtf/PartitionAlloc.cpp')
-rw-r--r-- chromium/third_party/WebKit/Source/wtf/PartitionAlloc.cpp | 892
1 file changed, 655 insertions(+), 237 deletions(-)
diff --git a/chromium/third_party/WebKit/Source/wtf/PartitionAlloc.cpp b/chromium/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
index 3aeb5190455..15c8286edef 100644
--- a/chromium/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
+++ b/chromium/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
@@ -46,22 +46,48 @@ COMPILE_ASSERT(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), ok_super_page_m
COMPILE_ASSERT(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, ok_partition_page_size);
COMPILE_ASSERT(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), ok_partition_page_multiple);
COMPILE_ASSERT(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, PartitionPage_not_too_big);
+COMPILE_ASSERT(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, PartitionBucket_not_too_big);
COMPILE_ASSERT(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, PartitionSuperPageExtentEntry_not_too_big);
COMPILE_ASSERT(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, page_metadata_fits_in_hole);
+// Check that some of our zanier calculations worked out as expected.
+COMPILE_ASSERT(WTF::kGenericSmallestBucket == 8, generic_smallest_bucket);
+COMPILE_ASSERT(WTF::kGenericMaxBucketed == 983040, generic_max_bucketed);
namespace WTF {
-static size_t partitionBucketPageSize(size_t size)
+int PartitionRootBase::gInitializedLock = 0;
+bool PartitionRootBase::gInitialized = false;
+PartitionPage PartitionRootBase::gSeedPage;
+PartitionBucket PartitionRootBase::gPagedBucket;
+
+static size_t partitionBucketNumSystemPages(size_t size)
{
+ // This works out reasonably for the current bucket sizes of the generic
+ // allocator, and the current values of partition page size and constants.
+ // Specifically, we have enough room to always pack the slots perfectly into
+ // some number of system pages. The only waste is the waste associated with
+ // unfaulted pages (i.e. wasted address space).
+ // TODO: we end up using a lot of system pages for very small sizes. For
+ // example, we'll use 12 system pages for slot size 24. The slot size is
+ // so small that the waste would be tiny with just 4, or 1, system pages.
+ // Later, we can investigate whether there are anti-fragmentation benefits
+ // to using fewer system pages.
double bestWasteRatio = 1.0f;
size_t bestPages = 0;
- for (size_t i = kNumSystemPagesPerPartitionPage - 1; i <= kNumSystemPagesPerPartitionPage; ++i) {
+ if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ ASSERT(!(size % kSystemPageSize));
+ return size / kSystemPageSize;
+ }
+ ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ for (size_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) {
size_t pageSize = kSystemPageSize * i;
size_t numSlots = pageSize / size;
size_t waste = pageSize - (numSlots * size);
- // Leave a page unfaulted is not free; the page will occupy an empty page table entry.
+ // Leaving a page unfaulted is not free; the page will occupy an empty page table entry.
// Make a simple attempt to account for that.
- waste += sizeof(void*) * (kNumSystemPagesPerPartitionPage - i);
+ size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1);
+ size_t numUnfaultedPages = numRemainderPages ? (kNumSystemPagesPerPartitionPage - numRemainderPages) : 0;
+ waste += sizeof(void*) * numUnfaultedPages;
double wasteRatio = (double) waste / (double) pageSize;
if (wasteRatio < bestWasteRatio) {
bestWasteRatio = wasteRatio;
@@ -69,54 +95,142 @@ static size_t partitionBucketPageSize(size_t size)
}
}
ASSERT(bestPages > 0);
- return bestPages * kSystemPageSize;
-}
-
-static ALWAYS_INLINE void partitionPageMarkFree(PartitionPage* page)
-{
- ASSERT(!partitionPageIsFree(page));
- page->numAllocatedSlots = -1;
+ return bestPages;
}
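
Note: the search above is worth tracing once by hand. A minimal standalone sketch, assuming 4KB system pages, 4 system pages per partition page, and a cap of 16 system pages per slot span (illustrative values, not quoted from the headers):

    #include <cstddef>
    #include <cstdio>

    static const size_t kSystemPageSize = 4096;              // assumed
    static const size_t kNumSystemPagesPerPartitionPage = 4; // assumed
    static const size_t kMaxSystemPagesPerSlotSpan = 16;     // assumed

    static size_t bestPagesFor(size_t slotSize)
    {
        double bestWasteRatio = 1.0;
        size_t bestPages = 0;
        for (size_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) {
            size_t pageSize = kSystemPageSize * i;
            size_t waste = pageSize - (pageSize / slotSize) * slotSize;
            // Unfaulted pages in the partition-page-sized clump are not free:
            // each one still occupies an empty page table entry.
            size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1);
            if (numRemainderPages)
                waste += sizeof(void*) * (kNumSystemPagesPerPartitionPage - numRemainderPages);
            double wasteRatio = static_cast<double>(waste) / static_cast<double>(pageSize);
            if (wasteRatio < bestWasteRatio) {
                bestWasteRatio = wasteRatio;
                bestPages = i;
            }
        }
        return bestPages;
    }

    int main()
    {
        printf("%zu\n", bestPagesFor(24)); // prints 12, matching the TODO above
        return 0;
    }
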
-WTF_EXPORT void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation)
+static void partitionAllocBaseInit(PartitionRootBase* root)
{
ASSERT(!root->initialized);
+
+ spinLockLock(&PartitionRootBase::gInitializedLock);
+ if (!PartitionRootBase::gInitialized) {
+ PartitionRootBase::gInitialized = true;
+        // The seed page acts as a sentinel: the logic that finds a new
+        // active page recognizes it and skips it.
+        PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
+ }
+ spinLockUnlock(&PartitionRootBase::gInitializedLock);
+
root->initialized = true;
- root->lock = 0;
+ root->totalSizeOfCommittedPages = 0;
root->totalSizeOfSuperPages = 0;
+ root->nextSuperPage = 0;
+ root->nextPartitionPage = 0;
+ root->nextPartitionPageEnd = 0;
+ root->firstExtent = 0;
+ root->currentExtent = 0;
+
+ memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing));
+ root->globalEmptyPageRingIndex = 0;
+
+ // This is a "magic" value so we can test if a root pointer is valid.
+ root->invertedSelf = ~reinterpret_cast<uintptr_t>(root);
+}
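
Note: the inverted pointer gives a one-compare validity test for any candidate root. A hedged sketch of such a check (the real validation helper lives outside this diff; the function name here is hypothetical):

    static bool partitionRootLooksValid(const PartitionRootBase* root)
    {
        // Valid iff the stored complement matches the complement of the
        // object's own address; a stray or stale pointer fails the compare.
        return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
    }
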
+
+static void partitionBucketInitBase(PartitionBucket* bucket, PartitionRootBase* root)
+{
+ bucket->activePagesHead = &PartitionRootGeneric::gSeedPage;
+ bucket->freePagesHead = 0;
+ bucket->numFullPages = 0;
+ bucket->numSystemPagesPerSlotSpan = partitionBucketNumSystemPages(bucket->slotSize);
+}
+
+void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation)
+{
+    partitionAllocBaseInit(root);
+
root->numBuckets = numBuckets;
root->maxAllocation = maxAllocation;
size_t i;
for (i = 0; i < root->numBuckets; ++i) {
PartitionBucket* bucket = &root->buckets()[i];
- bucket->root = root;
- bucket->activePagesHead = &root->seedPage;
- bucket->freePagesHead = 0;
- bucket->numFullPages = 0;
- size_t size = partitionBucketSize(bucket);
- bucket->pageSize = partitionBucketPageSize(size);
+ if (!i)
+ bucket->slotSize = kAllocationGranularity;
+ else
+ bucket->slotSize = i << kBucketShift;
+ partitionBucketInitBase(bucket, root);
}
+}
- root->nextSuperPage = 0;
- root->nextPartitionPage = 0;
- root->nextPartitionPageEnd = 0;
- root->currentExtent = &root->firstExtent;
- root->firstExtent.superPageBase = 0;
- root->firstExtent.superPagesEnd = 0;
- root->firstExtent.next = 0;
- root->seedPage.bucket = &root->seedBucket;
- root->seedPage.activePageNext = 0;
- root->seedPage.numAllocatedSlots = 0;
- root->seedPage.numUnprovisionedSlots = 0;
- partitionPageSetFreelistHead(&root->seedPage, 0);
- // We mark the seed page as free to make sure it is skipped by our logic to
- // find a new active page.
- partitionPageMarkFree(&root->seedPage);
-
- root->seedBucket.root = root;
- root->seedBucket.activePagesHead = 0;
- root->seedBucket.freePagesHead = 0;
- root->seedBucket.numFullPages = 0;
+void partitionAllocGenericInit(PartitionRootGeneric* root)
+{
+    partitionAllocBaseInit(root);
+
+ root->lock = 0;
+
+ // Precalculate some shift and mask constants used in the hot path.
+ // Example: malloc(41) == 101001 binary.
+    // The order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
+ // orderIndex is the next three MSB == 010 == 2.
+ // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex).
+ size_t order;
+ for (order = 0; order <= kBitsPerSizet; ++order) {
+ size_t orderIndexShift;
+ if (order < kGenericNumBucketsPerOrderBits + 1)
+ orderIndexShift = 0;
+ else
+ orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1);
+ root->orderIndexShifts[order] = orderIndexShift;
+ size_t subOrderIndexMask;
+ if (order == kBitsPerSizet) {
+ // This avoids invoking undefined behavior for an excessive shift.
+ subOrderIndexMask = static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
+ } else {
+            subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> (kGenericNumBucketsPerOrderBits + 1);
+ }
+ root->orderSubIndexMasks[order] = subOrderIndexMask;
+ }
+
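
Note: tracing the tables for the malloc(41) example above, assuming kGenericNumBucketsPerOrderBits == 3 (consistent with the "next three MSB" comment):

    size                  = 41 = 0b101001
    order                 = 6                     (bit 5, value 32, is the highest set bit)
    orderIndexShifts[6]   = 6 - (3 + 1) = 2
    orderIndex            = (41 >> 2) & 0b111 = 0b1010 & 0b111 = 2
    orderSubIndexMasks[6] = ((1 << 6) - 1) >> 4 = 0b11
    subOrderIndex         = 41 & 0b11 = 1         (non-zero: 41 is not on a bucket boundary)
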
+ // Set up the actual usable buckets first.
+ // Note that typical values (i.e. min allocation size of 8) will result in
+ // invalid buckets (size==9 etc. or more generally, size is not a multiple
+ // of the smallest allocation granularity).
+ // We avoid them in the bucket lookup map, but we tolerate them to keep the
+ // code simpler and the structures more generic.
+ size_t i, j;
+ size_t currentSize = kGenericSmallestBucket;
+ size_t currentIncrement = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
+ PartitionBucket* bucket = &root->buckets[0];
+ for (i = 0; i < kGenericNumBucketedOrders; ++i) {
+ for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+ bucket->slotSize = currentSize;
+ partitionBucketInitBase(bucket, root);
+ // Disable invalid buckets so that touching them faults.
+ if (currentSize % kGenericSmallestBucket)
+ bucket->activePagesHead = 0;
+ currentSize += currentIncrement;
+ ++bucket;
+ }
+ currentIncrement <<= 1;
+ }
+ ASSERT(currentSize == 1 << kGenericMaxBucketedOrder);
+ ASSERT(bucket == &root->buckets[0] + (kGenericNumBucketedOrders * kGenericNumBucketsPerOrder));
+
+ // Then set up the fast size -> bucket lookup table.
+ bucket = &root->buckets[0];
+ PartitionBucket** bucketPtr = &root->bucketLookups[0];
+ for (order = 0; order <= kBitsPerSizet; ++order) {
+ for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+ if (order < kGenericMinBucketedOrder) {
+ // Use the bucket of finest granularity for malloc(0) etc.
+ *bucketPtr++ = &root->buckets[0];
+ } else if (order > kGenericMaxBucketedOrder) {
+ *bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
+ } else {
+ PartitionBucket* validBucket = bucket;
+ // Skip over invalid buckets.
+ while (validBucket->slotSize % kGenericSmallestBucket)
+ validBucket++;
+ *bucketPtr++ = validBucket;
+ bucket++;
+ }
+ }
+ }
+ ASSERT(bucket == &root->buckets[0] + (kGenericNumBucketedOrders * kGenericNumBucketsPerOrder));
+ ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder));
+ // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
+    // which tries to overflow to a non-existent order.
+ *bucketPtr = &PartitionRootGeneric::gPagedBucket;
}
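
Note: for reference, a hedged sketch of how the hot path consumes these two tables when mapping a size to a bucket; the real inline helper presumably lives in the header (not shown in this diff), and countLeadingZeroBits() is a hypothetical stand-in for whatever intrinsic it uses:

    static PartitionBucket* sizeToBucket(PartitionRootGeneric* root, size_t size)
    {
        size_t order = kBitsPerSizet - countLeadingZeroBits(size); // hypothetical clz helper
        size_t orderIndex = (size >> root->orderIndexShifts[order]) & (kGenericNumBucketsPerOrder - 1);
        // A non-zero sub-order index means the size lies between bucket
        // boundaries, so the lookup rounds up to the next slot.
        size_t subOrderIndex = size & root->orderSubIndexMasks[order];
        return root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + orderIndex + !!subOrderIndex];
    }

The "+ !!subOrderIndex" rounding on the highest order is what the extra trailing lookup entry above exists to absorb for malloc(-1).
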
static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
@@ -124,35 +238,26 @@ static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
// Failure here indicates a memory leak.
bool noLeaks = !bucket->numFullPages;
PartitionPage* page = bucket->activePagesHead;
- if (page == &bucket->root->seedPage)
- return noLeaks;
- do {
+ while (page) {
if (page->numAllocatedSlots)
noLeaks = false;
- page = page->activePageNext;
- } while (page);
+ page = page->nextPage;
+ }
return noLeaks;
}
-bool partitionAllocShutdown(PartitionRoot* root)
+static void partitionAllocBaseShutdown(PartitionRootBase* root)
{
- bool noLeaks = true;
ASSERT(root->initialized);
root->initialized = false;
- size_t i;
- for (i = 0; i < root->numBuckets; ++i) {
- PartitionBucket* bucket = &root->buckets()[i];
- if (!partitionAllocShutdownBucket(bucket))
- noLeaks = false;
- }
// Now that we've examined all partition pages in all buckets, it's safe
// to free all our super pages. We first collect the super page pointers
    // on the stack because some of them are themselves stored in super pages.
char* superPages[kMaxPartitionSize / kSuperPageSize];
size_t numSuperPages = 0;
- PartitionSuperPageExtentEntry* entry = &root->firstExtent;
+ PartitionSuperPageExtentEntry* entry = root->firstExtent;
while (entry) {
char* superPage = entry->superPageBase;
while (superPage != entry->superPagesEnd) {
@@ -163,40 +268,93 @@ bool partitionAllocShutdown(PartitionRoot* root)
entry = entry->next;
}
ASSERT(numSuperPages == root->totalSizeOfSuperPages / kSuperPageSize);
- for (size_t i = 0; i < numSuperPages; ++i) {
- SuperPageBitmap::unregisterSuperPage(superPages[i]);
+ for (size_t i = 0; i < numSuperPages; ++i)
freePages(superPages[i], kSuperPageSize);
+}
+
+bool partitionAllocShutdown(PartitionRoot* root)
+{
+ bool noLeaks = true;
+ size_t i;
+ for (i = 0; i < root->numBuckets; ++i) {
+ PartitionBucket* bucket = &root->buckets()[i];
+ if (!partitionAllocShutdownBucket(bucket))
+ noLeaks = false;
}
+ partitionAllocBaseShutdown(root);
+ return noLeaks;
+}
+
+bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
+{
+ bool noLeaks = true;
+ size_t i;
+ for (i = 0; i < kGenericNumBucketedOrders * kGenericNumBucketsPerOrder; ++i) {
+ PartitionBucket* bucket = &root->buckets[i];
+ if (!partitionAllocShutdownBucket(bucket))
+ noLeaks = false;
+ }
+ partitionAllocBaseShutdown(root);
return noLeaks;
}
-static ALWAYS_INLINE void* partitionAllocPage(PartitionRoot* root)
+static NEVER_INLINE void partitionOutOfMemory()
+{
+ IMMEDIATE_CRASH();
+}
+
+static NEVER_INLINE void partitionFull()
+{
+ IMMEDIATE_CRASH();
+}
+
+static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
{
- if (LIKELY(root->nextPartitionPage != 0)) {
- // In this case, we can still hand out pages from a previous
- // super page allocation.
+ decommitSystemPages(addr, len);
+ ASSERT(root->totalSizeOfCommittedPages > len);
+ root->totalSizeOfCommittedPages -= len;
+}
+
+static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
+{
+ recommitSystemPages(addr, len);
+ root->totalSizeOfCommittedPages += len;
+}
+
+static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, size_t numPartitionPages)
+{
+ ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize));
+ ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize));
+ RELEASE_ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage);
+ size_t totalSize = kPartitionPageSize * numPartitionPages;
+ root->totalSizeOfCommittedPages += totalSize;
+ size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift;
+ if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) {
+ // In this case, we can still hand out pages from the current super page
+ // allocation.
char* ret = root->nextPartitionPage;
- root->nextPartitionPage += kPartitionPageSize;
- if (UNLIKELY(root->nextPartitionPage == root->nextPartitionPageEnd)) {
- // We ran out, need to get more pages next time.
- root->nextPartitionPage = 0;
- root->nextPartitionPageEnd = 0;
- }
+ root->nextPartitionPage += totalSize;
return ret;
}
// Need a new super page.
root->totalSizeOfSuperPages += kSuperPageSize;
- RELEASE_ASSERT(root->totalSizeOfSuperPages <= kMaxPartitionSize);
+ if (root->totalSizeOfSuperPages > kMaxPartitionSize)
+ partitionFull();
char* requestedAddress = root->nextSuperPage;
char* superPage = reinterpret_cast<char*>(allocPages(requestedAddress, kSuperPageSize, kSuperPageSize));
- SuperPageBitmap::registerSuperPage(superPage);
+ if (UNLIKELY(!superPage)) {
+ if (flags & PartitionAllocReturnNull)
+ return 0;
+ partitionOutOfMemory();
+ }
root->nextSuperPage = superPage + kSuperPageSize;
char* ret = superPage + kPartitionPageSize;
- root->nextPartitionPage = ret + kPartitionPageSize;
+ root->nextPartitionPage = ret + totalSize;
root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize;
- // Make the first partition page in the super page a guard page, but leave a // hole in the middle.
+ // Make the first partition page in the super page a guard page, but leave a
+ // hole in the middle.
// This is where we put page metadata and also a tiny amount of extent
// metadata.
setSystemPagesInaccessible(superPage, kSystemPageSize);
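
Note: the resulting super page layout, sketched under assumed constants (2MB super pages, 16KB partition pages of four 4KB system pages):

    superPage                                             superPage + kSuperPageSize
    | guard    | metadata | inaccessible rest | payload partition pages ... | guard |
    | sys page | sys page | of 1st part. page |  ret = superPage + 16KB     | (pp)  |

Only one system page of the leading partition page is ever faulted in (the metadata "hole"); the trailing partition page is excluded from nextPartitionPageEnd so it can serve as a trailing guard.
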
@@ -212,17 +370,19 @@ static ALWAYS_INLINE void* partitionAllocPage(PartitionRoot* root)
// We allocated a new super page so update super page metadata.
// First check if this is a new extent or not.
+ PartitionSuperPageExtentEntry* latestExtent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(superPage));
PartitionSuperPageExtentEntry* currentExtent = root->currentExtent;
bool isNewExtent = (superPage != requestedAddress);
if (UNLIKELY(isNewExtent)) {
- if (currentExtent->superPageBase) {
- // We already have a super page, so need to allocate metadata in the linked list.
- PartitionSuperPageExtentEntry* newEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(currentExtent->superPageBase));
- newEntry->next = 0;
- currentExtent->next = newEntry;
- currentExtent = newEntry;
- root->currentExtent = newEntry;
+ latestExtent->next = 0;
+ if (UNLIKELY(!currentExtent)) {
+ root->firstExtent = latestExtent;
+ } else {
+ ASSERT(currentExtent->superPageBase);
+ currentExtent->next = latestExtent;
}
+ root->currentExtent = latestExtent;
+ currentExtent = latestExtent;
currentExtent->superPageBase = superPage;
currentExtent->superPagesEnd = superPage + kSuperPageSize;
} else {
@@ -230,35 +390,55 @@ static ALWAYS_INLINE void* partitionAllocPage(PartitionRoot* root)
currentExtent->superPagesEnd += kSuperPageSize;
ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd);
}
+ // By storing the root in every extent metadata object, we have a fast way
+ // to go from a pointer within the partition to the root object.
+ latestExtent->root = root;
return ret;
}
-static ALWAYS_INLINE void partitionUnusePage(PartitionPage* page)
+static ALWAYS_INLINE void partitionUnusePage(PartitionRootBase* root, PartitionPage* page)
{
+ ASSERT(page->bucket->numSystemPagesPerSlotSpan);
void* addr = partitionPageToPointer(page);
- decommitSystemPages(addr, kPartitionPageSize);
+ partitionDecommitSystemPages(root, addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
}
static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket)
{
- return bucket->pageSize / partitionBucketSize(bucket);
+ return (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / bucket->slotSize;
+}
+
+static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket* bucket)
+{
+ return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage;
}
static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket)
{
- ASSERT(page != &bucket->root->seedPage);
+ ASSERT(page != &PartitionRootGeneric::gSeedPage);
page->numAllocatedSlots = 0;
page->numUnprovisionedSlots = partitionBucketSlots(bucket);
- ASSERT(page->numUnprovisionedSlots > 1);
+ ASSERT(page->numUnprovisionedSlots);
page->bucket = bucket;
+ page->nextPage = 0;
// NULLing the freelist is not strictly necessary but it makes an ASSERT in partitionPageFillFreelist simpler.
- partitionPageSetFreelistHead(page, 0);
+ page->freelistHead = 0;
+ page->pageOffset = 0;
+ page->freeCacheIndex = -1;
+ size_t numPartitionPages = partitionBucketPartitionPages(bucket);
+ size_t i;
+ char* pageCharPtr = reinterpret_cast<char*>(page);
+ for (i = 1; i < numPartitionPages; ++i) {
+ pageCharPtr += kPageMetadataSize;
+ PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr);
+ secondaryPage->pageOffset = i;
+ }
}
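
Note: the pageOffset chain written here is what lets a pointer landing anywhere in a multi-partition-page slot span find its head metadata entry. A hedged sketch of the recovery step (the real helper is presumably inline in the header):

    static PartitionPage* partitionPageToHeadPage(PartitionPage* page)
    {
        // Metadata entries for consecutive partition pages sit kPageMetadataSize
        // apart, and secondary entries store their distance from the head.
        char* ptr = reinterpret_cast<char*>(page) - page->pageOffset * kPageMetadataSize;
        return reinterpret_cast<PartitionPage*>(ptr);
    }
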
static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page)
{
- ASSERT(page != &page->bucket->root->seedPage);
+ ASSERT(page != &PartitionRootGeneric::gSeedPage);
size_t numSlots = page->numUnprovisionedSlots;
ASSERT(numSlots);
PartitionBucket* bucket = page->bucket;
@@ -266,248 +446,481 @@ static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page
// (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.)
ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket));
// Similarly, make explicitly sure that the freelist is empty.
- ASSERT(!partitionPageFreelistHead(page));
+ ASSERT(!page->freelistHead);
+ ASSERT(page->numAllocatedSlots >= 0);
- size_t size = partitionBucketSize(bucket);
+ size_t size = bucket->slotSize;
char* base = reinterpret_cast<char*>(partitionPageToPointer(page));
char* returnObject = base + (size * page->numAllocatedSlots);
- char* nextFreeObject = returnObject + size;
- char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(returnObject) + kSystemPageSize) & kSystemPageBaseMask);
+ char* firstFreelistPointer = returnObject + size;
+ char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*);
+ // Our goal is to fault as few system pages as possible. We calculate the
+ // page containing the "end" of the returned slot, and then allow freelist
+ // pointers to be written up to the end of that page.
+ char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(firstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask);
+ char* slotsLimit = returnObject + (size * page->numUnprovisionedSlots);
+ char* freelistLimit = subPageLimit;
+ if (UNLIKELY(slotsLimit < freelistLimit))
+ freelistLimit = slotsLimit;
size_t numNewFreelistEntries = 0;
- if (LIKELY(subPageLimit > nextFreeObject))
- numNewFreelistEntries = (subPageLimit - nextFreeObject) / size;
+ if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) {
+ // Only consider used space in the slot span. If we consider wasted
+ // space, we may get an off-by-one when a freelist pointer fits in the
+ // wasted space, but a slot does not.
+ // We know we can fit at least one freelist pointer.
+ numNewFreelistEntries = 1;
+ // Any further entries require space for the whole slot span.
+ numNewFreelistEntries += (freelistLimit - firstFreelistPointerExtent) / size;
+ }
// We always return an object slot -- that's the +1 below.
    // We do not necessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes.
+ ASSERT(numNewFreelistEntries + 1 <= numSlots);
numSlots -= (numNewFreelistEntries + 1);
page->numUnprovisionedSlots = numSlots;
page->numAllocatedSlots++;
if (LIKELY(numNewFreelistEntries)) {
- PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(nextFreeObject);
- partitionPageSetFreelistHead(page, entry);
+ char* freelistPointer = firstFreelistPointer;
+ PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
+ page->freelistHead = entry;
while (--numNewFreelistEntries) {
- nextFreeObject += size;
- PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(nextFreeObject);
+ freelistPointer += size;
+ PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
entry->next = partitionFreelistMask(nextEntry);
entry = nextEntry;
}
entry->next = partitionFreelistMask(0);
} else {
- partitionPageSetFreelistHead(page, 0);
+ page->freelistHead = 0;
}
return returnObject;
}
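
Note: the provisioning arithmetic is easiest to check with numbers. Assuming 4KB system pages and 8-byte freelist pointers (a 64-bit build), a fresh span with slotSize == 128 works out as:

    returnObject               = base
    firstFreelistPointer       = base + 128
    firstFreelistPointerExtent = base + 136
    subPageLimit               = base + 4096   (rounded up to the page end)
    numNewFreelistEntries      = 1 + (4096 - 136) / 128 = 1 + 30 = 31

That is exactly the 31 remaining slots whose freelist pointers fit in the first system page; the returned slot makes it 32 == 4096 / 128.
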
// This helper function scans the active page list for a suitable new active
-// page, starting at the page passed in. When it finds a suitable new active
-// page (one that has free slots), it is also set as the new active page. If
-// there is no suitable new active page, the current active page is set to null.
-static ALWAYS_INLINE void partitionSetNewActivePage(PartitionBucket* bucket, PartitionPage* page)
+// page, starting at the passed in page.
+// When it finds a suitable new active page (one that has free slots), it is
+// set as the new active page and true is returned. If there is no suitable new
+// active page, false is returned and the current active page is set to null.
+// As potential pages are scanned, they are tidied up according to their state.
+// Freed pages are swept on to the free page list and full pages are unlinked
+// from any list.
+static ALWAYS_INLINE bool partitionSetNewActivePage(PartitionPage* page)
{
- ASSERT(page == &bucket->root->seedPage || page->bucket == bucket);
- for (; page; page = page->activePageNext) {
- // Skip over free pages. We need this check first so that we can trust
- // that the page union field really containts a freelist pointer.
- if (UNLIKELY(partitionPageIsFree(page)))
- continue;
+ if (page == &PartitionRootBase::gSeedPage) {
+ ASSERT(!page->nextPage);
+ return false;
+ }
+
+ PartitionPage* nextPage = 0;
+ PartitionBucket* bucket = page->bucket;
+
+ for (; page; page = nextPage) {
+ nextPage = page->nextPage;
+ ASSERT(page->bucket == bucket);
+ ASSERT(page != bucket->freePagesHead);
+ ASSERT(!bucket->freePagesHead || page != bucket->freePagesHead->nextPage);
// Page is usable if it has something on the freelist, or unprovisioned
// slots that can be turned into a freelist.
- if (LIKELY(partitionPageFreelistHead(page) != 0) || LIKELY(page->numUnprovisionedSlots))
- break;
- // If we get here, we found a full page. Skip over it too, and also tag
- // it as full (via a negative value). We need it tagged so that free'ing
- // can tell, and move it back into the active page list.
- ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
- page->numAllocatedSlots = -page->numAllocatedSlots;
- ++bucket->numFullPages;
+ if (LIKELY(page->freelistHead != 0) || LIKELY(page->numUnprovisionedSlots)) {
+ bucket->activePagesHead = page;
+ return true;
+ }
+
+ ASSERT(page->numAllocatedSlots >= 0);
+ if (LIKELY(page->numAllocatedSlots == 0)) {
+ ASSERT(page->freeCacheIndex == -1);
+ // We hit a free page, so shepherd it on to the free page list.
+ page->nextPage = bucket->freePagesHead;
+ bucket->freePagesHead = page;
+ } else {
+ // If we get here, we found a full page. Skip over it too, and also
+ // tag it as full (via a negative value). We need it tagged so that
+ // free'ing can tell, and move it back into the active page list.
+ ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
+ page->numAllocatedSlots = -page->numAllocatedSlots;
+ ++bucket->numFullPages;
+ // numFullPages is a uint16_t for efficient packing so guard against
+ // overflow to be safe.
+ RELEASE_ASSERT(bucket->numFullPages);
+ // Not necessary but might help stop accidents.
+ page->nextPage = 0;
+ }
}
- bucket->activePagesHead = page;
+ bucket->activePagesHead = 0;
+ return false;
+}
+
+struct PartitionDirectMapExtent {
+ size_t mapSize; // Mapped size, not including guard pages and meta-data.
+};
+
+static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page)
+{
+ ASSERT(partitionBucketIsDirectMapped(page->bucket));
+ return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 2 * kPageMetadataSize);
}
-static ALWAYS_INLINE void partitionPageSetFreePageNext(PartitionPage* page, PartitionPage* nextFree)
+static ALWAYS_INLINE void* partitionDirectMap(PartitionRootBase* root, int flags, size_t size)
{
- ASSERT(partitionPageIsFree(page));
- page->u.freePageNext = nextFree;
+ size = partitionDirectMapSize(size);
+
+    // Because we need to fake looking like a super page, we need to allocate
+ // a bunch of system pages more than "size":
+ // - The first few system pages are the partition page in which the super
+ // page metadata is stored. We fault just one system page out of a partition
+ // page sized clump.
+ // - We add a trailing guard page.
+ size_t mapSize = size + kPartitionPageSize + kSystemPageSize;
+ // Round up to the allocation granularity.
+ mapSize += kPageAllocationGranularityOffsetMask;
+ mapSize &= kPageAllocationGranularityBaseMask;
+
+ // TODO: we may want to let the operating system place these allocations
+ // where it pleases. On 32-bit, this might limit address space
+ // fragmentation and on 64-bit, this might have useful savings for TLB
+ // and page table overhead.
+ // TODO: if upsizing realloc()s are common on large sizes, we could
+ // consider over-allocating address space on 64-bit, "just in case".
+ // TODO: consider pre-populating page tables (e.g. MAP_POPULATE on Linux,
+ // MADV_WILLNEED on POSIX).
+ // TODO: these pages will be zero-filled. Consider internalizing an
+ // allocZeroed() API so we can avoid a memset() entirely in this case.
+ char* ptr = reinterpret_cast<char*>(allocPages(0, mapSize, kSuperPageSize));
+ if (!ptr) {
+ if (flags & PartitionAllocReturnNull)
+ return 0;
+ partitionOutOfMemory();
+ }
+ char* ret = ptr + kPartitionPageSize;
+ // TODO: due to all the guard paging, this arrangement creates 4 mappings.
+ // We could get it down to three by using read-only for the metadata page,
+ // or perhaps two by leaving out the trailing guard page on 64-bit.
+ setSystemPagesInaccessible(ptr, kSystemPageSize);
+ setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2));
+ setSystemPagesInaccessible(ret + size, kSystemPageSize);
+
+ PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(ptr));
+ extent->root = root;
+ PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ret);
+ PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + kPageMetadataSize);
+ page->freelistHead = 0;
+ page->nextPage = 0;
+ page->bucket = bucket;
+ page->numAllocatedSlots = 1;
+ page->numUnprovisionedSlots = 0;
+ page->pageOffset = 0;
+ page->freeCacheIndex = 0;
+
+ bucket->activePagesHead = 0;
+ bucket->freePagesHead = 0;
+ bucket->slotSize = size;
+ bucket->numSystemPagesPerSlotSpan = 0;
+ bucket->numFullPages = 0;
+
+ PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
+ mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize;
+
+ return ret;
}
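
Note: the resulting direct-map layout, sketched per the three setSystemPagesInaccessible() calls above (4KB system pages and 16KB partition pages assumed):

    ptr                                  ret = ptr + kPartitionPageSize
    | guard    | metadata | inaccessible | payload: size bytes | guard    |
    | sys page | sys page | rest of 1st  |                     | sys page |
                            part. page

The fake bucket sits one kPageMetadataSize entry after the PartitionPage entry, and the PartitionDirectMapExtent one entry after that, which is why partitionPageToDirectMapExtent() skips 2 * kPageMetadataSize.
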
-static ALWAYS_INLINE PartitionPage* partitionPageFreePageNext(PartitionPage* page)
+static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page)
{
- ASSERT(partitionPageIsFree(page));
- return page->u.freePageNext;
+ size_t unmapSize = partitionPageToDirectMapExtent(page)->mapSize;
+
+    // Add on the size of the trailing guard page and preceding partition
+ // page.
+ unmapSize += kPartitionPageSize + kSystemPageSize;
+
+ ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask));
+
+ char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
+ // Account for the mapping starting a partition page before the actual
+ // allocation address.
+ ptr -= kPartitionPageSize;
+
+ freePages(ptr, unmapSize);
}
-void* partitionAllocSlowPath(PartitionBucket* bucket)
+void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
{
// The slow path is called when the freelist is empty.
- ASSERT(!partitionPageFreelistHead(bucket->activePagesHead));
+ ASSERT(!bucket->activePagesHead->freelistHead);
+
+ // For the partitionAllocGeneric API, we have a bunch of buckets marked
+ // as special cases. We bounce them through to the slow path so that we
+ // can still have a blazing fast hot path due to lack of corner-case
+ // branches.
+ bool returnNull = flags & PartitionAllocReturnNull;
+ if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) {
+ ASSERT(size > kGenericMaxBucketed);
+ ASSERT(bucket == &PartitionRootBase::gPagedBucket);
+ if (size > kGenericMaxDirectMapped) {
+ if (returnNull)
+ return 0;
+ RELEASE_ASSERT(false);
+ }
+ return partitionDirectMap(root, flags, size);
+ }
// First, look for a usable page in the existing active pages list.
- PartitionPage* page = bucket->activePagesHead;
- partitionSetNewActivePage(bucket, page);
- page = bucket->activePagesHead;
- if (LIKELY(page != 0)) {
- if (LIKELY(partitionPageFreelistHead(page) != 0)) {
- PartitionFreelistEntry* ret = partitionPageFreelistHead(page);
- partitionPageSetFreelistHead(page, partitionFreelistMask(ret->next));
- page->numAllocatedSlots++;
+ // Change active page, accepting the current page as a candidate.
+ if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) {
+ PartitionPage* newPage = bucket->activePagesHead;
+ if (LIKELY(newPage->freelistHead != 0)) {
+ PartitionFreelistEntry* ret = newPage->freelistHead;
+ newPage->freelistHead = partitionFreelistMask(ret->next);
+ newPage->numAllocatedSlots++;
return ret;
}
- ASSERT(page->numUnprovisionedSlots);
- return partitionPageAllocAndFillFreelist(page);
+ ASSERT(newPage->numUnprovisionedSlots);
+ return partitionPageAllocAndFillFreelist(newPage);
}
// Second, look in our list of freed but reserved pages.
PartitionPage* newPage = bucket->freePagesHead;
if (LIKELY(newPage != 0)) {
- ASSERT(newPage != &bucket->root->seedPage);
- bucket->freePagesHead = partitionPageFreePageNext(newPage);
+ ASSERT(newPage != &PartitionRootGeneric::gSeedPage);
+ ASSERT(!newPage->freelistHead);
+ ASSERT(!newPage->numAllocatedSlots);
+ ASSERT(!newPage->numUnprovisionedSlots);
+ ASSERT(newPage->freeCacheIndex == -1);
+ bucket->freePagesHead = newPage->nextPage;
+ void* addr = partitionPageToPointer(newPage);
+ partitionRecommitSystemPages(root, addr, newPage->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
} else {
// Third. If we get here, we need a brand new page.
- void* rawNewPage = partitionAllocPage(bucket->root);
+ size_t numPartitionPages = partitionBucketPartitionPages(bucket);
+ void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages);
+ if (UNLIKELY(!rawNewPage)) {
+ ASSERT(returnNull);
+ return 0;
+ }
+ // Skip the alignment check because it depends on page->bucket, which is not yet set.
newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage);
}
- newPage->activePageNext = 0;
partitionPageReset(newPage, bucket);
bucket->activePagesHead = newPage;
return partitionPageAllocAndFillFreelist(newPage);
}
+static ALWAYS_INLINE void partitionFreePage(PartitionRootBase* root, PartitionPage* page)
+{
+ ASSERT(page->freelistHead);
+ ASSERT(!page->numAllocatedSlots);
+ partitionUnusePage(root, page);
+ // We actually leave the freed page in the active list. We'll sweep it on
+ // to the free page list when we next walk the active page list. Pulling
+ // this trick enables us to use a singly-linked page list for all cases,
+ // which is critical in keeping the page metadata structure down to 32
+ // bytes in size.
+ page->freelistHead = 0;
+ page->numUnprovisionedSlots = 0;
+}
+
+static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page)
+{
+ PartitionRootBase* root = partitionPageToRoot(page);
+
+ // If the page is already registered as empty, give it another life.
+ if (page->freeCacheIndex != -1) {
+ ASSERT(page->freeCacheIndex >= 0);
+ ASSERT(static_cast<unsigned>(page->freeCacheIndex) < kMaxFreeableSpans);
+ ASSERT(root->globalEmptyPageRing[page->freeCacheIndex] == page);
+ root->globalEmptyPageRing[page->freeCacheIndex] = 0;
+ }
+
+ size_t currentIndex = root->globalEmptyPageRingIndex;
+ PartitionPage* pageToFree = root->globalEmptyPageRing[currentIndex];
+ // The page might well have been re-activated, filled up, etc. before we get
+ // around to looking at it here.
+ if (pageToFree) {
+ ASSERT(pageToFree != &PartitionRootBase::gSeedPage);
+ ASSERT(pageToFree->freeCacheIndex >= 0);
+ ASSERT(static_cast<unsigned>(pageToFree->freeCacheIndex) < kMaxFreeableSpans);
+ ASSERT(pageToFree == root->globalEmptyPageRing[pageToFree->freeCacheIndex]);
+ if (!pageToFree->numAllocatedSlots && pageToFree->freelistHead) {
+ // The page is still empty, and not freed, so _really_ free it.
+ partitionFreePage(root, pageToFree);
+ }
+ pageToFree->freeCacheIndex = -1;
+ }
+
+ // We put the empty slot span on our global list of "pages that were once
+    // empty", thus providing it a bit of breathing room to get re-used before
+ // we really free it. This improves performance, particularly on Mac OS X
+ // which has subpar memory management performance.
+ root->globalEmptyPageRing[currentIndex] = page;
+ page->freeCacheIndex = currentIndex;
+ ++currentIndex;
+ if (currentIndex == kMaxFreeableSpans)
+ currentIndex = 0;
+ root->globalEmptyPageRingIndex = currentIndex;
+}
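
Note: a worked pass through the ring, assuming kMaxFreeableSpans == 16 (the actual value is defined in the header; 16 is an assumption here): the first sixteen pages to become empty occupy slots 0..15 with matching freeCacheIndex values. When the seventeenth registers, currentIndex has wrapped back to 0, so the page cached sixteen registrations earlier is inspected; if it is still empty and not already freed (numAllocatedSlots == 0 with a non-null freelistHead), it is only now decommitted via partitionFreePage(); either way it forfeits its slot.
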
+
void partitionFreeSlowPath(PartitionPage* page)
{
PartitionBucket* bucket = page->bucket;
- ASSERT(page != &bucket->root->seedPage);
- ASSERT(bucket->activePagesHead != &bucket->root->seedPage);
+ ASSERT(page != &PartitionRootGeneric::gSeedPage);
+ ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage);
if (LIKELY(page->numAllocatedSlots == 0)) {
// Page became fully unused.
- // If it's the current page, change it!
- if (LIKELY(page == bucket->activePagesHead)) {
- PartitionPage* newPage = 0;
- if (page->activePageNext) {
- partitionSetNewActivePage(bucket, page->activePageNext);
- newPage = bucket->activePagesHead;
- }
-
- ASSERT(newPage != page);
- if (UNLIKELY(!newPage)) {
- // We do not free the last active page in a bucket.
- // To do so is a serious performance issue as we can get into
- // a repeating state change between 0 and 1 objects of a given
- // size allocated; and each state change incurs either a system
- // call or a page fault, or more.
- page->activePageNext = 0;
+ if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) {
+ partitionDirectUnmap(page);
+ return;
+ }
+ // If it's the current active page, attempt to change it. We'd prefer to leave
+ // the page empty as a gentle force towards defragmentation.
+ if (LIKELY(page == bucket->activePagesHead) && page->nextPage) {
+ if (partitionSetNewActivePage(page->nextPage)) {
+ ASSERT(bucket->activePagesHead != page);
+ // Link the empty page back in after the new current page, to
+ // avoid losing a reference to it.
+ // TODO: consider walking the list to link the empty page after
+ // all non-empty pages?
+ PartitionPage* currentPage = bucket->activePagesHead;
+ page->nextPage = currentPage->nextPage;
+ currentPage->nextPage = page;
+ } else {
bucket->activePagesHead = page;
- return;
+ page->nextPage = 0;
}
- bucket->activePagesHead = newPage;
}
-
- partitionUnusePage(page);
- // We actually leave the freed page in the active list as well as
- // putting it on the free list. We'll evict it from the active list soon
- // enough in partitionAllocSlowPath. Pulling this trick enables us to
- // use a singly-linked page list for all cases, which is critical in
- // keeping the page metadata structure down to 32-bytes in size.
- partitionPageMarkFree(page);
- partitionPageSetFreePageNext(page, bucket->freePagesHead);
- bucket->freePagesHead = page;
+ partitionRegisterEmptyPage(page);
} else {
// Ensure that the page is full. That's the only valid case if we
// arrive here.
ASSERT(page->numAllocatedSlots < 0);
+ // A transition of numAllocatedSlots from 0 to -1 is not legal, and
+ // likely indicates a double-free.
+ RELEASE_ASSERT(page->numAllocatedSlots != -1);
+ page->numAllocatedSlots = -page->numAllocatedSlots - 2;
+ ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket) - 1));
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
- page->numAllocatedSlots = -page->numAllocatedSlots - 2;
- ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket) - 1));
- page->activePageNext = bucket->activePagesHead;
+ page->nextPage = bucket->activePagesHead;
bucket->activePagesHead = page;
--bucket->numFullPages;
- // Note: there's an opportunity here to look to see if the old active
- // page is now freeable.
+ // Special case: for a partition page with just a single slot, it may
+ // now be empty and we want to run it through the empty logic.
+ if (UNLIKELY(page->numAllocatedSlots == 0))
+ partitionFreeSlowPath(page);
+ }
+}
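
Note: the sign-flip arithmetic traced for a bucket whose pages hold N slots: filling a page tags it numAllocatedSlots = -N. The inline free path (in the header, not this diff) decrements before calling here, so a full page arrives as -N - 1, and -(-N - 1) - 2 == N - 1: one slot free, matching the ASSERT in the function. An arrival value of -1 could only come from decrementing 0, i.e. freeing into an already-empty page, hence the double-free RELEASE_ASSERT.
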
+
+bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t newSize)
+{
+ ASSERT(partitionBucketIsDirectMapped(page->bucket));
+
+ newSize = partitionCookieSizeAdjustAdd(newSize);
+
+ // Note that the new size might be a bucketed size; this function is called
+ // whenever we're reallocating a direct mapped allocation.
+ newSize = partitionDirectMapSize(newSize);
+ if (newSize < kGenericMinDirectMappedDownsize)
+ return false;
+
+ // bucket->slotSize is the current size of the allocation.
+ size_t currentSize = page->bucket->slotSize;
+ if (newSize == currentSize)
+ return true;
+
+ char* charPtr = static_cast<char*>(partitionPageToPointer(page));
+
+ if (newSize < currentSize) {
+ size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize;
+
+        // Don't reallocate in place if the new size is less than 80% of the full
+ // map size, to avoid holding on to too much unused address space.
+ if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4)
+ return false;
+
+ // Shrink by decommitting unneeded pages and making them inaccessible.
+ size_t decommitSize = currentSize - newSize;
+ partitionDecommitSystemPages(root, charPtr + newSize, decommitSize);
+ setSystemPagesInaccessible(charPtr + newSize, decommitSize);
+ } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) {
+ // Grow within the actually allocated memory. Just need to make the
+ // pages accessible again.
+ size_t recommitSize = newSize - currentSize;
+ setSystemPagesAccessible(charPtr + currentSize, recommitSize);
+ partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize);
+
+#ifndef NDEBUG
+ memset(charPtr + currentSize, kUninitializedByte, recommitSize);
+#endif
+ } else {
+ // We can't perform the realloc in-place.
+ // TODO: support this too when possible.
+ return false;
}
+
+#ifndef NDEBUG
+ // Write a new trailing cookie.
+ partitionCookieWriteValue(charPtr + newSize - kCookieSize);
+#endif
+
+ page->bucket->slotSize = newSize;
+ return true;
}
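
Note: a worked instance of the 80% rule, with 4KB system pages assumed: shrinking a 100-page map to 79 pages gives 79 * 5 = 395 < 100 * 4 = 400, so the function returns false and the caller falls back to a copying realloc; at 80 pages, 400 < 400 fails, so the shrink proceeds in place by decommitting the currentSize - newSize tail.
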
-void* partitionReallocGeneric(PartitionRoot* root, void* ptr, size_t newSize)
+void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize)
{
- RELEASE_ASSERT(newSize <= QuantizedAllocation::kMaxUnquantizedAllocation);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return realloc(ptr, newSize);
#else
- size_t oldIndex;
- if (LIKELY(partitionPointerIsValid(root, ptr))) {
- void* realPtr = partitionCookieFreePointerAdjust(ptr);
- PartitionBucket* bucket = partitionPointerToPage(realPtr)->bucket;
- ASSERT(bucket->root == root);
- oldIndex = bucket - root->buckets();
- } else {
- oldIndex = root->numBuckets;
+ if (UNLIKELY(!ptr))
+ return partitionAllocGeneric(root, newSize);
+ if (UNLIKELY(!newSize)) {
+ partitionFreeGeneric(root, ptr);
+ return 0;
+ }
+
+ RELEASE_ASSERT(newSize <= kGenericMaxDirectMapped);
+
+ ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr)));
+
+ PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
+
+ if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) {
+ // We may be able to perform the realloc in place by changing the
+ // accessibility of memory pages and, if reducing the size, decommitting
+ // them.
+ if (partitionReallocDirectMappedInPlace(root, page, newSize))
+ return ptr;
}
- size_t allocSize = QuantizedAllocation::quantizedSize(newSize);
- allocSize = partitionCookieSizeAdjustAdd(allocSize);
- size_t newIndex = allocSize >> kBucketShift;
- if (newIndex > root->numBuckets)
- newIndex = root->numBuckets;
-
- if (oldIndex == newIndex) {
- // Same bucket. But kNumBuckets indicates the fastMalloc "bucket" so in
- // that case we're not done; we have to forward to fastRealloc.
- if (oldIndex == root->numBuckets)
- return WTF::fastRealloc(ptr, newSize);
+ size_t actualNewSize = partitionAllocActualSize(root, newSize);
+ size_t actualOldSize = partitionAllocGetSize(ptr);
+
+ // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
+ // new size is a significant percentage smaller. We could do the same if we
+ // determine it is a win.
+ if (actualNewSize == actualOldSize) {
+ // Trying to allocate a block of size newSize would give us a block of
+ // the same size as the one we've already got, so no point in doing
+ // anything here.
return ptr;
}
+
// This realloc cannot be resized in-place. Sadness.
void* ret = partitionAllocGeneric(root, newSize);
- size_t copySize = oldIndex << kBucketShift;
- copySize = partitionCookieSizeAdjustSubtract(copySize);
+ size_t copySize = actualOldSize;
if (newSize < copySize)
copySize = newSize;
+
memcpy(ret, ptr, copySize);
partitionFreeGeneric(root, ptr);
return ret;
#endif
}
-#if CPU(32BIT)
-unsigned char SuperPageBitmap::s_bitmap[1 << (32 - kSuperPageShift - 3)];
-
-static int bitmapLock = 0;
-
-void SuperPageBitmap::registerSuperPage(void* ptr)
-{
- ASSERT(!isPointerInSuperPage(ptr));
- uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
- raw >>= kSuperPageShift;
- size_t byteIndex = raw >> 3;
- size_t bit = raw & 7;
- ASSERT(byteIndex < sizeof(s_bitmap));
- // The read/modify/write is not guaranteed atomic, so take a lock.
- spinLockLock(&bitmapLock);
- s_bitmap[byteIndex] |= (1 << bit);
- spinLockUnlock(&bitmapLock);
-}
-
-void SuperPageBitmap::unregisterSuperPage(void* ptr)
-{
- ASSERT(isPointerInSuperPage(ptr));
- uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
- raw >>= kSuperPageShift;
- size_t byteIndex = raw >> 3;
- size_t bit = raw & 7;
- ASSERT(byteIndex < sizeof(s_bitmap));
- // The read/modify/write is not guaranteed atomic, so take a lock.
- spinLockLock(&bitmapLock);
- s_bitmap[byteIndex] &= ~(1 << bit);
- spinLockUnlock(&bitmapLock);
-}
-#endif
-
#ifndef NDEBUG
void partitionDumpStats(const PartitionRoot& root)
@@ -518,7 +931,7 @@ void partitionDumpStats(const PartitionRoot& root)
size_t totalFreeable = 0;
for (i = 0; i < root.numBuckets; ++i) {
const PartitionBucket& bucket = root.buckets()[i];
- if (bucket.activePagesHead == &bucket.root->seedPage && !bucket.freePagesHead && !bucket.numFullPages) {
+ if (bucket.activePagesHead == &PartitionRootGeneric::gSeedPage && !bucket.freePagesHead && !bucket.numFullPages) {
// Empty bucket with no freelist or full pages. Skip reporting it.
continue;
}
@@ -526,19 +939,24 @@ void partitionDumpStats(const PartitionRoot& root)
PartitionPage* freePages = bucket.freePagesHead;
while (freePages) {
++numFreePages;
- freePages = partitionPageFreePageNext(freePages);
+ freePages = freePages->nextPage;
}
- size_t bucketSlotSize = partitionBucketSize(&bucket);
+ size_t bucketSlotSize = bucket.slotSize;
size_t bucketNumSlots = partitionBucketSlots(&bucket);
size_t bucketUsefulStorage = bucketSlotSize * bucketNumSlots;
- size_t bucketWaste = bucket.pageSize - bucketUsefulStorage;
+ size_t bucketPageSize = bucket.numSystemPagesPerSlotSpan * kSystemPageSize;
+ size_t bucketWaste = bucketPageSize - bucketUsefulStorage;
size_t numActiveBytes = bucket.numFullPages * bucketUsefulStorage;
- size_t numResidentBytes = bucket.numFullPages * bucket.pageSize;
+ size_t numResidentBytes = bucket.numFullPages * bucketPageSize;
size_t numFreeableBytes = 0;
size_t numActivePages = 0;
const PartitionPage* page = bucket.activePagesHead;
- do {
- if (page != &bucket.root->seedPage) {
+ while (page) {
+ ASSERT(page != &PartitionRootGeneric::gSeedPage);
+ // A page may be on the active list but freed and not yet swept.
+ if (!page->freelistHead && !page->numUnprovisionedSlots && !page->numAllocatedSlots) {
+ ++numFreePages;
+ } else {
++numActivePages;
numActiveBytes += (page->numAllocatedSlots * bucketSlotSize);
size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * bucketSlotSize;
@@ -548,12 +966,12 @@ void partitionDumpStats(const PartitionRoot& root)
if (!page->numAllocatedSlots)
numFreeableBytes += pageBytesResident;
}
- page = page->activePageNext;
- } while (page != bucket.activePagesHead);
+ page = page->nextPage;
+ }
totalLive += numActiveBytes;
totalResident += numResidentBytes;
totalFreeable += numFreeableBytes;
- printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, static_cast<size_t>(bucket.pageSize), bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, static_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages);
+ printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, bucketPageSize, bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, static_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages);
}
printf("total live: %zu bytes\n", totalLive);
printf("total resident: %zu bytes\n", totalResident);