path: root/chromium/base/allocator/partition_allocator/partition_page.h
author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2022-02-04 17:20:24 +0100
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2022-02-12 08:15:25 +0000
commit    8fa0776f1f79e91fc9c0b9c1ba11a0a29c05196b (patch)
tree      788d8d7549712682703a0310ca4a0f0860d4802b /chromium/base/allocator/partition_allocator/partition_page.h
parent    606d85f2a5386472314d39923da28c70c60dc8e7 (diff)
BASELINE: Update Chromium to 98.0.4758.90
Change-Id: Ib7c41539bf8a8e0376bd639f27d68294de90f3c8
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/base/allocator/partition_allocator/partition_page.h')
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_page.h | 216
1 file changed, 165 insertions(+), 51 deletions(-)
diff --git a/chromium/base/allocator/partition_allocator/partition_page.h b/chromium/base/allocator/partition_allocator/partition_page.h
index fd4fe89749f..631b2c22585 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.h
+++ b/chromium/base/allocator/partition_allocator/partition_page.h
@@ -24,7 +24,9 @@
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
+#include "base/memory/tagging.h"
#include "base/thread_annotations.h"
+#include "build/build_config.h"
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
#include "base/allocator/partition_allocator/partition_ref_count.h"
@@ -49,27 +51,31 @@ static_assert(
sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
"PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
static_assert(
- kMaxSuperPages / kSuperPageSize <=
+ kMaxSuperPagesInPool / kSuperPageSize <=
std::numeric_limits<
decltype(PartitionSuperPageExtentEntry<
ThreadSafe>::number_of_consecutive_super_pages)>::max(),
"number_of_consecutive_super_pages must be big enough");
// Returns the base of the first super page in the range of consecutive super
-// pages. CAUTION! |extent| must point to the extent of the first super page in
-// the range of consecutive super pages.
+// pages.
+//
+// CAUTION! |extent| must point to the extent of the first super page in the
+// range of consecutive super pages.
template <bool thread_safe>
ALWAYS_INLINE char* SuperPagesBeginFromExtent(
PartitionSuperPageExtentEntry<thread_safe>* extent) {
PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
- PA_DCHECK(IsManagedByNormalBuckets(extent));
+ PA_DCHECK(IsManagedByNormalBuckets(reinterpret_cast<uintptr_t>(extent)));
return base::bits::AlignDown(reinterpret_cast<char*>(extent),
kSuperPageAlignment);
}
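
The AlignDown() call above works because every super page is kSuperPageAlignment-aligned, so any pointer into the metadata area aligns down to the super page base. A minimal standalone sketch of that trick (the constant value is illustrative, and AlignDown() is reimplemented here rather than taken from base/bits.h):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kSuperPageAlignment = 1 << 21;  // 2 MiB, illustrative

// Power-of-two align-down, equivalent to what base::bits::AlignDown() does.
constexpr uintptr_t AlignDown(uintptr_t address, uintptr_t alignment) {
  return address & ~(alignment - 1);
}

int main() {
  uintptr_t super_page = 5 * kSuperPageAlignment;  // some aligned base
  uintptr_t extent = super_page + 0x40;            // metadata entry inside it
  assert(AlignDown(extent, kSuperPageAlignment) == super_page);
  return 0;
}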
// Returns the end of the last super page in the range of consecutive super
-// pages. CAUTION! |extent| must point to the extent of the first super page in
-// the range of consecutive super pages.
+// pages.
+//
+// CAUTION! |extent| must point to the extent of the first super page in the
+// range of consecutive super pages.
template <bool thread_safe>
ALWAYS_INLINE char* SuperPagesEndFromExtent(
PartitionSuperPageExtentEntry<thread_safe>* extent) {
@@ -112,10 +118,16 @@ using AllocationStateMap =
// from the empty/decommitted list on to the active list.
template <bool thread_safe>
struct __attribute__((packed)) SlotSpanMetadata {
+ private:
PartitionFreelistEntry* freelist_head = nullptr;
+
+ public:
SlotSpanMetadata<thread_safe>* next_slot_span = nullptr;
PartitionBucket<thread_safe>* const bucket = nullptr;
+ // TODO(lizeb): Make as many fields as possible private or const, to
+ // encapsulate things more clearly.
+ //
// Deliberately signed, 0 for empty or decommitted slot spans, -n for full
// slot spans:
int16_t num_allocated_slots = 0;
@@ -123,20 +135,45 @@ struct __attribute__((packed)) SlotSpanMetadata {
// -1 if not in the empty cache. < kMaxFreeableSpans.
int16_t empty_cache_index : kEmptyCacheIndexBits;
uint16_t can_store_raw_size : 1;
- uint16_t unused : (16 - kEmptyCacheIndexBits - 1);
+ uint16_t freelist_is_sorted : 1;
+ uint16_t unused : (16 - kEmptyCacheIndexBits - 1 - 1);
// Cannot use the full 64 bits in this bitfield, as this structure is embedded
// in PartitionPage, which has other fields as well, and must fit in 32 bytes.
+ // CHECK()ed in AllocNewSlotSpan().
+#if defined(PA_HAS_64_BITS_POINTERS) && defined(OS_APPLE)
+ // System page size is not a constant on Apple OSes, but is either 4 or 16kiB
+ // (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
+ // PartitionPageSize() is 4 times the OS page size.
+ static constexpr int16_t kMaxSlotsPerSlotSpan =
+ 4 * (1 << 14) / kSmallestBucket;
+#else
+ // A slot span can "span" multiple PartitionPages, but then its slot size is
+ // larger, so it doesn't have as many slots.
+ static constexpr int16_t kMaxSlotsPerSlotSpan =
+ PartitionPageSize() / kSmallestBucket;
+#endif // defined(PA_HAS_64_BITS_POINTERS) && defined(OS_APPLE)
+
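+// Worked through with numbers: in the Apple worst case the system page is
+// 16 kiB, so PartitionPageSize() is 4 * (1 << 14) = 65536 bytes; assuming
+// kSmallestBucket is 16 (the actual value comes from the partition allocator
+// constants), that caps a slot span at 4096 slots, comfortably within
+// int16_t. A compilable restatement of that arithmetic:
+//
+//   #include <cstdint>
+//   #include <limits>
+//
+//   constexpr int kAppleSystemPageSize = 1 << 14;            // 16 kiB worst case
+//   constexpr int kPartitionPageSize = 4 * kAppleSystemPageSize;
+//   constexpr int kSmallestBucketAssumed = 16;               // assumed value
+//   constexpr int kMaxSlots = kPartitionPageSize / kSmallestBucketAssumed;
+//
+//   static_assert(kMaxSlots <= std::numeric_limits<int16_t>::max(),
+//                 "slot counts must fit the int16_t fields above");
+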
explicit SlotSpanMetadata(PartitionBucket<thread_safe>* bucket);
// Public API
// Note the matching Alloc() functions are in PartitionPage.
- BASE_EXPORT NOINLINE void FreeSlowPath();
+ BASE_EXPORT NOINLINE void FreeSlowPath(size_t number_of_freed);
+ ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
ALWAYS_INLINE void Free(void* ptr);
+ // Appends the passed freelist to the slot-span's freelist. Please note that
+ // the function doesn't increment the tags of the passed freelist entries,
+ // since FreeNoHooks() did it already.
+ ALWAYS_INLINE void AppendFreeList(PartitionFreelistEntry* head,
+ PartitionFreelistEntry* tail,
+ size_t number_of_freed);
void Decommit(PartitionRoot<thread_safe>* root);
void DecommitIfPossible(PartitionRoot<thread_safe>* root);
+ // Sorts the freelist in ascending addresses order.
+ void SortFreelist();
+
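+// SortFreelist() restores ascending address order, which tends to keep later
+// freelist traversals cache-friendly. One way to express the operation on a
+// toy singly-linked list (FreeNode is a stand-in; the real
+// PartitionFreelistEntry encodes and tags its next pointers, so the actual
+// implementation differs):
+//
+//   #include <algorithm>
+//   #include <cstddef>
+//   #include <vector>
+//
+//   struct FreeNode { FreeNode* next; };
+//
+//   FreeNode* SortByAddress(FreeNode* head) {
+//     std::vector<FreeNode*> nodes;
+//     for (FreeNode* n = head; n; n = n->next) nodes.push_back(n);
+//     std::sort(nodes.begin(), nodes.end());  // pointer order == address order
+//     for (std::size_t i = 0; i + 1 < nodes.size(); ++i)
+//       nodes[i]->next = nodes[i + 1];
+//     if (!nodes.empty()) nodes.back()->next = nullptr;
+//     return nodes.empty() ? nullptr : nodes.front();
+//   }
+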
// Pointer manipulation functions. These must be static as the input
// |slot_span| pointer may be the result of an offset calculation and
// therefore cannot be trusted. The objective of these functions is to
@@ -156,6 +193,9 @@ struct __attribute__((packed)) SlotSpanMetadata {
ALWAYS_INLINE void SetRawSize(size_t raw_size);
ALWAYS_INLINE size_t GetRawSize() const;
+ ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
+ return freelist_head;
+ }
ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);
// Returns size of the region used within a slot. The used region comprises
@@ -174,7 +214,7 @@ struct __attribute__((packed)) SlotSpanMetadata {
// This includes padding due to rounding done at allocation; we don't know the
// requested size at deallocation, so we use this in both places.
- ALWAYS_INLINE size_t GetSizeForBookkeeping() const {
+ ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
// This could be more precise for allocations where CanStoreRawSize()
// returns true (large allocations). However this is called for *every*
// allocation, so we don't want an extra branch there.
@@ -192,13 +232,7 @@ struct __attribute__((packed)) SlotSpanMetadata {
// - Raw size minus extras, for large buckets and direct-mapped allocations
// (see also the comment in CanStoreRawSize() for more info). This is
// equal to requested size.
- size_t size_to_ajdust;
- if (LIKELY(!CanStoreRawSize())) {
- size_to_ajdust = bucket->slot_size;
- } else {
- size_to_ajdust = GetRawSize();
- }
- return root->AdjustSizeForExtrasSubtract(size_to_ajdust);
+ return root->AdjustSizeForExtrasSubtract(GetUtilizedSlotSize());
}
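
The deleted branch (complete with its size_to_ajdust typo) is now centralized behind GetUtilizedSlotSize(). Schematically, the selection it performs is the following (an illustrative free function, not the real member):

#include <cstddef>

// Bookkeeping subtracts extras from the bucket's slot size for normal
// buckets, or from the stored raw size for large/direct-mapped allocations.
std::size_t UtilizedSlotSize(bool can_store_raw_size, std::size_t slot_size,
                             std::size_t raw_size) {
  return can_store_raw_size ? raw_size : slot_size;
}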
// Returns the total size of the slots that are currently provisioned.
@@ -239,7 +273,10 @@ struct __attribute__((packed)) SlotSpanMetadata {
static SlotSpanMetadata sentinel_slot_span_;
// For the sentinel.
constexpr SlotSpanMetadata() noexcept
- : empty_cache_index(0), can_store_raw_size(false), unused(0) {}
+ : empty_cache_index(0),
+ can_store_raw_size(false),
+ freelist_is_sorted(true),
+ unused(0) {}
};
static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
"SlotSpanMetadata must fit into a Page Metadata slot.");
@@ -326,20 +363,20 @@ static_assert(offsetof(PartitionPage<NotThreadSafe>,
subsequent_page_metadata) == 0,
"");
-ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
- PA_DCHECK(IsReservationStart(ptr));
- uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
- PA_DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+// TODO(bartekn): char* -> PartitionPage* or uintptr_t
+ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(uintptr_t address) {
+ PA_DCHECK(IsReservationStart(address));
+ PA_DCHECK(!(address & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
- return reinterpret_cast<char*>(pointer_as_uint + SystemPageSize());
+ return reinterpret_cast<char*>(address + SystemPageSize());
}
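
Taking uintptr_t drops a cast and a reinterpret round-trip; the layout itself is unchanged: metadata sits one system page past the super page base, sharing the mapping with the leading guard page. A sketch of the arithmetic (a fixed 4 kiB page is assumed here; on Apple OSes SystemPageSize() is determined at runtime, as noted earlier):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kSystemPageSize = 1 << 12;  // assumed 4 kiB
constexpr uintptr_t kSuperPageSize = 1 << 21;   // illustrative
constexpr uintptr_t kSuperPageOffsetMask = kSuperPageSize - 1;

uintptr_t MetadataAreaOf(uintptr_t super_page_base) {
  assert(!(super_page_base & kSuperPageOffsetMask));  // mirrors the PA_DCHECK
  return super_page_base + kSystemPageSize;
}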
template <bool thread_safe>
ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
-PartitionSuperPageToExtent(char* ptr) {
+PartitionSuperPageToExtent(uintptr_t address) {
return reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
- PartitionSuperPageToMetadataArea(ptr));
+ PartitionSuperPageToMetadataArea(address));
}
// Size that should be reserved for state bitmap (if present) inside a super
@@ -410,13 +447,17 @@ ALWAYS_INLINE void PartitionSuperPageExtentEntry<
// area (i.e. area devoted to slot spans). It doesn't check whether it's within
// a valid slot span. It merely ensures it doesn't fall in a meta-data region
// that would surely never contain user data.
-ALWAYS_INLINE bool IsWithinSuperPagePayload(void* ptr, bool with_quarantine) {
- PA_DCHECK(IsManagedByNormalBuckets(ptr));
- char* super_page_base = reinterpret_cast<char*>(
- reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask);
- void* payload_start = SuperPagePayloadBegin(super_page_base, with_quarantine);
- void* payload_end = SuperPagePayloadEnd(super_page_base);
- return ptr >= payload_start && ptr < payload_end;
+ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
+ bool with_quarantine) {
+ address = memory::UnmaskPtr(address);
+ PA_DCHECK(IsManagedByNormalBuckets(address));
+ // TODO(bartekn): char* -> uintptr_t, incl. callees
+ char* super_page_base = reinterpret_cast<char*>(address & kSuperPageBaseMask);
+ uintptr_t payload_start = reinterpret_cast<uintptr_t>(
+ SuperPagePayloadBegin(super_page_base, with_quarantine));
+ uintptr_t payload_end =
+ reinterpret_cast<uintptr_t>(SuperPagePayloadEnd(super_page_base));
+ return address >= payload_start && address < payload_end;
}
// Converts from a pointer to the PartitionPage object (within super pages's
@@ -440,28 +481,29 @@ ALWAYS_INLINE void* PartitionPage<thread_safe>::ToSlotSpanStartPtr(
// While it is generally valid for |ptr| to be in the middle of an allocation,
// care has to be taken with direct maps that span multiple super pages. This
// function's behavior is undefined if |ptr| lies in a subsequent super page.
+//
+// TODO(bartekn): void* -> uintptr_t, Ptr -> Addr
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>* PartitionPage<thread_safe>::FromPtr(
void* ptr) {
- uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
- char* super_page_ptr =
- reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
+ uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t super_page = address & kSuperPageBaseMask;
#if DCHECK_IS_ON()
- PA_DCHECK(IsReservationStart(super_page_ptr));
- if (IsManagedByNormalBuckets(ptr)) {
+ PA_DCHECK(IsReservationStart(super_page));
+ if (IsManagedByNormalBuckets(address)) {
auto* extent =
reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
- PartitionSuperPageToMetadataArea(super_page_ptr));
+ PartitionSuperPageToMetadataArea(super_page));
PA_DCHECK(
- IsWithinSuperPagePayload(ptr, extent->root->IsQuarantineAllowed()));
+ IsWithinSuperPagePayload(address, extent->root->IsQuarantineAllowed()));
} else {
- PA_CHECK(ptr >= super_page_ptr + PartitionPageSize());
+ PA_CHECK(address >= super_page + PartitionPageSize());
}
#endif
uintptr_t partition_page_index =
- (pointer_as_uint & kSuperPageOffsetMask) >> PartitionPageShift();
+ (address & kSuperPageOffsetMask) >> PartitionPageShift();
// Index 0 is invalid because it is the super page extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
// pages. This repeats part of the payload PA_DCHECK above, which may check
@@ -469,13 +511,15 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionPage<thread_safe>::FromPtr(
PA_DCHECK(partition_page_index);
PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
return reinterpret_cast<PartitionPage<thread_safe>*>(
- PartitionSuperPageToMetadataArea(super_page_ptr) +
+ PartitionSuperPageToMetadataArea(super_page) +
(partition_page_index << kPageMetadataShift));
}
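
FromPtr() resolves an address to its metadata entry in two shifts: the offset within the super page selects a partition-page index, and the index scales by the metadata stride (the static_asserts earlier cap each entry at 32 bytes). A sketch with illustrative shift values:

#include <cstdint>

constexpr uintptr_t kSuperPageSize = 1 << 21;  // illustrative
constexpr uintptr_t kSuperPageOffsetMask = kSuperPageSize - 1;
constexpr int kPartitionPageShift = 14;        // 16 kiB partition pages
constexpr int kPageMetadataShift = 5;          // 32-byte metadata entries

uintptr_t MetadataEntryFor(uintptr_t address, uintptr_t metadata_area) {
  uintptr_t index = (address & kSuperPageOffsetMask) >> kPartitionPageShift;
  // Index 0 (extent metadata) and the last index (guard pages) are invalid,
  // which the PA_DCHECKs above enforce.
  return metadata_area + (index << kPageMetadataShift);
}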
// Converts from a pointer to the SlotSpanMetadata object (within a super
// pages's metadata) into a pointer to the beginning of the slot span. This
// works on direct maps too.
+//
+// TODO(bartekn): void* -> uintptr_t, Ptr -> Addr
template <bool thread_safe>
ALWAYS_INLINE void* SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(
const SlotSpanMetadata* slot_span) {
@@ -508,9 +552,12 @@ ALWAYS_INLINE void* SlotSpanMetadata<thread_safe>::ToSlotSpanStartPtr(
//
// CAUTION! For direct-mapped allocation, |ptr| has to be within the first
// partition page.
+//
+// TODO(bartekn): void* -> uintptr_t, Ptr -> Addr
template <bool thread_safe>
ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromSlotInnerPtr(void* ptr) {
+ ptr = memory::UnmaskPtr(ptr);
auto* page = PartitionPage<thread_safe>::FromPtr(ptr);
PA_DCHECK(page->is_valid);
// Partition pages in the same slot span share the same slot span metadata
@@ -530,8 +577,8 @@ SlotSpanMetadata<thread_safe>::FromSlotInnerPtr(void* ptr) {
template <bool thread_safe>
ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
SlotSpanMetadata<thread_safe>::ToSuperPageExtent() const {
- char* super_page_base = reinterpret_cast<char*>(
- reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask);
+ uintptr_t super_page_base =
+ reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
return reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
PartitionSuperPageToMetadataArea(super_page_base));
}
@@ -544,9 +591,10 @@ SlotSpanMetadata<thread_safe>::FromSlotStartPtr(void* slot_start) {
auto* slot_span = FromSlotInnerPtr(slot_start);
// Checks that the pointer is a multiple of slot size.
auto* slot_span_start = ToSlotSpanStartPtr(slot_span);
- PA_DCHECK(!((reinterpret_cast<uintptr_t>(slot_start) -
- reinterpret_cast<uintptr_t>(slot_span_start)) %
- slot_span->bucket->slot_size));
+ PA_DCHECK(
+ !((reinterpret_cast<uintptr_t>(memory::UnmaskPtr(slot_start)) -
+ reinterpret_cast<uintptr_t>(memory::UnmaskPtr(slot_span_start))) %
+ slot_span->bucket->slot_size));
return slot_span;
}
@@ -572,6 +620,23 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
(reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
(reinterpret_cast<uintptr_t>(new_head) & kSuperPageBaseMask));
freelist_head = new_head;
+ // Inserted something new in the freelist, assume that it is not sorted
+ // anymore.
+ freelist_is_sorted = false;
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE PartitionFreelistEntry*
+SlotSpanMetadata<thread_safe>::PopForAlloc(size_t size) {
+ // Not using bucket->slot_size directly as the compiler doesn't know that
+ // |bucket->slot_size| is the same as |size|.
+ PA_DCHECK(size == bucket->slot_size);
+ PartitionFreelistEntry* result = freelist_head;
+ // Not setting freelist_is_sorted to false since this doesn't destroy
+ // ordering.
+ freelist_head = freelist_head->GetNext(size);
+ num_allocated_slots++;
+ return result;
}
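
PopForAlloc() is the O(1) allocation fast path: it unlinks the head and touches nothing else, so an ascending-address list stays ascending, which is why freelist_is_sorted survives the call. The same operation on a toy list (FreeNode is a stand-in for the real entry type):

struct FreeNode { FreeNode* next; };

FreeNode* Pop(FreeNode*& head) {
  FreeNode* result = head;  // caller guarantees a non-empty list
  head = head->next;
  return result;
}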
template <bool thread_safe>
@@ -594,7 +659,51 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(void* slot_start)
SetFreelistHead(entry);
--num_allocated_slots;
if (UNLIKELY(num_allocated_slots <= 0)) {
- FreeSlowPath();
+ FreeSlowPath(1);
+ } else {
+ // All single-slot allocations must go through the slow path to
+ // correctly update the raw size.
+ PA_DCHECK(!CanStoreRawSize());
+ }
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
+ PartitionFreelistEntry* head,
+ PartitionFreelistEntry* tail,
+ size_t number_of_freed)
+ EXCLUSIVE_LOCKS_REQUIRED(
+ PartitionRoot<thread_safe>::FromSlotSpan(this)->lock_) {
+#if DCHECK_IS_ON()
+ auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
+ root->lock_.AssertAcquired();
+ PA_DCHECK(!tail->GetNext(bucket->slot_size));
+ PA_DCHECK(number_of_freed);
+ PA_DCHECK(num_allocated_slots);
+ if (CanStoreRawSize()) {
+ PA_DCHECK(number_of_freed == 1);
+ }
+ {
+ size_t number_of_entries = 0;
+ for (auto* entry = head; entry;
+ entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
+ auto* unmasked_entry = reinterpret_cast<char*>(memory::UnmaskPtr(entry));
+ // Check that all entries belong to this slot span.
+ PA_DCHECK(ToSlotSpanStartPtr(this) <= unmasked_entry);
+ PA_DCHECK(unmasked_entry < (static_cast<char*>(ToSlotSpanStartPtr(this)) +
+ bucket->get_bytes_per_span()));
+ }
+ PA_DCHECK(number_of_entries == number_of_freed);
+ }
+#endif
+
+ tail->SetNext(freelist_head);
+ SetFreelistHead(head);
+ PA_DCHECK(static_cast<size_t>(num_allocated_slots) >= number_of_freed);
+ num_allocated_slots -= number_of_freed;
+
+ if (UNLIKELY(num_allocated_slots <= 0)) {
+ FreeSlowPath(number_of_freed);
} else {
// All single-slot allocations must go through the slow path to
// correctly update the raw size.
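
The splice at the core of AppendFreeList() is constant-time no matter how many entries were freed: only |tail|'s next pointer and the list head change. On a toy list (the real entries additionally encode and tag their next pointers, as the comment on AppendFreeList() notes):

struct FreeNode { FreeNode* next; };

// Prepend the chain [head .. tail] to |list|. |tail->next| must be null on
// entry, matching the PA_DCHECK in AppendFreeList().
FreeNode* Splice(FreeNode* list, FreeNode* head, FreeNode* tail) {
  tail->next = list;
  return head;  // the new head of the combined list
}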
@@ -651,15 +760,19 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
// Returns the state bitmap from a pointer within a normal-bucket super page.
// It's the caller's responsibility to ensure that the bitmap exists.
+//
+// TODO(bartekn): void* -> address, Ptr -> Address
ALWAYS_INLINE AllocationStateMap* StateBitmapFromPointer(void* ptr) {
- PA_DCHECK(IsManagedByNormalBuckets(ptr));
- auto* super_page_base = reinterpret_cast<char*>(
- reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask);
+ uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+ PA_DCHECK(IsManagedByNormalBuckets(address));
+ auto* super_page_base = reinterpret_cast<char*>(address & kSuperPageBaseMask);
return SuperPageStateBitmap(super_page_base);
}
// Iterates over all slot spans in a super-page. |Callback| must return true if
// early return is needed.
+//
+// TODO(bartekn): char* -> uintptr_t
template <bool thread_safe, typename Callback>
void IterateSlotSpans(char* super_page_base,
bool with_quarantine,
@@ -669,7 +782,8 @@ void IterateSlotSpans(char* super_page_base,
!(reinterpret_cast<uintptr_t>(super_page_base) % kSuperPageAlignment));
auto* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
- PartitionSuperPageToMetadataArea(super_page_base));
+ PartitionSuperPageToMetadataArea(
+ reinterpret_cast<uintptr_t>(super_page_base)));
extent_entry->root->lock_.AssertAcquired();
#endif
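
The |Callback| contract above ("return true if early return is needed") is a short-circuiting visitor. Schematically, the loop shape IterateSlotSpans() implements looks like this (SlotSpan and the traversal order are stand-ins for the real metadata walk):

template <typename SlotSpan, typename Callback>
void ForEachSlotSpan(SlotSpan* first, SlotSpan* last, Callback callback) {
  for (SlotSpan* span = first; span != last; span = span->next) {
    if (callback(span))
      return;  // callback requested early return
  }
}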