diff options
Diffstat (limited to 'chromium/base/allocator')
9 files changed, 283 insertions, 136 deletions
diff --git a/chromium/base/allocator/allocator.gni b/chromium/base/allocator/allocator.gni index 9c82dd29ba9..62e03b364d8 100644 --- a/chromium/base/allocator/allocator.gni +++ b/chromium/base/allocator/allocator.gni @@ -2,11 +2,13 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +import("//build/config/chromecast_build.gni") import("//build/config/sanitizers/sanitizers.gni") # Temporarily disable tcmalloc on arm64 linux to get rid of compilation errors. if (is_android || is_mac || is_ios || is_asan || is_lsan || is_tsan || - is_msan || is_win || is_fuchsia || (is_linux && target_cpu == "arm64")) { + is_msan || is_win || is_fuchsia || (is_linux && target_cpu == "arm64") || + (is_cast_audio_only && target_cpu == "arm")) { _default_allocator = "none" } else { _default_allocator = "tcmalloc" diff --git a/chromium/base/allocator/debugallocation_shim.cc b/chromium/base/allocator/debugallocation_shim.cc index 479cfcad72d..24addf9d099 100644 --- a/chromium/base/allocator/debugallocation_shim.cc +++ b/chromium/base/allocator/debugallocation_shim.cc @@ -7,9 +7,10 @@ // AFDO can mess with them. Better not to use AFDO there. This is a // temporary hack. We will add a mechanism in the build system to // avoid using -fauto-profile for tcmalloc files. -#if !defined(__clang__) && (defined(OS_CHROMEOS) || __GNUC__ > 5) +#if !defined(__clang__) && \ + (defined(OS_CHROMEOS) || (__GNUC__ > 5 && __GNUC__ < 7)) // Note that this option only seems to be available in the chromeos GCC 4.9 -// toolchain, and stock GCC 5 and up. +// toolchain, and stock GCC 5 up to 7. 
#pragma GCC optimize ("no-auto-profile") #endif diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc index 4b6d55fdf35..297a6bbe76e 100644 --- a/chromium/base/allocator/partition_allocator/partition_alloc.cc +++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc @@ -493,14 +493,14 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size; DCHECK(slot_index < num_slots); slot_usage[slot_index] = 0; - entry = internal::PartitionFreelistEntry::Transform(entry->next); + entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next); #if !defined(OS_WIN) // If we have a slot where the masked freelist entry is 0, we can actually // discard that freelist entry because touching a discarded page is // guaranteed to return original content or 0. (Note that this optimization // won't fire on big-endian machines because the masking function is // negation.) - if (!internal::PartitionFreelistEntry::Transform(entry)) + if (!internal::PartitionFreelistEntry::Encode(entry)) last_slot = slot_index; #endif } @@ -534,25 +534,33 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { DCHECK(truncated_slots > 0); size_t num_new_entries = 0; page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots); + // Rewrite the freelist. 
- internal::PartitionFreelistEntry** entry_ptr = &page->freelist_head; + internal::PartitionFreelistEntry* head = nullptr; + internal::PartitionFreelistEntry* back = head; for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) { if (slot_usage[slot_index]) continue; + auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>( ptr + (slot_size * slot_index)); - *entry_ptr = internal::PartitionFreelistEntry::Transform(entry); - entry_ptr = reinterpret_cast<internal::PartitionFreelistEntry**>(entry); + if (!head) { + head = entry; + back = entry; + } else { + back->next = internal::PartitionFreelistEntry::Encode(entry); + back = entry; + } num_new_entries++; #if !defined(OS_WIN) last_slot = slot_index; #endif } - // Terminate the freelist chain. - *entry_ptr = nullptr; - // The freelist head is stored unmasked. - page->freelist_head = - internal::PartitionFreelistEntry::Transform(page->freelist_head); + + page->freelist_head = head; + if (back) + back->next = internal::PartitionFreelistEntry::Encode(nullptr); + DCHECK(num_new_entries == num_slots - page->num_allocated_slots); // Discard the memory. DiscardSystemPages(begin_ptr, unprovisioned_bytes); diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc index 85b782a5eef..bdd85f0ae1d 100644 --- a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc +++ b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
+#include <atomic> #include <vector> + #include "base/allocator/partition_allocator/partition_alloc.h" +#include "base/threading/platform_thread.h" #include "base/time/time.h" #include "base/timer/lap_timer.h" - #include "build/build_config.h" - #include "testing/gtest/include/gtest/gtest.h" #include "testing/perf/perf_test.h" @@ -28,19 +29,47 @@ constexpr int kMultiBucketIncrement = 13; // Final size is 24 + (13 * 22) = 310 bytes. constexpr int kMultiBucketRounds = 22; -class MemoryAllocationPerfTest : public testing::Test { +class AllocatingThread : public PlatformThread::Delegate { public: - MemoryAllocationPerfTest() - : timer_(kWarmupRuns, kTimeLimit, kTimeCheckInterval) {} - void SetUp() override { alloc_.init(); } - void TearDown() override { - alloc_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages | - PartitionPurgeDiscardUnusedSystemPages); + explicit AllocatingThread(PartitionAllocatorGeneric* allocator) + : allocator_(allocator), should_stop_(false) { + PlatformThread::Create(0, this, &thread_handle_); } - LapTimer timer_; - PartitionAllocatorGeneric alloc_; + + ~AllocatingThread() override { + should_stop_ = true; + PlatformThread::Join(thread_handle_); + } + + // Allocates and frees memory in a loop until |should_stop_| becomes true. + void ThreadMain() override { + uint64_t count = 0; + while (true) { + // Only check |should_stop_| every 2^15 iterations, as it is a + // sequentially consistent access, hence expensive. 
+ if (count % (1 << 15) == 0 && should_stop_) + break; + void* data = allocator_->root()->Alloc(10, ""); + allocator_->root()->Free(data); + count++; + } + } + + PartitionAllocatorGeneric* allocator_; + std::atomic<bool> should_stop_; + PlatformThreadHandle thread_handle_; }; +void DisplayResults(const std::string& measurement, + const std::string& modifier, + size_t iterations_per_second) { + perf_test::PrintResult(measurement, modifier, "", iterations_per_second, + "runs/s", true); + perf_test::PrintResult(measurement, modifier, "", + static_cast<size_t>(1e9 / iterations_per_second), + "ns/run", true); +} + class MemoryAllocationPerfNode { public: MemoryAllocationPerfNode* GetNext() const { return next_; } @@ -59,110 +88,169 @@ class MemoryAllocationPerfNode { MemoryAllocationPerfNode* next_ = nullptr; }; -TEST_F(MemoryAllocationPerfTest, SingleBucket) { - timer_.Reset(); - MemoryAllocationPerfNode* first = reinterpret_cast<MemoryAllocationPerfNode*>( - alloc_.root()->Alloc(40, "<testing>")); - MemoryAllocationPerfNode* cur = first; - do { - MemoryAllocationPerfNode* next = +class MemoryAllocationPerfTest : public testing::Test { + public: + MemoryAllocationPerfTest() + : timer_(kWarmupRuns, kTimeLimit, kTimeCheckInterval) {} + void SetUp() override { alloc_.init(); } + void TearDown() override { + alloc_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages | + PartitionPurgeDiscardUnusedSystemPages); + } + + protected: + void TestSingleBucket() { + MemoryAllocationPerfNode* first = reinterpret_cast<MemoryAllocationPerfNode*>( alloc_.root()->Alloc(40, "<testing>")); - CHECK_NE(next, nullptr); - cur->SetNext(next); - cur = next; - timer_.NextLap(); - } while (!timer_.HasTimeLimitExpired()); - // next_ = nullptr only works if the class constructor is called (it's not - // called in this case because then we can allocate arbitrary-length - // payloads.) 
- cur->SetNext(nullptr); - - MemoryAllocationPerfNode::FreeAll(first, alloc_); - - perf_test::PrintResult("MemoryAllocationPerfTest", - " single bucket allocation (40 bytes)", "", - timer_.LapsPerSecond(), "runs/s", true); -} - -TEST_F(MemoryAllocationPerfTest, SingleBucketWithFree) { - timer_.Reset(); - // Allocate an initial element to make sure the bucket stays set up. - void* elem = alloc_.root()->Alloc(40, "<testing>"); - do { - void* cur = alloc_.root()->Alloc(40, "<testing>"); - CHECK_NE(cur, nullptr); - alloc_.root()->Free(cur); - timer_.NextLap(); - } while (!timer_.HasTimeLimitExpired()); - - alloc_.root()->Free(elem); - perf_test::PrintResult("MemoryAllocationPerfTest", - " single bucket allocation + free (40 bytes)", "", - timer_.LapsPerSecond(), "runs/s", true); -} -// Failing on Nexus5x: crbug.com/949838 -#if defined(OS_ANDROID) -#define MAYBE_MultiBucket DISABLED_MultiBucket -#else -#define MAYBE_MultiBucket MultiBucket -#endif -TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucket) { - timer_.Reset(); - MemoryAllocationPerfNode* first = reinterpret_cast<MemoryAllocationPerfNode*>( - alloc_.root()->Alloc(40, "<testing>")); - MemoryAllocationPerfNode* cur = first; - do { - for (int i = 0; i < kMultiBucketRounds; i++) { + timer_.Reset(); + MemoryAllocationPerfNode* cur = first; + do { MemoryAllocationPerfNode* next = - reinterpret_cast<MemoryAllocationPerfNode*>(alloc_.root()->Alloc( - kMultiBucketMinimumSize + (i * kMultiBucketIncrement), - "<testing>")); + reinterpret_cast<MemoryAllocationPerfNode*>( + alloc_.root()->Alloc(40, "<testing>")); CHECK_NE(next, nullptr); cur->SetNext(next); cur = next; - } - timer_.NextLap(); - } while (!timer_.HasTimeLimitExpired()); - cur->SetNext(nullptr); + timer_.NextLap(); + } while (!timer_.HasTimeLimitExpired()); + // next_ = nullptr only works if the class constructor is called (it's not + // called in this case because then we can allocate arbitrary-length + // payloads.) 
+ cur->SetNext(nullptr); - MemoryAllocationPerfNode::FreeAll(first, alloc_); + MemoryAllocationPerfNode::FreeAll(first, alloc_); - perf_test::PrintResult("MemoryAllocationPerfTest", " multi-bucket allocation", - "", timer_.LapsPerSecond() * kMultiBucketRounds, - "runs/s", true); -} + DisplayResults("MemoryAllocationPerfTest", + " single bucket allocation (40 bytes)", + timer_.LapsPerSecond()); + } -TEST_F(MemoryAllocationPerfTest, MultiBucketWithFree) { - timer_.Reset(); - std::vector<void*> elems; - // Do an initial round of allocation to make sure that the buckets stay in use - // (and aren't accidentally released back to the OS). - for (int i = 0; i < kMultiBucketRounds; i++) { - void* cur = alloc_.root()->Alloc( - kMultiBucketMinimumSize + (i * kMultiBucketIncrement), "<testing>"); - CHECK_NE(cur, nullptr); - elems.push_back(cur); + void TestSingleBucketWithFree() { + // Allocate an initial element to make sure the bucket stays set up. + void* elem = alloc_.root()->Alloc(40, "<testing>"); + + timer_.Reset(); + do { + void* cur = alloc_.root()->Alloc(40, "<testing>"); + CHECK_NE(cur, nullptr); + alloc_.root()->Free(cur); + timer_.NextLap(); + } while (!timer_.HasTimeLimitExpired()); + + alloc_.root()->Free(elem); + DisplayResults("MemoryAllocationPerfTest", + " single bucket allocation + free (40 bytes)", + timer_.LapsPerSecond()); + } + + void TestMultiBucket() { + MemoryAllocationPerfNode* first = + reinterpret_cast<MemoryAllocationPerfNode*>( + alloc_.root()->Alloc(40, "<testing>")); + MemoryAllocationPerfNode* cur = first; + + timer_.Reset(); + do { + for (int i = 0; i < kMultiBucketRounds; i++) { + MemoryAllocationPerfNode* next = + reinterpret_cast<MemoryAllocationPerfNode*>(alloc_.root()->Alloc( + kMultiBucketMinimumSize + (i * kMultiBucketIncrement), + "<testing>")); + CHECK_NE(next, nullptr); + cur->SetNext(next); + cur = next; + } + timer_.NextLap(); + } while (!timer_.HasTimeLimitExpired()); + cur->SetNext(nullptr); + + 
MemoryAllocationPerfNode::FreeAll(first, alloc_); + + DisplayResults("MemoryAllocationPerfTest", " multi-bucket allocation", + timer_.LapsPerSecond() * kMultiBucketRounds); } - do { + void TestMultiBucketWithFree() { + std::vector<void*> elems; + elems.reserve(kMultiBucketRounds); + // Do an initial round of allocation to make sure that the buckets stay in + // use (and aren't accidentally released back to the OS). for (int i = 0; i < kMultiBucketRounds; i++) { void* cur = alloc_.root()->Alloc( kMultiBucketMinimumSize + (i * kMultiBucketIncrement), "<testing>"); CHECK_NE(cur, nullptr); - alloc_.root()->Free(cur); + elems.push_back(cur); + } + + timer_.Reset(); + do { + for (int i = 0; i < kMultiBucketRounds; i++) { + void* cur = alloc_.root()->Alloc( + kMultiBucketMinimumSize + (i * kMultiBucketIncrement), "<testing>"); + CHECK_NE(cur, nullptr); + alloc_.root()->Free(cur); + } + timer_.NextLap(); + } while (!timer_.HasTimeLimitExpired()); + + for (void* ptr : elems) { + alloc_.root()->Free(ptr); } - timer_.NextLap(); - } while (!timer_.HasTimeLimitExpired()); - for (void* ptr : elems) { - alloc_.root()->Free(ptr); + DisplayResults("MemoryAllocationPerfTest", + " multi-bucket allocation + free", + timer_.LapsPerSecond() * kMultiBucketRounds); } - perf_test::PrintResult( - "MemoryAllocationPerfTest", " multi-bucket allocation + free", "", - timer_.LapsPerSecond() * kMultiBucketRounds, "runs/s", true); + LapTimer timer_; + PartitionAllocatorGeneric alloc_; +}; + +TEST_F(MemoryAllocationPerfTest, SingleBucket) { + TestSingleBucket(); +} + +TEST_F(MemoryAllocationPerfTest, SingleBucketWithCompetingThread) { + AllocatingThread t(&alloc_); + TestSingleBucket(); +} + +TEST_F(MemoryAllocationPerfTest, SingleBucketWithFree) { + TestSingleBucketWithFree(); +} + +TEST_F(MemoryAllocationPerfTest, SingleBucketWithFreeWithCompetingThread) { + AllocatingThread t(&alloc_); + TestSingleBucketWithFree(); +} + +// Failing on Nexus5x: crbug.com/949838 +#if defined(OS_ANDROID) +#define 
MAYBE_MultiBucket DISABLED_MultiBucket +#define MAYBE_MultiBucketWithCompetingThread \ + DISABLED_MultiBucketWithCompetingThread +#else +#define MAYBE_MultiBucket MultiBucket +#define MAYBE_MultiBucketWithCompetingThread MultiBucketWithCompetingThread +#endif +TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucket) { + TestMultiBucket(); +} + +TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucketWithCompetingThread) { + AllocatingThread t(&alloc_); + TestMultiBucket(); +} + +TEST_F(MemoryAllocationPerfTest, MultiBucketWithFree) { + TestMultiBucketWithFree(); +} + +TEST_F(MemoryAllocationPerfTest, MultiBucketWithFreeWithCompetingThread) { + AllocatingThread t(&alloc_); + TestMultiBucketWithFree(); } } // anonymous namespace diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.cc b/chromium/base/allocator/partition_allocator/partition_bucket.cc index 8e54f552b22..2e239a80fa7 100644 --- a/chromium/base/allocator/partition_allocator/partition_bucket.cc +++ b/chromium/base/allocator/partition_allocator/partition_bucket.cc @@ -79,7 +79,7 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot); PartitionFreelistEntry* next_entry = reinterpret_cast<PartitionFreelistEntry*>(slot); - next_entry->next = PartitionFreelistEntry::Transform(nullptr); + next_entry->next = PartitionFreelistEntry::Encode(nullptr); DCHECK(!bucket->active_pages_head); DCHECK(!bucket->empty_pages_head); @@ -394,10 +394,10 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) { freelist_pointer += size; PartitionFreelistEntry* next_entry = reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); - entry->next = PartitionFreelistEntry::Transform(next_entry); + entry->next = PartitionFreelistEntry::Encode(next_entry); entry = next_entry; } - entry->next = PartitionFreelistEntry::Transform(nullptr); + entry->next = PartitionFreelistEntry::Encode(nullptr); } else { 
page->freelist_head = nullptr; } @@ -555,7 +555,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, if (LIKELY(new_page->freelist_head != nullptr)) { PartitionFreelistEntry* entry = new_page->freelist_head; PartitionFreelistEntry* new_head = - PartitionFreelistEntry::Transform(entry->next); + EncodedPartitionFreelistEntry::Decode(entry->next); new_page->freelist_head = new_head; new_page->num_allocated_slots++; return entry; diff --git a/chromium/base/allocator/partition_allocator/partition_freelist_entry.h b/chromium/base/allocator/partition_allocator/partition_freelist_entry.h index 7e3282ef412..ce3763b88d7 100644 --- a/chromium/base/allocator/partition_allocator/partition_freelist_entry.h +++ b/chromium/base/allocator/partition_allocator/partition_freelist_entry.h @@ -15,33 +15,56 @@ namespace base { namespace internal { -// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace -// Transform() with Encode()/Decode() such that the API provides some static -// type safety. -// -// https://crbug.com/787153 +struct EncodedPartitionFreelistEntry; + struct PartitionFreelistEntry { - PartitionFreelistEntry* next; + EncodedPartitionFreelistEntry* next; + + PartitionFreelistEntry() = delete; + ~PartitionFreelistEntry() = delete; - static ALWAYS_INLINE PartitionFreelistEntry* Transform( + ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode( PartitionFreelistEntry* ptr) { -// We use bswap on little endian as a fast mask for two reasons: -// 1) If an object is freed and its vtable used where the attacker doesn't -// get the chance to run allocations between the free and use, the vtable -// dereference is likely to fault. -// 2) If the attacker has a linear buffer overflow and elects to try and -// corrupt a freelist pointer, partial pointer overwrite attacks are -// thwarted. -// For big endian, similar guarantees are arrived at with a negation. 
+ return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr)); + } + + private: + friend struct EncodedPartitionFreelistEntry; + static ALWAYS_INLINE void* Transform(void* ptr) { + // We use bswap on little endian as a fast mask for two reasons: + // 1) If an object is freed and its vtable used where the attacker doesn't + // get the chance to run allocations between the free and use, the vtable + // dereference is likely to fault. + // 2) If the attacker has a linear buffer overflow and elects to try and + // corrupt a freelist pointer, partial pointer overwrite attacks are + // thwarted. + // For big endian, similar guarantees are arrived at with a negation. #if defined(ARCH_CPU_BIG_ENDIAN) uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); #else uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr)); #endif - return reinterpret_cast<PartitionFreelistEntry*>(masked); + return reinterpret_cast<void*>(masked); + } +}; + +struct EncodedPartitionFreelistEntry { + char scrambled[sizeof(PartitionFreelistEntry*)]; + + EncodedPartitionFreelistEntry() = delete; + ~EncodedPartitionFreelistEntry() = delete; + + ALWAYS_INLINE static PartitionFreelistEntry* Decode( + EncodedPartitionFreelistEntry* ptr) { + return reinterpret_cast<PartitionFreelistEntry*>( + PartitionFreelistEntry::Transform(ptr)); } }; +static_assert(sizeof(PartitionFreelistEntry) == + sizeof(EncodedPartitionFreelistEntry), + "Should not have padding"); + } // namespace internal } // namespace base diff --git a/chromium/base/allocator/partition_allocator/partition_page.h b/chromium/base/allocator/partition_allocator/partition_page.h index 5a0e70f9711..d2e580bdda8 100644 --- a/chromium/base/allocator/partition_allocator/partition_page.h +++ b/chromium/base/allocator/partition_allocator/partition_page.h @@ -11,8 +11,27 @@ #include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_cookie.h" #include 
"base/allocator/partition_allocator/partition_freelist_entry.h" +#include "base/allocator/partition_allocator/random.h" #include "base/logging.h" +namespace { + +// Returns true if we've hit the end of a random-length period. We don't want to +// invoke `RandomValue` too often, because we call this function in a hot spot +// (`Free`), and `RandomValue` incurs the cost of atomics. +#if !DCHECK_IS_ON() +bool RandomPeriod() { + static thread_local uint8_t counter = 0; + if (UNLIKELY(counter == 0)) { + counter = base::RandomValue(); + } + counter--; + return counter == 0; +} +#endif + +} // namespace + namespace base { namespace internal { @@ -201,29 +220,35 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const { } ALWAYS_INLINE void PartitionPage::Free(void* ptr) { -#if DCHECK_IS_ON() size_t slot_size = this->bucket->slot_size; const size_t raw_size = get_raw_size(); if (raw_size) { slot_size = raw_size; } +#if DCHECK_IS_ON() // If these asserts fire, you probably corrupted memory. PartitionCookieCheckValue(ptr); PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size - kCookieSize); memset(ptr, kFreedByte, slot_size); +#else + // `memset` only once in a while. + if (UNLIKELY(RandomPeriod())) { + memset(ptr, kFreedByte, slot_size); + } #endif DCHECK(this->num_allocated_slots); - CHECK(ptr != freelist_head); // Catches an immediate double free. + // Catches an immediate double free. + CHECK(ptr != freelist_head); // Look for double free one level deeper in debug. 
- DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform( - freelist_head->next)); + DCHECK(!freelist_head || + ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next)); internal::PartitionFreelistEntry* entry = static_cast<internal::PartitionFreelistEntry*>(ptr); - entry->next = internal::PartitionFreelistEntry::Transform(freelist_head); + entry->next = internal::PartitionFreelistEntry::Encode(freelist_head); freelist_head = entry; --this->num_allocated_slots; if (UNLIKELY(this->num_allocated_slots <= 0)) { diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.h b/chromium/base/allocator/partition_allocator/partition_root_base.h index 9e971f9f712..a3f9175b3cb 100644 --- a/chromium/base/allocator/partition_allocator/partition_root_base.h +++ b/chromium/base/allocator/partition_allocator/partition_root_base.h @@ -107,8 +107,8 @@ ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket, // the size metadata. DCHECK(page->get_raw_size() == 0); internal::PartitionFreelistEntry* new_head = - internal::PartitionFreelistEntry::Transform( - static_cast<internal::PartitionFreelistEntry*>(ret)->next); + internal::EncodedPartitionFreelistEntry::Decode( + page->freelist_head->next); page->freelist_head = new_head; page->num_allocated_slots++; } else { diff --git a/chromium/base/allocator/partition_allocator/random.h b/chromium/base/allocator/partition_allocator/random.h index 85cb66d21de..a9aaa7f6ccb 100644 --- a/chromium/base/allocator/partition_allocator/random.h +++ b/chromium/base/allocator/partition_allocator/random.h @@ -15,7 +15,7 @@ namespace base { // `base::RandUint64` which is very unpredictable, but which is expensive due to // the need to call into the kernel. Therefore this generator uses a fast, // entirely user-space function after initialization. 
-uint32_t RandomValue(); +BASE_EXPORT uint32_t RandomValue(); // Sets the seed for the random number generator to a known value, to cause the // RNG to generate a predictable sequence of outputs. May be called multiple |