Diffstat (limited to 'chromium/base/allocator/partition_allocator/partition_freelist_entry.h')
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_freelist_entry.h | 55
1 file changed, 39 insertions, 16 deletions
diff --git a/chromium/base/allocator/partition_allocator/partition_freelist_entry.h b/chromium/base/allocator/partition_allocator/partition_freelist_entry.h
index 7e3282ef412..ce3763b88d7 100644
--- a/chromium/base/allocator/partition_allocator/partition_freelist_entry.h
+++ b/chromium/base/allocator/partition_allocator/partition_freelist_entry.h
@@ -15,33 +15,56 @@
namespace base {
namespace internal {
-// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
-// Transform() with Encode()/Decode() such that the API provides some static
-// type safety.
-//
-// https://crbug.com/787153
+struct EncodedPartitionFreelistEntry;
+
struct PartitionFreelistEntry {
- PartitionFreelistEntry* next;
+ EncodedPartitionFreelistEntry* next;
+
+ PartitionFreelistEntry() = delete;
+ ~PartitionFreelistEntry() = delete;
- static ALWAYS_INLINE PartitionFreelistEntry* Transform(
+ ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
PartitionFreelistEntry* ptr) {
-// We use bswap on little endian as a fast mask for two reasons:
-// 1) If an object is freed and its vtable used where the attacker doesn't
-// get the chance to run allocations between the free and use, the vtable
-// dereference is likely to fault.
-// 2) If the attacker has a linear buffer overflow and elects to try and
-// corrupt a freelist pointer, partial pointer overwrite attacks are
-// thwarted.
-// For big endian, similar guarantees are arrived at with a negation.
+ return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
+ }
+
+ private:
+ friend struct EncodedPartitionFreelistEntry;
+ static ALWAYS_INLINE void* Transform(void* ptr) {
+ // We use bswap on little endian as a fast mask for two reasons:
+ // 1) If an object is freed and its vtable used where the attacker doesn't
+ // get the chance to run allocations between the free and use, the vtable
+ // dereference is likely to fault.
+ // 2) If the attacker has a linear buffer overflow and elects to try and
+ // corrupt a freelist pointer, partial pointer overwrite attacks are
+ // thwarted.
+ // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
- return reinterpret_cast<PartitionFreelistEntry*>(masked);
+ return reinterpret_cast<void*>(masked);
+ }
+};
+
+struct EncodedPartitionFreelistEntry {
+ char scrambled[sizeof(PartitionFreelistEntry*)];
+
+ EncodedPartitionFreelistEntry() = delete;
+ ~EncodedPartitionFreelistEntry() = delete;
+
+ ALWAYS_INLINE static PartitionFreelistEntry* Decode(
+ EncodedPartitionFreelistEntry* ptr) {
+ return reinterpret_cast<PartitionFreelistEntry*>(
+ PartitionFreelistEntry::Transform(ptr));
}
};
+static_assert(sizeof(PartitionFreelistEntry) ==
+ sizeof(EncodedPartitionFreelistEntry),
+ "Should not have padding");
+
} // namespace internal
} // namespace base
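
The scrambling above is an involution: applying Transform() twice yields the original pointer, which is why a single private helper can back both Encode() and Decode(). Below is a minimal standalone sketch of that property, not part of the change itself. It assumes a 64-bit little-endian GCC/Clang target, and ByteSwapUintPtrT is re-implemented here as a stand-in for the base/ helper the header actually uses.

#include <cassert>
#include <cstdint>

// Stand-in for base's ByteSwapUintPtrT; __builtin_bswap64 is a
// GCC/Clang intrinsic. This sketch assumes 64-bit pointers.
static inline uintptr_t ByteSwapUintPtrT(uintptr_t x) {
  static_assert(sizeof(uintptr_t) == 8, "sketch assumes 64-bit pointers");
  return __builtin_bswap64(x);
}

// Mirrors PartitionFreelistEntry::Transform() from the header above.
static inline void* Transform(void* ptr) {
#if defined(ARCH_CPU_BIG_ENDIAN)
  uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
  uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
  return reinterpret_cast<void*>(masked);
}

int main() {
  int object = 0;
  void* scrambled = Transform(&object);
  // Round trip: Transform(Transform(p)) == p, since both a byte swap
  // and a bitwise negation are their own inverses.
  assert(Transform(scrambled) == &object);
  // The scrambled value places the pointer's most significant bytes at
  // the low end, so a stale dereference is likely to fault (point 1 in
  // the comment above), and a partial low-byte overwrite corrupts the
  // decoded pointer's high bytes instead (point 2).
  return 0;
}

The static_assert at the end of the header matters for the same reason: EncodedPartitionFreelistEntry must be exactly pointer-sized so the two types can alias the same freelist slot in memory.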