author     Florian Mayer <fmayer@google.com>    2024-02-23 11:28:20 -0800
committer  GitHub <noreply@github.com>          2024-02-23 11:28:20 -0800
commit     6dd6d487d012a9000fe975133b7935c1f8c658eb (patch)
tree       b46a6c218f811753e98cb07c725c9e9f03224780
parent     a64ff9630ccd305a63fca3ea9cc4bc4b49098495 (diff)
[NFC] Make RingBuffer an atomic pointer (#82547)
This will allow us to atomically swap out RingBuffer and StackDepot. Patched into AOSP and ran debuggerd_tests.
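
For context, the change replaces the raw `char *RawRingBuffer` field with an atomic address that is published with a release store (in mapAndInitializeRingBuffer) and read with an acquire load (in getRingBuffer). Below is a minimal standalone sketch of that publication pattern; the names (Buffer, gBufferAddress, publishBuffer) are illustrative only, not Scudo's actual types.

    // Sketch of the release/acquire pointer-publication pattern this
    // commit adopts. A producer fully initializes a struct, then
    // release-stores its address into an atomic word; readers
    // acquire-load the word and see either nullptr or a fully
    // initialized struct, never a partially written one.
    #include <atomic>
    #include <cstdint>

    struct Buffer {
      uint32_t Elements = 0; // metadata written before publication
    };

    std::atomic<std::uintptr_t> gBufferAddress{0};

    Buffer *getBuffer() {
      // Acquire pairs with the release store in publishBuffer(): if a
      // non-null address is observed, all initializing writes to
      // *Buffer are visible to this thread.
      return reinterpret_cast<Buffer *>(
          gBufferAddress.load(std::memory_order_acquire));
    }

    void publishBuffer(Buffer *B) {
      // All writes to *B made before this store become visible to any
      // reader that observes the new address via getBuffer().
      gBufferAddress.store(reinterpret_cast<std::uintptr_t>(B),
                           std::memory_order_release);
    }

Storing the address as an integer rather than a pointer mirrors the patch's use of atomic_uptr, which also makes the "unpublished" state an explicit zero that can be swapped or cleared atomically.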
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h | 148
1 file changed, 86 insertions(+), 62 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index f13cf9498a79..cd5a07be1576 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -177,6 +177,18 @@ public:
mapAndInitializeRingBuffer();
}
+ void enableRingBuffer() {
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB)
+ RB->Depot->enable();
+ }
+
+ void disableRingBuffer() {
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB)
+ RB->Depot->disable();
+ }
+
// Initialize the embedded GWP-ASan instance. Requires the main allocator to
// be functional, best called from PostInitCallback.
void initGwpAsan() {
@@ -688,14 +700,12 @@ public:
Quarantine.disable();
Primary.disable();
Secondary.disable();
- if (Depot)
- Depot->disable();
+ disableRingBuffer();
}
void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
- if (Depot)
- Depot->enable();
+ enableRingBuffer();
Secondary.enable();
Primary.enable();
Quarantine.enable();
@@ -920,12 +930,14 @@ public:
const char *getStackDepotAddress() {
initThreadMaybe();
- return reinterpret_cast<char *>(Depot);
+ AllocationRingBuffer *RB = getRingBuffer();
+ return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
}
uptr getStackDepotSize() {
initThreadMaybe();
- return StackDepotSize;
+ AllocationRingBuffer *RB = getRingBuffer();
+ return RB ? RB->StackDepotSize : 0;
}
const char *getRegionInfoArrayAddress() const {
@@ -938,12 +950,15 @@ public:
const char *getRingBufferAddress() {
initThreadMaybe();
- return RawRingBuffer;
+ return reinterpret_cast<char *>(getRingBuffer());
}
uptr getRingBufferSize() {
initThreadMaybe();
- return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
+ AllocationRingBuffer *RB = getRingBuffer();
+ return RB && RB->RingBufferElements
+ ? ringBufferSizeInBytes(RB->RingBufferElements)
+ : 0;
}
static const uptr MaxTraceSize = 64;
@@ -1048,10 +1063,6 @@ private:
uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS
- StackDepot *Depot = nullptr;
- uptr StackDepotSize = 0;
- MemMapT RawStackDepotMap;
-
struct AllocationRingBuffer {
struct Entry {
atomic_uptr Ptr;
@@ -1061,16 +1072,23 @@ private:
atomic_u32 DeallocationTrace;
atomic_u32 DeallocationTid;
};
-
+ StackDepot *Depot = nullptr;
+ uptr StackDepotSize = 0;
+ MemMapT RawRingBufferMap;
+ MemMapT RawStackDepotMap;
+ u32 RingBufferElements = 0;
atomic_uptr Pos;
// An array of Size (at least one) elements of type Entry is immediately
// following to this struct.
};
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
// and immediately followed by Size elements of type Entry.
- char *RawRingBuffer = {};
- u32 RingBufferElements = 0;
- MemMapT RawRingBufferMap;
+ atomic_uptr RingBufferAddress = {};
+
+ AllocationRingBuffer *getRingBuffer() {
+ return reinterpret_cast<AllocationRingBuffer *>(
+ atomic_load(&RingBufferAddress, memory_order_acquire));
+ }
// The following might get optimized out by the compiler.
NOINLINE void performSanityChecks() {
@@ -1259,27 +1277,24 @@ private:
storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
}
- StackDepot *getDepotIfEnabled(const Options &Options) {
- if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
- return nullptr;
- return Depot;
- }
-
void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
- auto *Depot = getDepotIfEnabled(Options);
- if (!Depot)
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ return;
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (!RB)
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
- Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
+ Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
Ptr32[MemTagAllocationTidIndex] = getThreadID();
}
- void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
+ void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
+ u32 AllocationTrace, u32 AllocationTid,
uptr AllocationSize, u32 DeallocationTrace,
u32 DeallocationTid) {
- uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
+ uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
typename AllocationRingBuffer::Entry *Entry =
- getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
+ getRingBufferEntry(RB, Pos % RB->RingBufferElements);
// First invalidate our entry so that we don't attempt to interpret a
// partially written state in getSecondaryErrorInfo(). The fences below
@@ -1300,32 +1315,36 @@ private:
void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
uptr Size) {
- auto *Depot = getDepotIfEnabled(Options);
- if (!Depot)
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
- u32 Trace = collectStackTrace(Depot);
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (!RB)
+ return;
+ u32 Trace = collectStackTrace(RB->Depot);
u32 Tid = getThreadID();
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
Ptr32[MemTagAllocationTraceIndex] = Trace;
Ptr32[MemTagAllocationTidIndex] = Tid;
- storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
+ storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
}
void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
u8 PrevTag, uptr Size) {
- auto *Depot = getDepotIfEnabled(Options);
- if (!Depot)
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ return;
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (!RB)
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
- u32 DeallocationTrace = collectStackTrace(Depot);
+ u32 DeallocationTrace = collectStackTrace(RB->Depot);
u32 DeallocationTid = getThreadID();
- storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
+ storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
AllocationTrace, AllocationTid, Size,
DeallocationTrace, DeallocationTid);
}
@@ -1434,7 +1453,7 @@ private:
for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
NextErrorReport != NumErrorReports;
--I) {
- auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
+ auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
if (!EntryPtr)
continue;
@@ -1502,14 +1521,18 @@ private:
}
static typename AllocationRingBuffer::Entry *
- getRingBufferEntry(char *RawRingBuffer, uptr N) {
+ getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
+ char *RBEntryStart =
+ &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
- &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
+ RBEntryStart)[N];
}
static const typename AllocationRingBuffer::Entry *
- getRingBufferEntry(const char *RawRingBuffer, uptr N) {
+ getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
+ const char *RBEntryStart =
+ &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
- &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
+ RBEntryStart)[N];
}
void mapAndInitializeRingBuffer() {
@@ -1549,15 +1572,14 @@ private:
u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
DCHECK(isPowerOfTwo(RingSize));
- StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
- sizeof(atomic_u32) * TabSize;
+ uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
+ sizeof(atomic_u32) * TabSize;
MemMapT DepotMap;
DepotMap.map(
/*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
"scudo:stack_depot");
- Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+ auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
Depot->init(RingSize, TabSize);
- RawStackDepotMap = DepotMap;
MemMapT MemMap;
MemMap.map(
@@ -1565,9 +1587,15 @@ private:
roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
getPageSizeCached()),
"scudo:ring_buffer");
- RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
- RawRingBufferMap = MemMap;
- RingBufferElements = AllocationRingBufferSize;
+ auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
+ RB->RawRingBufferMap = MemMap;
+ RB->RingBufferElements = AllocationRingBufferSize;
+ RB->Depot = Depot;
+ RB->StackDepotSize = StackDepotSize;
+ RB->RawStackDepotMap = DepotMap;
+
+ atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
+ memory_order_release);
static_assert(sizeof(AllocationRingBuffer) %
alignof(typename AllocationRingBuffer::Entry) ==
0,
@@ -1575,16 +1603,16 @@ private:
}
void unmapRingBuffer() {
- auto *RingBuffer = getRingBuffer();
- if (RingBuffer != nullptr) {
- RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
- RawRingBufferMap.getCapacity());
- }
- RawRingBuffer = nullptr;
- if (Depot) {
- RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
- RawStackDepotMap.getCapacity());
- }
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB == nullptr)
+ return;
+ // N.B. because RawStackDepotMap is part of RawRingBufferMap, the order
+ // is very important.
+ RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
+ RB->RawStackDepotMap.getCapacity());
+ RB->RawRingBufferMap.unmap(RB->RawRingBufferMap.getBase(),
+ RB->RawRingBufferMap.getCapacity());
+ atomic_store(&RingBufferAddress, 0, memory_order_release);
}
static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
@@ -1599,10 +1627,6 @@ private:
return (Bytes - sizeof(AllocationRingBuffer)) /
sizeof(typename AllocationRingBuffer::Entry);
}
-
- inline AllocationRingBuffer *getRingBuffer() {
- return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
- }
};
} // namespace scudo
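
As the N.B. comment in unmapRingBuffer notes, the depot's mapping descriptor (RawStackDepotMap) is now a field of the struct that lives inside the ring-buffer mapping itself, so the depot must be unmapped before the ring buffer. A hedged sketch of that ordering constraint, with hypothetical names rather than Scudo's API:

    // Why teardown order matters: the descriptor for the inner mapping
    // is stored inside the outer mapping's memory, so it must be used
    // before the outer mapping is released.
    #include <sys/mman.h>
    #include <cstddef>

    struct Mapping {
      void *Base = nullptr;
      std::size_t Size = 0;
    };

    struct Header {
      Mapping DepotMap; // lives inside the outer mapping's memory
    };

    void teardown(Header *H, Mapping OuterMap) {
      // 1. Unmap the depot first, while H (and H->DepotMap) is still
      //    backed by mapped memory.
      munmap(H->DepotMap.Base, H->DepotMap.Size);
      // 2. Only now release the outer mapping; H dangles afterwards,
      //    so nothing may touch it from here on.
      munmap(OuterMap.Base, OuterMap.Size);
    }

Reversing the two munmap calls would read H->DepotMap out of already-unmapped memory, which is the hazard the comment in the patch warns about.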