path: root/chromium/base/trace_event
author    Allan Sandfeld Jensen <allan.jensen@theqtcompany.com>  2016-08-01 12:59:39 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>             2016-08-04 12:40:43 +0000
commit    28b1110370900897ab652cb420c371fab8857ad4 (patch)
tree      41b32127d23b0df4f2add2a27e12dc87bddb260e /chromium/base/trace_event
parent    399c965b6064c440ddcf4015f5f8e9d131c7a0a6 (diff)
BASELINE: Update Chromium to 53.0.2785.41
Also adds a few extra files for extensions.

Change-Id: Iccdd55d98660903331cf8b7b29188da781830af4
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/base/trace_event')
-rw-r--r--  chromium/base/trace_event/etw_manifest/BUILD.gn | 10
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context.cc | 11
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context.h | 6
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc | 4
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 4
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register.cc | 275
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register.h | 392
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register_posix.cc | 9
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc | 97
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register_win.cc | 9
-rw-r--r--  chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc | 3
-rw-r--r--  chromium/base/trace_event/malloc_dump_provider.cc | 2
-rw-r--r--  chromium/base/trace_event/memory_allocator_dump.cc | 7
-rw-r--r--  chromium/base/trace_event/memory_allocator_dump_unittest.cc | 5
-rw-r--r--  chromium/base/trace_event/memory_dump_manager.cc | 252
-rw-r--r--  chromium/base/trace_event/memory_dump_manager.h | 44
-rw-r--r--  chromium/base/trace_event/memory_dump_manager_unittest.cc | 90
-rw-r--r--  chromium/base/trace_event/memory_dump_provider.h | 6
-rw-r--r--  chromium/base/trace_event/memory_dump_request_args.cc | 4
-rw-r--r--  chromium/base/trace_event/memory_dump_request_args.h | 33
-rw-r--r--  chromium/base/trace_event/memory_infra_background_whitelist.cc | 131
-rw-r--r--  chromium/base/trace_event/memory_infra_background_whitelist.h | 33
-rw-r--r--  chromium/base/trace_event/process_memory_dump.cc | 51
-rw-r--r--  chromium/base/trace_event/process_memory_dump.h | 30
-rw-r--r--  chromium/base/trace_event/process_memory_dump_unittest.cc | 142
-rw-r--r--  chromium/base/trace_event/trace_config.cc | 49
-rw-r--r--  chromium/base/trace_event/trace_config.h | 27
-rw-r--r--  chromium/base/trace_event/trace_config_memory_test_util.h | 24
-rw-r--r--  chromium/base/trace_event/trace_config_unittest.cc | 162
-rw-r--r--  chromium/base/trace_event/trace_event.gypi | 2
-rw-r--r--  chromium/base/trace_event/trace_event_android.cc | 3
-rw-r--r--  chromium/base/trace_event/trace_event_argument.cc | 8
-rw-r--r--  chromium/base/trace_event/trace_event_impl.cc | 2
-rw-r--r--  chromium/base/trace_event/trace_event_memory_overhead.cc | 2
-rw-r--r--  chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc | 5
-rw-r--r--  chromium/base/trace_event/trace_event_unittest.cc | 115
-rw-r--r--  chromium/base/trace_event/trace_log.cc | 14
-rw-r--r--  chromium/base/trace_event/trace_sampling_thread.cc | 4
-rw-r--r--  chromium/base/trace_event/winheap_dump_provider_win.cc | 9
-rw-r--r--  chromium/base/trace_event/winheap_dump_provider_win_unittest.cc | 2
40 files changed, 1479 insertions, 599 deletions
diff --git a/chromium/base/trace_event/etw_manifest/BUILD.gn b/chromium/base/trace_event/etw_manifest/BUILD.gn
index 1e16672825e..19c4ecfdc40 100644
--- a/chromium/base/trace_event/etw_manifest/BUILD.gn
+++ b/chromium/base/trace_event/etw_manifest/BUILD.gn
@@ -18,8 +18,12 @@ message_compiler("chrome_events_win") {
user_mode_logging = true
- # TOOD(brucedawson) bug 569989: Enable ETW manifest and compile and link it
- # into the proper places. Enabling as-is may add the resources to too many
- # targets. See the bug for more information.
+ # The only code generated from chrome_events_win.man is a header file that
+ # is included by trace_event_etw_export_win.cc, so there is no need to
+ # compile any generated code. The other thing which compile_generated_code
+ # controls in this context is linking in the .res file generated from the
+ # manifest. However this is only needed for ETW provider registration which
+ # is done by UIforETW (https://github.com/google/UIforETW) and therefore the
+ # manifest resource can be skipped in Chrome.
compile_generated_code = false
}
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context.cc b/chromium/base/trace_event/heap_profiler_allocation_context.cc
index 374d5043d19..0f330a817ed 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context.cc
@@ -31,12 +31,23 @@ bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
}
+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+ return !(lhs == rhs);
+}
+
AllocationContext::AllocationContext(): type_name(nullptr) {}
+AllocationContext::AllocationContext(const Backtrace& backtrace,
+ const char* type_name)
+ : backtrace(backtrace), type_name(type_name) {}
+
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
}
+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+ return !(lhs == rhs);
+}
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context.h b/chromium/base/trace_event/heap_profiler_allocation_context.h
index 3566dd08f5d..24e2dec73f1 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_context.h
@@ -71,18 +71,20 @@ struct BASE_EXPORT Backtrace {
// If the stack is higher than what can be stored here, the bottom frames
// (the ones closer to main()) are stored. Depth of 12 is enough for most
// pseudo traces (see above), but not for native traces, where we need more.
- enum { kMaxFrameCount = 24 };
+ enum { kMaxFrameCount = 48 };
StackFrame frames[kMaxFrameCount];
size_t frame_count;
};
bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
+bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
// The |AllocationContext| is context metadata that is kept for every allocation
// when heap profiling is enabled. To simplify memory management for book-
// keeping, this struct has a fixed size.
struct BASE_EXPORT AllocationContext {
AllocationContext();
+ AllocationContext(const Backtrace& backtrace, const char* type_name);
Backtrace backtrace;
@@ -95,6 +97,8 @@ struct BASE_EXPORT AllocationContext {
bool BASE_EXPORT operator==(const AllocationContext& lhs,
const AllocationContext& rhs);
+bool BASE_EXPORT operator!=(const AllocationContext& lhs,
+ const AllocationContext& rhs);
// Struct to store the size and count of the allocations.
struct AllocationMetrics {
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index fac4a8a7b43..31f311a918e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -168,8 +168,8 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
- auto backtrace = std::begin(ctx.backtrace.frames);
- auto backtrace_end = std::end(ctx.backtrace.frames);
+ auto* backtrace = std::begin(ctx.backtrace.frames);
+ auto* backtrace_end = std::end(ctx.backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 07d5f253dd4..3064a6a7117 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,8 +34,8 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
- auto actual = std::begin(ctx.backtrace.frames);
- auto actual_bottom = actual + ctx.backtrace.frame_count;
+ auto* actual = std::begin(ctx.backtrace.frames);
+ auto* actual_bottom = actual + ctx.backtrace.frame_count;
auto expected = std::begin(expected_backtrace);
auto expected_bottom = std::end(expected_backtrace);
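The auto-to-auto* changes in this file and the tracker only make the deduced pointer type explicit. A small sketch (hypothetical array name) of why the spelling matters: both forms deduce the same type, but auto* fails to compile if the initializer ever stops being a pointer.

#include <iterator>

void PointerDeductionExample() {
  const void* frames[4] = {};
  auto begin1 = std::begin(frames);   // Deduces const void**, implicitly.
  auto* begin2 = std::begin(frames);  // Same type, but the pointer-ness is
                                      // stated and checked by the compiler.
  (void)begin1;
  (void)begin2;
}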
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.cc b/chromium/base/trace_event/heap_profiler_allocation_register.cc
index a0fc4be282d..2c2cd378bbd 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.cc
@@ -4,116 +4,20 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
+#include <algorithm>
+
#include "base/trace_event/trace_event_memory_overhead.h"
namespace base {
namespace trace_event {
-AllocationRegister::AllocationRegister()
- : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {}
-
-AllocationRegister::AllocationRegister(uint32_t num_cells)
- // Reserve enough address space to store |num_cells_| entries if necessary,
- // with a guard page after it to crash the program when attempting to store
- // more entries.
- : num_cells_(num_cells),
- cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ *
- sizeof(Cell)))),
- buckets_(static_cast<CellIndex*>(
- AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
-
- // The free list is empty. The first unused cell is cell 1, because index
- // 0 is used as list terminator.
- free_list_(0),
- next_unused_cell_(1) {}
-
-AllocationRegister::~AllocationRegister() {
- FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
- FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell));
-}
-
-void AllocationRegister::Insert(void* address,
- size_t size,
- AllocationContext context) {
- DCHECK(address != nullptr);
- if (size == 0)
- return;
-
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not yet present, so insert it.
- if (*idx_ptr == 0) {
- *idx_ptr = GetFreeCell();
-
- // The address stored in a cell is const as long as it is exposed (via the
- // iterators or |Get|), but because cells are re-used, a const cast is
- // required to set it on insert and remove.
- void* const& allocation_address = cells_[*idx_ptr].allocation.address;
- const_cast<void*&>(allocation_address) = address;
- cells_[*idx_ptr].next = 0;
- }
-
- cells_[*idx_ptr].allocation.size = size;
- cells_[*idx_ptr].allocation.context = context;
-}
-
-void AllocationRegister::Remove(void* address) {
- // Get a pointer to the index of the cell that stores |address|. The index can
- // be an element of |buckets_| or the |next| member of a cell.
- CellIndex* idx_ptr = Lookup(address);
- CellIndex freed_idx = *idx_ptr;
-
- // If the index is 0, the address was not there in the first place.
- if (freed_idx == 0)
- return;
-
- // The cell at the index is now free, remove it from the linked list for
- // |Hash(address)|.
- Cell* freed_cell = &cells_[freed_idx];
- *idx_ptr = freed_cell->next;
-
- // Put the free cell at the front of the free list.
- freed_cell->next = free_list_;
- free_list_ = freed_idx;
-
- // Reset the address, so that on iteration the free cell is ignored.
- const_cast<void*&>(freed_cell->allocation.address) = nullptr;
-}
-
-AllocationRegister::Allocation* AllocationRegister::Get(void* address) {
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not present in the table.
- return *idx_ptr == 0 ? nullptr : &cells_[*idx_ptr].allocation;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::begin() const {
- // Initialize the iterator's index to 0. Cell 0 never stores an entry.
- ConstIterator iterator(*this, 0);
- // Incrementing will advance the iterator to the first used cell.
- ++iterator;
- return iterator;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::end() const {
- // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
- // so index |next_unused_cell_| is an iterator past the last element, in line
- // with the STL iterator conventions.
- return ConstIterator(*this, next_unused_cell_);
-}
-
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register,
- CellIndex index)
- : register_(alloc_register), index_(index) {}
+ const AllocationRegister& alloc_register, AllocationIndex index)
+ : register_(alloc_register),
+ index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
- // Find the next cell with a non-null address until all cells that could
- // possibly be used have been iterated. A null address indicates a free cell.
- do {
- index_++;
- } while (index_ < register_.next_unused_cell_ &&
- register_.cells_[index_].allocation.address == nullptr);
+ index_ = register_.allocations_.Next(index_ + 1);
}
bool AllocationRegister::ConstIterator::operator!=(
@@ -121,53 +25,38 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
-operator*() const {
- return register_.cells_[index_].allocation;
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
+ return register_.GetAllocation(index_);
}
-AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
- // The list head is in |buckets_| at the hash offset.
- CellIndex* idx_ptr = &buckets_[Hash(address)];
+size_t AllocationRegister::BacktraceHasher::operator () (
+ const Backtrace& backtrace) const {
+ const size_t kSampleLength = 10;
- // Chase down the list until the cell that holds |address| is found,
- // or until the list ends.
- while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
- idx_ptr = &cells_[*idx_ptr].next;
+ uintptr_t total_value = 0;
- return idx_ptr;
-}
-
-AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
- // First try to re-use a cell from the freelist.
- if (free_list_) {
- CellIndex idx = free_list_;
- free_list_ = cells_[idx].next;
- return idx;
+ size_t head_end = std::min(backtrace.frame_count, kSampleLength);
+ for (size_t i = 0; i != head_end; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
- // Otherwise pick the next cell that has not been touched before.
- CellIndex idx = next_unused_cell_;
- next_unused_cell_++;
-
- // If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
- // the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
+ size_t tail_start = backtrace.frame_count -
+ std::min(backtrace.frame_count - head_end, kSampleLength);
+ for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+ }
- DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+ total_value += backtrace.frame_count;
- return idx;
+ // These magic constants give best results in terms of average collisions
+ // per backtrace. They were found by replaying real backtraces from Linux
+ // and Android against different hash functions.
+ return (total_value * 131101) >> 14;
}
-// static
-uint32_t AllocationRegister::Hash(void* address) {
+size_t AllocationRegister::AddressHasher::operator () (
+ const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
@@ -178,22 +67,114 @@ uint32_t AllocationRegister::Hash(void* address) {
const uintptr_t a = 131101;
const uintptr_t shift = 14;
const uintptr_t h = (key * a) >> shift;
- return static_cast<uint32_t>(h) & kNumBucketsMask;
+ return h;
+}
+
+AllocationRegister::AllocationRegister()
+ : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
+
+AllocationRegister::AllocationRegister(size_t allocation_capacity,
+ size_t backtrace_capacity)
+ : allocations_(allocation_capacity),
+ backtraces_(backtrace_capacity) {}
+
+AllocationRegister::~AllocationRegister() {
+}
+
+void AllocationRegister::Insert(const void* address,
+ size_t size,
+ const AllocationContext& context) {
+ DCHECK(address != nullptr);
+ if (size == 0) {
+ return;
+ }
+
+ AllocationInfo info = {
+ size,
+ context.type_name,
+ InsertBacktrace(context.backtrace)
+ };
+
+ // Try to insert the allocation.
+ auto index_and_flag = allocations_.Insert(address, info);
+ if (!index_and_flag.second) {
+ // |address| is already there - overwrite the allocation info.
+ auto& old_info = allocations_.Get(index_and_flag.first).second;
+ RemoveBacktrace(old_info.backtrace_index);
+ old_info = info;
+ }
+}
+
+void AllocationRegister::Remove(const void* address) {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return;
+ }
+
+ const AllocationInfo& info = allocations_.Get(index).second;
+ RemoveBacktrace(info.backtrace_index);
+ allocations_.Remove(index);
+}
+
+bool AllocationRegister::Get(const void* address,
+ Allocation* out_allocation) const {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return false;
+ }
+
+ if (out_allocation) {
+ *out_allocation = GetAllocation(index);
+ }
+ return true;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+ return ConstIterator(*this, allocations_.Next(0));
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+ return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
- // Estimate memory overhead by counting all of the cells that have ever been
- // touched. Don't report mmapped memory as allocated, because it has not been
- // allocated by malloc.
size_t allocated = sizeof(AllocationRegister);
size_t resident = sizeof(AllocationRegister)
- // Include size of touched cells (size of |*cells_|).
- + sizeof(Cell) * next_unused_cell_
- // Size of |*buckets_|.
- + sizeof(CellIndex) * kNumBuckets;
+ + allocations_.EstimateUsedMemory()
+ + backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
+AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
+ const Backtrace& backtrace) {
+ auto index = backtraces_.Insert(backtrace, 0).first;
+ auto& backtrace_and_count = backtraces_.Get(index);
+ backtrace_and_count.second++;
+ return index;
+}
+
+void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
+ auto& backtrace_and_count = backtraces_.Get(index);
+ if (--backtrace_and_count.second == 0) {
+ // Backtrace is not referenced anymore - remove it.
+ backtraces_.Remove(index);
+ }
+}
+
+AllocationRegister::Allocation AllocationRegister::GetAllocation(
+ AllocationMap::KVIndex index) const {
+ const auto& address_and_info = allocations_.Get(index);
+ const auto& backtrace_and_count = backtraces_.Get(
+ address_and_info.second.backtrace_index);
+ return {
+ address_and_info.first,
+ address_and_info.second.size,
+ AllocationContext(
+ backtrace_and_count.first,
+ address_and_info.second.type_name)
+ };
+}
+
} // namespace trace_event
} // namespace base
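The new BacktraceHasher sums up to ten frame addresses from the head and the tail of the backtrace, mixes in the frame count, and then applies the same multiplicative constants (x131101, >>14) as the address hash. A self-contained sketch of that sampling hash over a plain frame vector standing in for Backtrace:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sampling hash mirroring BacktraceHasher in this patch: sum the first and
// last |kSampleLength| frame addresses, add the frame count, then apply the
// multiplicative hash also used for single addresses.
size_t HashBacktrace(const std::vector<const void*>& frames) {
  const size_t kSampleLength = 10;
  uintptr_t total_value = 0;

  size_t head_end = std::min(frames.size(), kSampleLength);
  for (size_t i = 0; i != head_end; ++i)
    total_value += reinterpret_cast<uintptr_t>(frames[i]);

  size_t tail_start =
      frames.size() - std::min(frames.size() - head_end, kSampleLength);
  for (size_t i = tail_start; i != frames.size(); ++i)
    total_value += reinterpret_cast<uintptr_t>(frames[i]);

  total_value += frames.size();
  return (total_value * 131101) >> 14;  // Same constants as AddressHasher.
}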
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.h b/chromium/base/trace_event/heap_profiler_allocation_register.h
index 976f2f50a9c..86e2721c56e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.h
@@ -8,77 +8,288 @@
#include <stddef.h>
#include <stdint.h>
+#include <utility>
+
+#include "base/bits.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
namespace base {
namespace trace_event {
+class AllocationRegisterTest;
+
+namespace internal {
+
+// Allocates a region of virtual address space of |size| rounded up to the
+// system page size. The memory is zeroed by the system. A guard page is
+// added after the end.
+void* AllocateGuardedVirtualMemory(size_t size);
+
+// Frees a region of virtual address space allocated by a call to
+// |AllocateVirtualMemory|.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
+
+// Hash map that mmaps memory only once in the constructor. Its API is
+// similar to std::unordered_map, only index (KVIndex) is used to address
+template <size_t NumBuckets, class Key, class Value, class KeyHasher>
+class FixedHashMap {
+ // To keep things simple we don't call destructors.
+ static_assert(is_trivially_destructible<Key>::value &&
+ is_trivially_destructible<Value>::value,
+ "Key and Value shouldn't have destructors");
+ public:
+ using KVPair = std::pair<const Key, Value>;
+
+ // For implementation simplicity API uses integer index instead
+ // of iterators. Most operations (except FindValidIndex) on KVIndex
+ // are O(1).
+ using KVIndex = size_t;
+ static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+
+ // Capacity controls how many items this hash map can hold, and largely
+ // affects memory footprint.
+ FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
+
+ ~FixedHashMap() {
+ FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+ FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
+ }
+
+ std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
+ Cell** p_cell = Lookup(key);
+ Cell* cell = *p_cell;
+ if (cell) {
+ return {static_cast<KVIndex>(cell - cells_), false}; // not inserted
+ }
+
+ // Get a free cell and link it.
+ *p_cell = cell = GetFreeCell();
+ cell->p_prev = p_cell;
+ cell->next = nullptr;
+
+ // Initialize key/value pair. Since key is 'const Key' this is the
+ // only way to initialize it.
+ new (&cell->kv) KVPair(key, value);
+
+ return {static_cast<KVIndex>(cell - cells_), true}; // inserted
+ }
+
+ void Remove(KVIndex index) {
+ DCHECK_LT(index, next_unused_cell_);
+
+ Cell* cell = &cells_[index];
+
+ // Unlink the cell.
+ *cell->p_prev = cell->next;
+ if (cell->next) {
+ cell->next->p_prev = cell->p_prev;
+ }
+ cell->p_prev = nullptr; // mark as free
+
+ // Add it to the free list.
+ cell->next = free_list_;
+ free_list_ = cell;
+ }
+
+ KVIndex Find(const Key& key) const {
+ Cell* cell = *Lookup(key);
+ return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
+ }
+
+ KVPair& Get(KVIndex index) {
+ return cells_[index].kv;
+ }
+
+ const KVPair& Get(KVIndex index) const {
+ return cells_[index].kv;
+ }
+
+ // Finds next index that has a KVPair associated with it. Search starts
+ // with the specified index. Returns kInvalidKVIndex if nothing was found.
+ // To find the first valid index, call this function with 0. Continue
+ // calling with the last_index + 1 until kInvalidKVIndex is returned.
+ KVIndex Next(KVIndex index) const {
+ for (;index < next_unused_cell_; ++index) {
+ if (cells_[index].p_prev) {
+ return index;
+ }
+ }
+ return kInvalidKVIndex;
+ }
+
+ // Estimates number of bytes used in allocated memory regions.
+ size_t EstimateUsedMemory() const {
+ size_t page_size = base::GetPageSize();
+ // |next_unused_cell_| is the first cell that wasn't touched, i.e.
+ // it's the number of touched cells.
+ return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
+ bits::Align(sizeof(Bucket) * NumBuckets, page_size);
+ }
+
+ private:
+ friend base::trace_event::AllocationRegisterTest;
+
+ struct Cell {
+ KVPair kv;
+ Cell* next;
+
+ // Conceptually this is |prev| in a doubly linked list. However, buckets
+ // also participate in the bucket's cell list - they point to the list's
+ // head and also need to be linked / unlinked properly. To treat these two
+ // cases uniformly, instead of |prev| we're storing "pointer to a Cell*
+ // that points to this Cell" kind of thing. So |p_prev| points to a bucket
+ // for the first cell in a list, and points to |next| of the previous cell
+ // for any other cell. With that Lookup() is the only function that handles
+ // buckets / cells differently.
+ // If |p_prev| is nullptr, the cell is in the free list.
+ Cell** p_prev;
+ };
+
+ using Bucket = Cell*;
+
+ // Returns a pointer to the cell that contains or should contain the entry
+ // for |key|. The pointer may point at an element of |buckets_| or at the
+ // |next| member of an element of |cells_|.
+ Cell** Lookup(const Key& key) const {
+ // The list head is in |buckets_| at the hash offset.
+ Cell** p_cell = &buckets_[Hash(key)];
+
+ // Chase down the list until the cell that holds |key| is found,
+ // or until the list ends.
+ while (*p_cell && (*p_cell)->kv.first != key) {
+ p_cell = &(*p_cell)->next;
+ }
+
+ return p_cell;
+ }
+
+ // Returns a cell that is not being used to store an entry (either by
+ // recycling from the free list or by taking a fresh cell).
+ Cell* GetFreeCell() {
+ // First try to re-use a cell from the free list.
+ if (free_list_) {
+ Cell* cell = free_list_;
+ free_list_ = cell->next;
+ return cell;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ size_t idx = next_unused_cell_;
+ next_unused_cell_++;
+
+ // If the hash table has too little capacity (when too little address space
+ // was reserved for |cells_|), |next_unused_cell_| can be an index outside
+ // of the allocated storage. A guard page is allocated there to crash the
+ // program in that case. There are alternative solutions:
+ // - Deal with it, increase capacity by reallocating |cells_|.
+ // - Refuse to insert and let the caller deal with it.
+ // Because free cells are re-used before accessing fresh cells with a higher
+ // index, and because reserving address space without touching it is cheap,
+ // the simplest solution is to just allocate a humongous chunk of address
+ // space.
+
+ DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+ return &cells_[idx];
+ }
+
+ // Returns a value in the range [0, NumBuckets - 1] (inclusive).
+ size_t Hash(const Key& key) const {
+ if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
+ // NumBuckets is a power of 2.
+ return KeyHasher()(key) & (NumBuckets - 1);
+ } else {
+ return KeyHasher()(key) % NumBuckets;
+ }
+ }
+
+ // Number of cells.
+ size_t const num_cells_;
+
+ // The array of cells. This array is backed by mmapped memory. Lower indices
+ // are accessed first, higher indices are accessed only when the |free_list_|
+ // is empty. This is to minimize the amount of resident memory used.
+ Cell* const cells_;
+
+ // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
+ // contain the pointer to the linked list of cells for |Hash(key)|.
+ // This array is backed by mmapped memory.
+ mutable Bucket* buckets_;
+
+ // The head of the free list.
+ Cell* free_list_;
+
+ // The index of the first element of |cells_| that has not been used before.
+ // If the free list is empty and a new cell is needed, the cell at this index
+ // is used. This is the high water mark for the number of entries stored.
+ size_t next_unused_cell_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
+};
+
+} // namespace internal
+
class TraceEventMemoryOverhead;
// The allocation register keeps track of all allocations that have not been
-// freed. It is a memory map-backed hash table that stores size and context
-// indexed by address. The hash table is tailored specifically for this use
-// case. The common case is that an entry is inserted and removed after a
-// while, lookup without modifying the table is not an intended use case. The
-// hash table is implemented as an array of linked lists. The size of this
-// array is fixed, but it does not limit the amount of entries that can be
-// stored.
-//
-// Replaying a recording of Chrome's allocations and frees against this hash
-// table takes about 15% of the time that it takes to replay them against
-// |std::map|.
+// freed. Internally it has two hashtables: one for Backtraces and one for
+// actual allocations. Sizes of both hashtables are fixed, and this class
+// allocates (mmaps) only in its constructor.
class BASE_EXPORT AllocationRegister {
public:
- // The data stored in the hash table;
- // contains the details about an allocation.
+ // Details about an allocation.
struct Allocation {
- void* const address;
+ const void* address;
size_t size;
AllocationContext context;
};
- // An iterator that iterates entries in the hash table efficiently, but in no
- // particular order. It can do this by iterating the cells and ignoring the
- // linked lists altogether. Instead of checking whether a cell is in the free
- // list to see if it should be skipped, a null address is used to indicate
- // that a cell is free.
+ // An iterator that iterates entries in no particular order.
class BASE_EXPORT ConstIterator {
public:
void operator++();
bool operator!=(const ConstIterator& other) const;
- const Allocation& operator*() const;
+ Allocation operator*() const;
private:
friend class AllocationRegister;
- using CellIndex = uint32_t;
+ using AllocationIndex = size_t;
- ConstIterator(const AllocationRegister& alloc_register, CellIndex index);
+ ConstIterator(const AllocationRegister& alloc_register,
+ AllocationIndex index);
const AllocationRegister& register_;
- CellIndex index_;
+ AllocationIndex index_;
};
AllocationRegister();
- explicit AllocationRegister(uint32_t num_cells);
+ AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);
~AllocationRegister();
// Inserts allocation details into the table. If the address was present
- // already, its details are updated. |address| must not be null. (This is
- // because null is used to mark free cells, to allow efficient iteration of
- // the hash table.)
- void Insert(void* address, size_t size, AllocationContext context);
+ // already, its details are updated. |address| must not be null.
+ void Insert(const void* address,
+ size_t size,
+ const AllocationContext& context);
// Removes the address from the table if it is present. It is ok to call this
// with a null pointer.
- void Remove(void* address);
+ void Remove(const void* address);
- // Returns a pointer to the allocation at the address, or null if there is no
- // allocation at that address. This can be used to change the allocation
- // context after insertion, for example to change the type name.
- Allocation* Get(void* address);
+ // Finds allocation for the address and fills |out_allocation|.
+ bool Get(const void* address, Allocation* out_allocation) const;
ConstIterator begin() const;
ConstIterator end() const;
@@ -87,85 +298,54 @@ class BASE_EXPORT AllocationRegister {
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
private:
- friend class AllocationRegisterTest;
- using CellIndex = uint32_t;
-
- // A cell can store allocation details (size and context) by address. Cells
- // are part of a linked list via the |next| member. This list is either the
- // list for a particular hash, or the free list. All cells are contiguous in
- // memory in one big array. Therefore, on 64-bit systems, space can be saved
- // by storing 32-bit indices instead of pointers as links. Index 0 is used as
- // the list terminator.
- struct Cell {
- CellIndex next;
- Allocation allocation;
+ friend AllocationRegisterTest;
+
+ // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
+ // hashing and should be changed together with AddressHasher.
+ static const size_t kAllocationBuckets = 1 << 18;
+ static const size_t kAllocationCapacity = 1500000;
+
+ // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
+ // needing to tweak BacktraceHasher implementation.
+ static const size_t kBacktraceBuckets = 1 << 15;
+ static const size_t kBacktraceCapacity = kBacktraceBuckets;
+
+ struct BacktraceHasher {
+ size_t operator () (const Backtrace& backtrace) const;
};
- // The number of buckets, 2^17, approximately 130 000, has been tuned for
- // Chrome's typical number of outstanding allocations. (This number varies
- // between processes. Most processes have a sustained load of ~30k unfreed
- // allocations, but some processes have peeks around 100k-400k allocations.)
- // Because of the size of the table, it is likely that every |buckets_|
- // access and every |cells_| access will incur a cache miss. Microbenchmarks
- // suggest that it is worthwile to use more memory for the table to avoid
- // chasing down the linked list, until the size is 2^18. The number of buckets
- // is a power of two so modular indexing can be done with bitwise and.
- static const uint32_t kNumBuckets = 0x20000;
- static const uint32_t kNumBucketsMask = kNumBuckets - 1;
-
- // Reserve address space to store at most this number of entries. High
- // capacity does not imply high memory usage due to the access pattern. The
- // only constraint on the number of cells is that on 32-bit systems address
- // space is scarce (i.e. reserving 2GiB of address space for the entries is
- // not an option). A value of ~3M entries is large enough to handle spikes in
- // the number of allocations, and modest enough to require no more than a few
- // dozens of MiB of address space.
- static const uint32_t kNumCellsPerBucket = 10;
-
- // Returns a value in the range [0, kNumBuckets - 1] (inclusive).
- static uint32_t Hash(void* address);
-
- // Allocates a region of virtual address space of |size| rounded up to the
- // system page size. The memory is zeroed by the system. A guard page is
- // added after the end.
- static void* AllocateVirtualMemory(size_t size);
-
- // Frees a region of virtual address space allocated by a call to
- // |AllocateVirtualMemory|.
- static void FreeVirtualMemory(void* address, size_t allocated_size);
-
- // Returns a pointer to the variable that contains or should contain the
- // index of the cell that stores the entry for |address|. The pointer may
- // point at an element of |buckets_| or at the |next| member of an element of
- // |cells_|. If the value pointed at is 0, |address| is not in the table.
- CellIndex* Lookup(void* address);
-
- // Takes a cell that is not being used to store an entry (either by recycling
- // from the free list or by taking a fresh cell) and returns its index.
- CellIndex GetFreeCell();
-
- // The maximum number of cells which can be allocated.
- uint32_t const num_cells_;
+ using BacktraceMap = internal::FixedHashMap<
+ kBacktraceBuckets,
+ Backtrace,
+ size_t, // Number of references to the backtrace (the key). Incremented
+ // when an allocation that references the backtrace is inserted,
+ // and decremented when the allocation is removed. When the
+ // number drops to zero, the backtrace is removed from the map.
+ BacktraceHasher>;
- // The array of cells. This array is backed by mmapped memory. Lower indices
- // are accessed first, higher indices are only accessed when required. In
- // this way, even if a huge amount of address space has been mmapped, only
- // the cells that are actually used will be backed by physical memory.
- Cell* const cells_;
+ struct AllocationInfo {
+ size_t size;
+ const char* type_name;
+ BacktraceMap::KVIndex backtrace_index;
+ };
- // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain
- // the index of the head of the linked list for |Hash(address)|. A value of 0
- // indicates an empty list. This array is backed by mmapped memory.
- CellIndex* const buckets_;
+ struct AddressHasher {
+ size_t operator () (const void* address) const;
+ };
- // The head of the free list. This is the index of the cell. A value of 0
- // means that the free list is empty.
- CellIndex free_list_;
+ using AllocationMap = internal::FixedHashMap<
+ kAllocationBuckets,
+ const void*,
+ AllocationInfo,
+ AddressHasher>;
- // The index of the first element of |cells_| that has not been used before.
- // If the free list is empty and a new cell is needed, the cell at this index
- // is used. This is the high water mark for the number of entries stored.
- CellIndex next_unused_cell_;
+ BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
+ void RemoveBacktrace(BacktraceMap::KVIndex index);
+
+ Allocation GetAllocation(AllocationMap::KVIndex) const;
+
+ AllocationMap allocations_;
+ BacktraceMap backtraces_;
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
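With this header, backtraces are interned in their own map with a reference count: InsertBacktrace inserts the backtrace on first use and increments the count, RemoveBacktrace decrements it and erases the entry when it reaches zero. A sketch of that bookkeeping with std::unordered_map standing in for the fixed-size BacktraceMap (hypothetical helper, not the patch's FixedHashMap):

#include <string>
#include <unordered_map>

// Reference-counted interning, as InsertBacktrace/RemoveBacktrace do for
// Backtrace objects; std::string stands in for the interned value here.
class RefCountedInterner {
 public:
  using Index = std::unordered_map<std::string, size_t>::iterator;

  Index Insert(const std::string& value) {
    auto it = counts_.emplace(value, 0).first;  // Insert on first use.
    ++it->second;                               // One more reference.
    return it;
  }

  void Remove(Index it) {
    if (--it->second == 0)
      counts_.erase(it);  // No references left - drop the entry.
  }

 private:
  std::unordered_map<std::string, size_t> counts_;
};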
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc b/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc
index c38d7e69182..94eeb4df88a 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc
@@ -18,6 +18,7 @@
namespace base {
namespace trace_event {
+namespace internal {
namespace {
size_t GetGuardSize() {
@@ -25,8 +26,7 @@ size_t GetGuardSize() {
}
}
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
+void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
@@ -48,12 +48,11 @@ void* AllocationRegister::AllocateVirtualMemory(size_t size) {
return addr;
}
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
munmap(address, size);
}
+} // namespace internal
} // namespace trace_event
} // namespace base
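AllocateGuardedVirtualMemory reserves page-aligned, zeroed memory with an inaccessible guard page after it, so writing past the reserved cells faults immediately (the OverflowDeathTest below depends on this). A minimal POSIX sketch of the technique, not the exact Chromium implementation:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Map |size| bytes (rounded up to the page size) of zeroed memory followed by
// one PROT_NONE guard page; any access past the end raises SIGSEGV.
void* AllocateWithGuardPage(size_t size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size = (size + page - 1) & ~(page - 1);

  void* addr = mmap(nullptr, size + page, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED)
    return nullptr;

  // Revoke all access on the trailing page.
  mprotect(static_cast<char*>(addr) + size, page, PROT_NONE);
  return addr;
}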
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
index b356aa7853b..7eee61aa35e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
@@ -16,20 +16,21 @@ namespace trace_event {
class AllocationRegisterTest : public testing::Test {
public:
- static const uint32_t kNumBuckets = AllocationRegister::kNumBuckets;
+ // Use a lower number of backtrace cells for unittests to avoid reserving
+ // a virtual region which is too big.
+ static const size_t kAllocationBuckets =
+ AllocationRegister::kAllocationBuckets + 100;
+ static const size_t kAllocationCapacity = kAllocationBuckets;
+ static const size_t kBacktraceCapacity = 10;
// Returns the number of cells that the |AllocationRegister| can store per
// system page.
- size_t GetNumCellsPerPage() {
- return GetPageSize() / sizeof(AllocationRegister::Cell);
+ size_t GetAllocationCapacityPerPage() {
+ return GetPageSize() / sizeof(AllocationRegister::AllocationMap::Cell);
}
- uint32_t GetHighWaterMark(const AllocationRegister& reg) {
- return reg.next_unused_cell_;
- }
-
- uint32_t GetNumCells(const AllocationRegister& reg) {
- return reg.num_cells_;
+ size_t GetHighWaterMark(const AllocationRegister& reg) {
+ return reg.allocations_.next_unused_cell_;
}
};
@@ -56,7 +57,7 @@ size_t SumAllSizes(const AllocationRegister& reg) {
}
TEST_F(AllocationRegisterTest, InsertRemove) {
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
// Zero-sized allocations should be discarded.
@@ -90,7 +91,7 @@ TEST_F(AllocationRegisterTest, InsertRemove) {
}
TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
reg.Insert(reinterpret_cast<void*>(1), 1, ctx);
@@ -103,9 +104,7 @@ TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
}
TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
- // TODO(ruuda): Although double insert happens in practice, it should not.
- // Find out the cause and ban double insert if possible.
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
StackFrame frame1 = StackFrame::FromTraceEventName("Foo");
StackFrame frame2 = StackFrame::FromTraceEventName("Bar");
@@ -139,12 +138,12 @@ TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
// register still behaves correctly.
TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
size_t expected_sum = 0;
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
// By inserting 100 more entries than the number of buckets, there will be at
- // least 100 collisions.
- for (uintptr_t i = 1; i <= kNumBuckets + 100; i++) {
+ // least 100 collisions (100 = kAllocationCapacity - kAllocationBuckets).
+ for (uintptr_t i = 1; i <= kAllocationCapacity; i++) {
size_t size = i % 31;
expected_sum += size;
reg.Insert(reinterpret_cast<void*>(i), size, ctx);
@@ -156,7 +155,7 @@ TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
EXPECT_EQ(expected_sum, SumAllSizes(reg));
- for (uintptr_t i = 1; i <= kNumBuckets + 100; i++) {
+ for (uintptr_t i = 1; i <= kAllocationCapacity; i++) {
size_t size = i % 31;
expected_sum -= size;
reg.Remove(reinterpret_cast<void*>(i));
@@ -176,7 +175,7 @@ TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
// free list is utilised properly.
TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
size_t expected_sum = 0;
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
uintptr_t generator = 3;
@@ -216,74 +215,52 @@ TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
using Allocation = AllocationRegister::Allocation;
- const char kStdString[] = "std::string";
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
reg.Insert(reinterpret_cast<void*>(17), 1, ctx);
reg.Insert(reinterpret_cast<void*>(19), 2, ctx);
reg.Insert(reinterpret_cast<void*>(23), 3, ctx);
+ Allocation a;
+
// Looking up addresses that were not inserted should return null.
// A null pointer lookup is a valid thing to do.
- EXPECT_EQ(nullptr, reg.Get(nullptr));
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(13)));
-
- Allocation* a17 = reg.Get(reinterpret_cast<void*>(17));
- Allocation* a19 = reg.Get(reinterpret_cast<void*>(19));
- Allocation* a23 = reg.Get(reinterpret_cast<void*>(23));
+ EXPECT_FALSE(reg.Get(nullptr, &a));
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(13), &a));
- EXPECT_NE(nullptr, a17);
- EXPECT_NE(nullptr, a19);
- EXPECT_NE(nullptr, a23);
-
- a17->size = 100;
- a19->context.type_name = kStdString;
+ EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(17), &a));
+ EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(19), &a));
+ EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(23), &a));
reg.Remove(reinterpret_cast<void*>(23));
// Lookup should not find any garbage after removal.
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(23)));
-
- // Mutating allocations should have modified the allocations in the register.
- for (const Allocation& allocation : reg) {
- if (allocation.address == reinterpret_cast<void*>(17))
- EXPECT_EQ(100u, allocation.size);
- if (allocation.address == reinterpret_cast<void*>(19))
- EXPECT_EQ(kStdString, allocation.context.type_name);
- }
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(23), &a));
reg.Remove(reinterpret_cast<void*>(17));
reg.Remove(reinterpret_cast<void*>(19));
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(17)));
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(19)));
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(17), &a));
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(19), &a));
}
// Check that the process aborts due to hitting the guard page when inserting
// too many elements.
#if GTEST_HAS_DEATH_TEST
TEST_F(AllocationRegisterTest, OverflowDeathTest) {
- // Use a smaller register to prevent OOM errors on low-end devices.
- AllocationRegister reg(static_cast<uint32_t>(GetNumCellsPerPage()));
+ const size_t allocation_capacity = GetAllocationCapacityPerPage();
+ AllocationRegister reg(allocation_capacity, kBacktraceCapacity);
AllocationContext ctx;
- uintptr_t i;
+ size_t i;
- // Fill up all of the memory allocated for the register. |GetNumCells(reg)|
- // minus 1 elements are inserted, because cell 0 is unused, so this should
- // fill up the available cells exactly.
- for (i = 1; i < GetNumCells(reg); i++) {
- reg.Insert(reinterpret_cast<void*>(i), 1, ctx);
+ // Fill up all of the memory allocated for the register's allocation map.
+ for (i = 0; i < allocation_capacity; i++) {
+ reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx);
}
- // Adding just one extra element might still work because the allocated memory
- // is rounded up to the page size. Adding a page full of elements should cause
- // overflow.
- const size_t cells_per_page = GetNumCellsPerPage();
-
- ASSERT_DEATH(for (size_t j = 0; j < cells_per_page; j++) {
- reg.Insert(reinterpret_cast<void*>(i + j), 1, ctx);
- }, "");
+ // Adding just one extra element should cause overflow.
+ ASSERT_DEATH(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx), "");
}
#endif
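After this patch Get() no longer hands out a mutable pointer; it fills a caller-provided Allocation and returns whether the address was found. A minimal usage sketch mirroring the updated tests, assuming the headers from this patch are on the include path:

#include "base/trace_event/heap_profiler_allocation_register.h"

namespace base {
namespace trace_event {

void AllocationRegisterUsageSketch() {
  AllocationRegister reg;   // Default allocation/backtrace capacities.
  AllocationContext ctx;    // Empty backtrace, null type name.

  reg.Insert(reinterpret_cast<void*>(0x17), 32, ctx);

  AllocationRegister::Allocation allocation;
  if (reg.Get(reinterpret_cast<void*>(0x17), &allocation)) {
    // allocation.size == 32, allocation.context.type_name == nullptr.
  }

  reg.Remove(reinterpret_cast<void*>(0x17));  // Double removal is a no-op.
}

}  // namespace trace_event
}  // namespace base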
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_win.cc b/chromium/base/trace_event/heap_profiler_allocation_register_win.cc
index bc0afbf3401..39cbb180728 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_win.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_win.cc
@@ -13,6 +13,7 @@
namespace base {
namespace trace_event {
+namespace internal {
namespace {
size_t GetGuardSize() {
@@ -20,8 +21,7 @@ size_t GetGuardSize() {
}
}
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
+void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
@@ -50,14 +50,13 @@ void* AllocationRegister::AllocateVirtualMemory(size_t size) {
return addr;
}
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
// For |VirtualFree|, the size passed with |MEM_RELEASE| mut be 0. Windows
// automatically frees the entire region that was reserved by the
// |VirtualAlloc| with flag |MEM_RESERVE|.
VirtualFree(address, 0, MEM_RELEASE);
}
+} // namespace internal
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
index 6b31bd6e9f5..44e43875441 100644
--- a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
+++ b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -12,8 +12,9 @@ namespace trace_event {
TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
auto jhdp = JavaHeapDumpProvider::GetInstance();
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, dump_args));
jhdp->OnMemoryDump(dump_args, pmd.get());
}
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index cf69859404e..3b1a933bce5 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -229,7 +229,7 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
// This is the case of GetInstanceForCurrentThread() being called for the
// first time, which causes a new() inside the tracker which re-enters the
// heap profiler, in which case we just want to early out.
- auto tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+ auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
AllocationContext context = tracker->GetContextSnapshot();
diff --git a/chromium/base/trace_event/memory_allocator_dump.cc b/chromium/base/trace_event/memory_allocator_dump.cc
index f9b5799c05e..7583763889e 100644
--- a/chromium/base/trace_event/memory_allocator_dump.cc
+++ b/chromium/base/trace_event/memory_allocator_dump.cc
@@ -80,6 +80,13 @@ void MemoryAllocatorDump::AddScalarF(const char* name,
void MemoryAllocatorDump::AddString(const char* name,
const char* units,
const std::string& value) {
+ // String attributes are disabled in background mode.
+ if (process_memory_dump_->dump_args().level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND) {
+ NOTREACHED();
+ return;
+ }
+
attributes_->BeginDictionary(name);
attributes_->SetString("type", kTypeString);
attributes_->SetString("units", units);
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index 359f081154e..1bf9715917d 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -129,8 +129,8 @@ TEST(MemoryAllocatorDumpTest, GuidGeneration) {
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
FakeMemoryAllocatorDumpProvider fmadp;
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
fmadp.OnMemoryDump(dump_args, &pmd);
@@ -176,7 +176,8 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
FakeMemoryAllocatorDumpProvider fmadp;
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
pmd.CreateAllocatorDump("foo_allocator");
pmd.CreateAllocatorDump("bar_allocator/heap");
ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index b14d265f19e..eed070a7829 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -23,6 +23,7 @@
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
@@ -46,27 +47,8 @@ const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
-uint32_t g_periodic_dumps_count = 0;
-uint32_t g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;
-void RequestPeriodicGlobalDump() {
- MemoryDumpLevelOfDetail level_of_detail;
- if (g_heavy_dumps_rate == 0) {
- level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- } else {
- level_of_detail = g_periodic_dumps_count == 0
- ? MemoryDumpLevelOfDetail::DETAILED
- : MemoryDumpLevelOfDetail::LIGHT;
-
- if (++g_periodic_dumps_count == g_heavy_dumps_rate)
- g_periodic_dumps_count = 0;
- }
-
- MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -116,6 +98,9 @@ const char* const MemoryDumpManager::kTraceCategory =
TRACE_DISABLED_BY_DEFAULT("memory-infra");
// static
+const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";
+
+// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
// static
@@ -272,8 +257,10 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
if (dumper_registrations_ignored_for_testing_)
return;
+ bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
- new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options);
+ new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+ whitelisted_for_background_mode);
{
AutoLock lock(lock_);
@@ -351,8 +338,13 @@ void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
const MemoryDumpCallback& callback) {
- // Bail out immediately if tracing is not enabled at all.
- if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
+ // Bail out immediately if tracing is not enabled at all or if the dump mode
+ // is not allowed.
+ if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
+ !IsDumpModeAllowed(level_of_detail)) {
+ VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
+ << " tracing category is not enabled or the requested dump mode is "
+ "not allowed by trace config.";
if (!callback.is_null())
callback.Run(0u /* guid */, false /* success */);
return;
@@ -396,15 +388,33 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
TRACE_ID_MANGLE(args.dump_guid));
+ // If argument filter is enabled then only background mode dumps should be
+ // allowed. In case the trace config passed for background tracing session
+ // missed the allowed modes argument, it crashes here instead of creating
+ // unexpected dumps.
+ if (TraceLog::GetInstance()
+ ->GetCurrentTraceConfig()
+ .IsArgumentFilterEnabled()) {
+ CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+ }
+
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
{
AutoLock lock(lock_);
+
// |dump_thread_| can be nullptr is tracing was disabled before reaching
// here. SetupNextMemoryDump() is robust enough to tolerate it and will
// NACK the dump.
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_, session_state_, callback,
dump_thread_ ? dump_thread_->task_runner() : nullptr));
+
+ // Safety check to prevent reaching here without calling RequestGlobalDump,
+ // with disallowed modes. If |session_state_| is null then tracing is
+ // disabled.
+ CHECK(!session_state_ ||
+ session_state_->memory_dump_config().allowed_dump_modes.count(
+ args.level_of_detail));
}
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -438,6 +448,14 @@ void MemoryDumpManager::SetupNextMemoryDump(
// Anyway either tracing is stopped or this was the last hop, create a trace
// event, add it to the trace and finalize process dump invoking the callback.
if (!pmd_async_state->dump_thread_task_runner.get()) {
+ if (pmd_async_state->pending_dump_providers.empty()) {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before finalizing the dump";
+ } else {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before dumping "
+ << pmd_async_state->pending_dump_providers.back().get()->name;
+ }
pmd_async_state->dump_successful = false;
pmd_async_state->pending_dump_providers.clear();
}
@@ -449,6 +467,15 @@ void MemoryDumpManager::SetupNextMemoryDump(
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
+ // If we are in background tracing, we should invoke only the whitelisted
+ // providers. Ignore other providers and continue.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND &&
+ !mdpinfo->whitelisted_for_background_mode) {
+ pmd_async_state->pending_dump_providers.pop_back();
+ return SetupNextMemoryDump(std::move(pmd_async_state));
+ }
+
// If the dump provider did not specify a task runner affinity, dump on
// |dump_thread_| which is already checked above for presence.
SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
@@ -547,9 +574,10 @@ void MemoryDumpManager::InvokeOnMemoryDump(
// process), non-zero when the coordinator process creates dumps on behalf
// of child processes (see crbug.com/461788).
ProcessId target_pid = mdpinfo->options.target_pid;
- ProcessMemoryDump* pmd =
- pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+ ProcessMemoryDump* pmd =
+ pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
+ args);
bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
mdpinfo->consecutive_failures =
dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
@@ -602,8 +630,11 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
bool tracing_still_enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
- if (!tracing_still_enabled)
+ if (!tracing_still_enabled) {
pmd_async_state->dump_successful = false;
+ VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
+ << " the dump was completed";
+ }
if (!pmd_async_state->callback.is_null()) {
pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
@@ -632,78 +663,57 @@ void MemoryDumpManager::OnTraceLogEnabled() {
return;
}
- AutoLock lock(lock_);
-
- DCHECK(delegate_); // At this point we must have a delegate.
- session_state_ = new MemoryDumpSessionState;
-
+ const TraceConfig trace_config =
+ TraceLog::GetInstance()->GetCurrentTraceConfig();
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
if (heap_profiling_enabled_) {
// If heap profiling is enabled, the stack frame deduplicator and type name
// deduplicator will be in use. Add metadata events to write the frames
// and type IDs.
- session_state_->SetStackFrameDeduplicator(
+ session_state->SetStackFrameDeduplicator(
WrapUnique(new StackFrameDeduplicator));
- session_state_->SetTypeNameDeduplicator(
+ session_state->SetTypeNameDeduplicator(
WrapUnique(new TypeNameDeduplicator));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
"stackFrames",
- WrapUnique(
- new SessionStateConvertableProxy<StackFrameDeduplicator>(
- session_state_,
- &MemoryDumpSessionState::stack_frame_deduplicator)));
+ WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
"typeNames",
WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
- session_state_, &MemoryDumpSessionState::type_name_deduplicator)));
+ session_state, &MemoryDumpSessionState::type_name_deduplicator)));
}
- DCHECK(!dump_thread_);
- dump_thread_ = std::move(dump_thread);
- subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+ {
+ AutoLock lock(lock_);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- return;
- }
+ DCHECK(delegate_); // At this point we must have a delegate.
+ session_state_ = session_state;
- // Enable periodic dumps. At the moment the periodic support is limited to at
- // most one low-detail periodic dump and at most one high-detail periodic
- // dump. If both are specified the high-detail period must be an integer
- // multiple of the low-level one.
- g_periodic_dumps_count = 0;
- const TraceConfig trace_config =
- TraceLog::GetInstance()->GetCurrentTraceConfig();
- session_state_->SetMemoryDumpConfig(trace_config.memory_dump_config());
- const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list =
- trace_config.memory_dump_config().triggers;
- if (triggers_list.empty())
- return;
+ DCHECK(!dump_thread_);
+ dump_thread_ = std::move(dump_thread);
- uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
- uint32_t heavy_dump_period_ms = 0;
- DCHECK_LE(triggers_list.size(), 2u);
- for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK(config.periodic_interval_ms);
- if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
- heavy_dump_period_ms = config.periodic_interval_ms;
- min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
+ subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+
+ // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+ // when running memory benchmarks until telemetry uses TraceConfig to
+ // enable/disable periodic dumps. See crbug.com/529184 .
+ if (!is_coordinator_ ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ "enable-memory-benchmarking")) {
+ return;
+ }
}
- DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
- g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
- periodic_dump_timer_.Start(FROM_HERE,
- TimeDelta::FromMilliseconds(min_timer_period_ms),
- base::Bind(&RequestPeriodicGlobalDump));
+ // Enable periodic dumps if necessary.
+ periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}
void MemoryDumpManager::OnTraceLogDisabled() {
@@ -725,6 +735,14 @@ void MemoryDumpManager::OnTraceLogDisabled() {
dump_thread->Stop();
}
+bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
+ AutoLock lock(lock_);
+ if (!session_state_)
+ return false;
+ return session_state_->memory_dump_config().allowed_dump_modes.count(
+ dump_mode) != 0;
+}
+
uint64_t MemoryDumpManager::GetTracingProcessId() const {
return delegate_->GetTracingProcessId();
}
@@ -733,13 +751,15 @@ MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options)
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
: dump_provider(dump_provider),
name(name),
task_runner(std::move(task_runner)),
options(options),
consecutive_failures(0),
- disabled(false) {}
+ disabled(false),
+ whitelisted_for_background_mode(whitelisted_for_background_mode) {}
MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
@@ -765,7 +785,7 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
session_state(std::move(session_state)),
callback(callback),
dump_successful(true),
- callback_task_runner(MessageLoop::current()->task_runner()),
+ callback_task_runner(ThreadTaskRunnerHandle::Get()),
dump_thread_task_runner(std::move(dump_thread_task_runner)) {
pending_dump_providers.reserve(dump_providers.size());
pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
@@ -775,15 +795,89 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}
ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
- GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
+ GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
+ const MemoryDumpArgs& dump_args) {
auto iter = process_dumps.find(pid);
if (iter == process_dumps.end()) {
std::unique_ptr<ProcessMemoryDump> new_pmd(
- new ProcessMemoryDump(session_state));
+ new ProcessMemoryDump(session_state, dump_args));
iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
}
return iter->second.get();
}
+MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
+ Stop();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
+ const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
+ if (triggers_list.empty())
+ return;
+
+ // At the moment the periodic support is limited to at most one periodic
+ // trigger per dump mode. All intervals should be an integer multiple of the
+ // smallest interval specified.
+ periodic_dumps_count_ = 0;
+ uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
+ uint32_t light_dump_period_ms = 0;
+ uint32_t heavy_dump_period_ms = 0;
+ DCHECK_LE(triggers_list.size(), 3u);
+ auto* mdm = MemoryDumpManager::GetInstance();
+ for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
+ DCHECK_NE(0u, config.periodic_interval_ms);
+ switch (config.level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
+ break;
+ case MemoryDumpLevelOfDetail::LIGHT:
+ DCHECK_EQ(0u, light_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
+ light_dump_period_ms = config.periodic_interval_ms;
+ break;
+ case MemoryDumpLevelOfDetail::DETAILED:
+ DCHECK_EQ(0u, heavy_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
+ heavy_dump_period_ms = config.periodic_interval_ms;
+ break;
+ }
+ min_timer_period_ms =
+ std::min(min_timer_period_ms, config.periodic_interval_ms);
+ }
+
+ DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
+ light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
+ DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+ heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
+
+ timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
+ base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
+ base::Unretained(this)));
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
+ if (IsRunning()) {
+ timer_.Stop();
+ }
+}
+
+bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
+ return timer_.IsRunning();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
+ MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+ if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+ ++periodic_dumps_count_;
+
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
} // namespace trace_event
} // namespace base
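
A minimal standalone sketch of the rate arithmetic used by the new
PeriodicGlobalDumpTimer above, assuming hypothetical triggers of 250 ms
(background), 500 ms (light) and 1000 ms (detailed). The modulo-based mode
selection mirrors RequestPeriodicGlobalDump(); the program itself is
illustrative only and not part of the patch.

#include <cstdint>
#include <iostream>

// With triggers of 250/500/1000 ms the timer ticks every 250 ms;
// light_rate = 2 and heavy_rate = 4, so the emitted sequence is
// detailed, background, light, background, detailed, ...
int main() {
  const uint32_t min_period_ms = 250;
  const uint32_t light_rate = 500 / min_period_ms;   // 2
  const uint32_t heavy_rate = 1000 / min_period_ms;  // 4
  for (uint32_t count = 0; count < 8; ++count) {
    const char* level = "background";
    if (light_rate > 0 && count % light_rate == 0)
      level = "light";
    if (heavy_rate > 0 && count % heavy_rate == 0)
      level = "detailed";
    std::cout << "tick " << count << " (every " << min_period_ms
              << " ms) -> " << level << " dump\n";
  }
  return 0;
}
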
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 817768afedd..06b772c6e4b 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -40,6 +40,7 @@ class MemoryDumpSessionState;
class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
public:
static const char* const kTraceCategory;
+ static const char* const kLogPrefix;
// This value is returned as the tracing id of the child processes by
// GetTracingProcessId() when tracing is not enabled.
@@ -115,10 +116,14 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
+ // Returns true if the dump mode is allowed for the current tracing session.
+ bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+
// Returns the MemoryDumpSessionState object, which is shared by all the
// ProcessMemoryDump and MemoryAllocatorDump instances throughout the
// tracing session lifetime.
- const scoped_refptr<MemoryDumpSessionState>& session_state() const {
+ const scoped_refptr<MemoryDumpSessionState>& session_state_for_testing()
+ const {
return session_state_;
}
@@ -176,7 +181,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options);
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
MemoryDumpProvider* const dump_provider;
@@ -200,6 +206,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// Flagged either by the auto-disable logic or during unregistration.
bool disabled;
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
private:
friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
~MemoryDumpProviderInfo();
@@ -221,7 +230,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
~ProcessMemoryDumpAsyncState();
// Gets or creates the memory dump container for the given target process.
- ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(ProcessId pid);
+ ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(
+ ProcessId pid,
+ const MemoryDumpArgs& dump_args);
// A map of ProcessId -> ProcessMemoryDump, one for each target process
// being dumped from the current process. Typically each process dumps only
@@ -262,6 +273,31 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
};
+ // Sets up periodic memory dump timers to start global dump requests based
+ // on the dump triggers in the trace config.
+ class BASE_EXPORT PeriodicGlobalDumpTimer {
+ public:
+ PeriodicGlobalDumpTimer();
+ ~PeriodicGlobalDumpTimer();
+
+ void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
+ triggers_list);
+ void Stop();
+
+ bool IsRunning();
+
+ private:
+ // Periodically called by the timer.
+ void RequestPeriodicGlobalDump();
+
+ RepeatingTimer timer_;
+ uint32_t periodic_dumps_count_;
+ uint32_t light_dump_rate_;
+ uint32_t heavy_dump_rate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
+ };
+
static const int kMaxConsecutiveFailuresCount;
static const char* const kSystemAllocatorPoolName;
@@ -325,7 +361,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
subtle::AtomicWord memory_tracing_enabled_;
// For time-triggered periodic dumps.
- RepeatingTimer periodic_dump_timer_;
+ PeriodicGlobalDumpTimer periodic_dump_timer_;
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index c1295efac65..d14093cbcc3 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -23,6 +23,7 @@
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
@@ -48,16 +49,24 @@ MATCHER(IsLightDump, "") {
return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
}
+MATCHER(IsBackgroundDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
+}
+
namespace {
+const char* kMDPName = "TestDumpProvider";
+const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
+
void RegisterDumpProvider(
MemoryDumpProvider* mdp,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options) {
+ const MemoryDumpProvider::Options& options,
+ const char* name = kMDPName) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- const char* kMDPName = "TestDumpProvider";
- mdm->RegisterDumpProvider(mdp, kMDPName, std::move(task_runner), options);
+ mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
mdm->set_dumper_registrations_ignored_for_testing(true);
}
@@ -71,7 +80,6 @@ void RegisterDumpProviderWithSequencedTaskRunner(
const MemoryDumpProvider::Options& options) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- const char* kMDPName = "TestDumpProvider";
mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
options);
mdm->set_dumper_registrations_ignored_for_testing(true);
@@ -218,7 +226,7 @@ class MemoryDumpManagerTest : public testing::Test {
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
run_loop.Run();
}
@@ -331,7 +339,8 @@ TEST_F(MemoryDumpManagerTest, SharedSessionState) {
RegisterDumpProvider(&mdp2);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
- const MemoryDumpSessionState* session_state = mdm_->session_state().get();
+ const MemoryDumpSessionState* session_state =
+ mdm_->session_state_for_testing().get();
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(2)
@@ -464,11 +473,11 @@ TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
// invoked a number of times equal to its index.
for (uint32_t i = kNumInitialThreads; i > 0; --i) {
threads.push_back(WrapUnique(new Thread("test thread")));
- auto thread = threads.back().get();
+ auto* thread = threads.back().get();
thread->Start();
scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
- auto mdp = mdps.back().get();
+ auto* mdp = mdps.back().get();
RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
EXPECT_CALL(*mdp, OnMemoryDump(_, _))
.Times(i)
@@ -895,7 +904,9 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
- base::WaitableEvent tracing_disabled_event(false, false);
+ base::WaitableEvent tracing_disabled_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
InitializeMemoryDumpManager(false /* is_coordinator */);
// Register a bound dump provider.
@@ -932,7 +943,7 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED, callback);
DisableTracing();
@@ -945,7 +956,9 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
// Tests against race conditions that can happen if tracing is disabled before
// the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
- base::WaitableEvent tracing_disabled_event(false, false);
+ base::WaitableEvent tracing_disabled_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
InitializeMemoryDumpManager(false /* is_coordinator */);
std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
@@ -1099,5 +1112,60 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+ std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
+ RegisterDumpProvider(mdp1.get());
+ std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
+ RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
+ kWhitelistedMDPName);
+
+ EXPECT_CALL(*mdp1, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(*mdp2, OnMemoryDump(_, _)).Times(1).WillOnce(Return(true));
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::BACKGROUND);
+ DisableTracing();
+}
+
+TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
+ InitializeMemoryDumpManager(true /* is_coordinator */);
+
+ RunLoop run_loop;
+ auto quit_closure = run_loop.QuitClosure();
+
+ testing::InSequence sequence;
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+ .Times(5);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+ .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ }));
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */));
+
+ // Only background mode dumps should be allowed with the trace config.
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::LIGHT);
+ EXPECT_FALSE(last_callback_success_);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_FALSE(last_callback_success_);
+
+ ASSERT_TRUE(IsPeriodicDumpingEnabled());
+ run_loop.Run();
+ DisableTracing();
+}
+
} // namespace trace_event
} // namespace base
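
A hedged sketch of the trace config shape the background-tracing test above
relies on. The helper name is hypothetical; the JSON keys
("allowed_dump_modes", "triggers", "mode", "periodic_interval_ms") are the
ones parsed by SetMemoryDumpConfigFromConfigDict in trace_config.cc below.

#include "base/trace_event/trace_config.h"

// Hypothetical helper: builds a TraceConfig that only allows background-mode
// memory dumps and requests one periodic background dump per minute.
base::trace_event::TraceConfig MakeBackgroundMemoryTraceConfig() {
  return base::trace_event::TraceConfig(
      "{"
        "\"included_categories\":[\"disabled-by-default-memory-infra\"],"
        "\"memory_dump_config\":{"
          "\"allowed_dump_modes\":[\"background\"],"
          "\"triggers\":["
            "{\"mode\":\"background\",\"periodic_interval_ms\":60000}"
          "]"
        "}"
      "}");
}
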
diff --git a/chromium/base/trace_event/memory_dump_provider.h b/chromium/base/trace_event/memory_dump_provider.h
index 79ab7934866..c899ea9c346 100644
--- a/chromium/base/trace_event/memory_dump_provider.h
+++ b/chromium/base/trace_event/memory_dump_provider.h
@@ -15,12 +15,6 @@ namespace trace_event {
class ProcessMemoryDump;
-// Args passed to OnMemoryDump(). This is to avoid rewriting all the subclasses
-// in the codebase when extending the MemoryDumpProvider API.
-struct MemoryDumpArgs {
- MemoryDumpLevelOfDetail level_of_detail;
-};
-
// The contract interface that memory dump providers must implement.
class BASE_EXPORT MemoryDumpProvider {
public:
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index 48b5ba6d2c0..e6c5b87b226 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -28,6 +28,8 @@ const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ return "background";
case MemoryDumpLevelOfDetail::LIGHT:
return "light";
case MemoryDumpLevelOfDetail::DETAILED:
@@ -39,6 +41,8 @@ const char* MemoryDumpLevelOfDetailToString(
MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
const std::string& str) {
+ if (str == "background")
+ return MemoryDumpLevelOfDetail::BACKGROUND;
if (str == "light")
return MemoryDumpLevelOfDetail::LIGHT;
if (str == "detailed")
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index 00d560ec6ac..f3ff9d8e3b5 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -28,13 +28,25 @@ enum class MemoryDumpType {
};
// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
-// MemoryDumpProvider instances must guarantee that level of detail does not
-// affect the total size reported in the root node, but only the granularity of
-// the child MemoryAllocatorDump(s).
-enum class MemoryDumpLevelOfDetail {
- LIGHT, // Few entries, typically a fixed number, per dump.
- DETAILED, // Unrestricted amount of entries per dump.
- LAST = DETAILED // For IPC Macros.
+enum class MemoryDumpLevelOfDetail : uint32_t {
+ FIRST,
+
+ // For background tracing mode. Dumps should be fast and typically report
+ // only totals; suballocations need not be specified. Dump names must
+ // contain only pre-defined strings, and string arguments cannot be added.
+ BACKGROUND = FIRST,
+
+ // For the levels below, MemoryDumpProvider instances must guarantee that the
+ // total size reported in the root node is consistent. Only the granularity of
+ // the child MemoryAllocatorDump(s) differs between the levels.
+
+ // Few entries, typically a fixed number, per dump.
+ LIGHT,
+
+ // Unrestricted amount of entries per dump.
+ DETAILED,
+
+ LAST = DETAILED
};
// Initial request arguments for a global memory dump. (see
@@ -49,6 +61,13 @@ struct BASE_EXPORT MemoryDumpRequestArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
+// Args for ProcessMemoryDump, passed to the OnMemoryDump() calls of memory
+// dump providers. Dump providers are expected to honor these args when
+// creating dumps.
+struct MemoryDumpArgs {
+ // Specifies how detailed the dumps should be.
+ MemoryDumpLevelOfDetail level_of_detail;
+};
+
using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
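
A minimal sketch of a dump provider that honors the new BACKGROUND level of
detail. The class, its dump names and its counters are hypothetical; only the
MemoryDumpProvider, MemoryDumpArgs and ProcessMemoryDump interfaces touched by
this patch are assumed.

#include <stdint.h>

#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

// Hypothetical provider: reports only a total in BACKGROUND mode and adds a
// per-entry breakdown for LIGHT/DETAILED dumps.
class MyCacheDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    using base::trace_event::MemoryAllocatorDump;
    using base::trace_event::MemoryDumpLevelOfDetail;

    // Always report the total. "mycache" would need to be on the allocator
    // dump name whitelist for this dump to survive a background dump.
    MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("mycache");
    dump->AddScalar(MemoryAllocatorDump::kNameSize,
                    MemoryAllocatorDump::kUnitsBytes, total_size_bytes_);

    if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
      // Finer-grained children only for light/detailed dumps.
      pmd->CreateAllocatorDump("mycache/entries")
          ->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                      MemoryAllocatorDump::kUnitsObjects, entry_count_);
    }
    return true;
  }

 private:
  uint64_t total_size_bytes_ = 0;
  uint64_t entry_count_ = 0;
};
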
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
new file mode 100644
index 00000000000..aed187fa1d8
--- /dev/null
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+namespace base {
+namespace trace_event {
+namespace {
+
+// The names of dump providers whitelisted for background tracing. Dump
+// providers can be added here only if the background mode dump has very
+// low performance and memory overhead.
+const char* const kDumpProviderWhitelist[] = {
+ "BlinkGC",
+ "ChildDiscardableSharedMemoryManager",
+ "DOMStorage",
+ "HostDiscardableSharedMemoryManager",
+ "IndexedDBBackingStore",
+ "JavaHeap",
+ "LeveldbValueStore",
+ "Malloc",
+ "PartitionAlloc",
+ "ProcessMemoryMetrics",
+ "Skia",
+ "Sql",
+ "V8Isolate",
+ "WinHeap",
+ nullptr // End of list marker.
+};
+
+// A list of string names that are allowed for the memory allocator dumps in
+// background mode.
+const char* const kAllocatorDumpNameWhitelist[] = {
+ "blink_gc",
+ "blink_gc/allocated_objects",
+ "discardable",
+ "discardable/child_0x?",
+ "dom_storage/0x?/cache_size",
+ "dom_storage/session_storage_0x?",
+ "java_heap",
+ "java_heap/allocated_objects",
+ "leveldb/index_db/0x?",
+ "leveldb/value_store/Extensions.Database.Open.Settings/0x?",
+ "leveldb/value_store/Extensions.Database.Open.Rules/0x?",
+ "leveldb/value_store/Extensions.Database.Open.State/0x?",
+ "leveldb/value_store/Extensions.Database.Open/0x?",
+ "leveldb/value_store/Extensions.Database.Restore/0x?",
+ "leveldb/value_store/Extensions.Database.Value.Restore/0x?",
+ "malloc",
+ "malloc/allocated_objects",
+ "malloc/metadata_fragmentation_caches",
+ "partition_alloc/allocated_objects",
+ "partition_alloc/partitions",
+ "partition_alloc/partitions/buffer",
+ "partition_alloc/partitions/fast_malloc",
+ "partition_alloc/partitions/layout",
+ "skia/sk_glyph_cache",
+ "skia/sk_resource_cache",
+ "sqlite",
+ "v8/isolate_0x?/heap_spaces",
+ "v8/isolate_0x?/heap_spaces/code_space",
+ "v8/isolate_0x?/heap_spaces/large_object_space",
+ "v8/isolate_0x?/heap_spaces/map_space",
+ "v8/isolate_0x?/heap_spaces/new_space",
+ "v8/isolate_0x?/heap_spaces/old_space",
+ "v8/isolate_0x?/heap_spaces/other_spaces",
+ "v8/isolate_0x?/malloc",
+ "v8/isolate_0x?/zapped_for_debug",
+ "winheap",
+ "winheap/allocated_objects",
+ nullptr // End of list marker.
+};
+
+const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
+const char* const* g_allocator_dump_name_whitelist =
+ kAllocatorDumpNameWhitelist;
+
+} // namespace
+
+bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
+ for (size_t i = 0; g_dump_provider_whitelist[i] != nullptr; ++i) {
+ if (strcmp(mdp_name, g_dump_provider_whitelist[i]) == 0)
+ return true;
+ }
+ return false;
+}
+
+bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
+ // Replace hexadecimal address sequences (marked by a '0x' prefix) in the
+ // given string with the placeholder "0x?".
+ const size_t length = name.size();
+ std::string stripped_str;
+ stripped_str.reserve(length);
+ bool parsing_hex = false;
+ for (size_t i = 0; i < length; ++i) {
+ if (parsing_hex && isxdigit(name[i]))
+ continue;
+ parsing_hex = false;
+ if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
+ parsing_hex = true;
+ stripped_str.append("0x?");
+ ++i;
+ } else {
+ stripped_str.push_back(name[i]);
+ }
+ }
+
+ for (size_t i = 0; g_allocator_dump_name_whitelist[i] != nullptr; ++i) {
+ if (stripped_str == g_allocator_dump_name_whitelist[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void SetDumpProviderWhitelistForTesting(const char* const* list) {
+ g_dump_provider_whitelist = list;
+}
+
+void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
+ g_allocator_dump_name_whitelist = list;
+}
+
+} // namespace trace_event
+} // namespace base
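
A usage sketch of the name normalization above, assuming the default
kAllocatorDumpNameWhitelist is in effect. Hex digits following "0x" are
collapsed to the "0x?" placeholder before matching; the function below is
illustrative only.

#include <cassert>

#include "base/trace_event/memory_infra_background_whitelist.h"

void WhitelistMatchingSketch() {
  using base::trace_event::IsMemoryAllocatorDumpNameWhitelisted;
  // "v8/isolate_0x1a2b3c/heap_spaces" normalizes to
  // "v8/isolate_0x?/heap_spaces", which is on the default whitelist.
  assert(IsMemoryAllocatorDumpNameWhitelisted("v8/isolate_0x1a2b3c/heap_spaces"));
  // Names absent from the whitelist are rejected even after normalization.
  assert(!IsMemoryAllocatorDumpNameWhitelisted("v8/isolate_0x1a2b3c/not_listed"));
}
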
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.h b/chromium/base/trace_event/memory_infra_background_whitelist.h
new file mode 100644
index 00000000000..b8d704ae241
--- /dev/null
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+
+// This file contains the whitelists for background mode to limit the tracing
+// overhead and remove sensitive information from traces.
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// Checks if the given |mdp_name| is in the whitelist.
+bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
+
+// Checks if the given |name| matches any of the whitelisted patterns.
+bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
+
+// The whitelist is replaced with the given list for tests. The last element of
+// the list must be nullptr.
+void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
+void BASE_EXPORT
+SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 52eccbe1a0c..826989237bc 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -12,6 +12,7 @@
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_totals.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"
@@ -48,6 +49,9 @@ size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
} // namespace
+// static
+bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
@@ -148,10 +152,12 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
ProcessMemoryDump::ProcessMemoryDump(
- scoped_refptr<MemoryDumpSessionState> session_state)
+ scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args)
: has_process_totals_(false),
has_process_mmaps_(false),
- session_state_(std::move(session_state)) {}
+ session_state_(std::move(session_state)),
+ dump_args_(dump_args) {}
ProcessMemoryDump::~ProcessMemoryDump() {}
@@ -170,6 +176,13 @@ MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad) {
+ // In background mode, return the black hole dump if an invalid dump name
+ // is given.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+ !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
+ return GetBlackHoleMad();
+ }
+
auto insertion_result = allocator_dumps_.insert(
std::make_pair(mad->absolute_name(), std::move(mad)));
MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
@@ -181,7 +194,11 @@ MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
const std::string& absolute_name) const {
auto it = allocator_dumps_.find(absolute_name);
- return it == allocator_dumps_.end() ? nullptr : it->second.get();
+ if (it != allocator_dumps_.end())
+ return it->second.get();
+ if (black_hole_mad_)
+ return black_hole_mad_.get();
+ return nullptr;
}
MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
@@ -192,6 +209,10 @@ MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
+ // Global dumps are disabled in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return GetBlackHoleMad();
+
// A shared allocator dump can be shared within a process and the guid could
// have been created already.
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
@@ -206,6 +227,10 @@ MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
+ // Global dumps are disabled in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return GetBlackHoleMad();
+
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
if (mad)
return mad;
@@ -219,21 +244,16 @@ MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}
-void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
- std::unique_ptr<TracedValue> heap_dump) {
- DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
- heap_dumps_[absolute_name] = std::move(heap_dump);
-}
-
void ProcessMemoryDump::DumpHeapUsage(
const base::hash_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>& metrics_by_context,
base::trace_event::TraceEventMemoryOverhead& overhead,
const char* allocator_name) {
if (!metrics_by_context.empty()) {
+ DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
metrics_by_context, *session_state());
- AddHeapDump(allocator_name, std::move(heap_dump));
+ heap_dumps_[allocator_name] = std::move(heap_dump);
}
std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
@@ -333,10 +353,21 @@ void ProcessMemoryDump::AddOwnershipEdge(
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
const std::string& target_node_name) {
+ // Do not create new dumps for suballocations in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return;
+
std::string child_mad_name = target_node_name + "/__" + source.ToString();
MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
AddOwnershipEdge(source, target_child_mad->guid());
}
+MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
+ DCHECK(is_black_hole_non_fatal_for_testing_);
+ if (!black_hole_mad_)
+ black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
+ return black_hole_mad_.get();
+}
+
} // namespace trace_event
} // namespace base
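
A hedged sketch of the black-hole behavior introduced above for
background-mode dumps. The function and the non-whitelisted name are
hypothetical; "malloc" is on the background whitelist, so it produces a real
dump. GetBlackHoleMad() DCHECKs unless is_black_hole_non_fatal_for_testing_
is set, so in debug builds a non-whitelisted name is a programming error
rather than silent filtering.

#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"

// |pmd| is assumed to have been constructed with
// MemoryDumpArgs{MemoryDumpLevelOfDetail::BACKGROUND}.
void BackgroundDumpSketch(base::trace_event::ProcessMemoryDump* pmd) {
  auto* kept = pmd->CreateAllocatorDump("malloc");  // whitelisted, real dump
  auto* dropped =
      pmd->CreateAllocatorDump("my_private_cache");  // routed to the black hole
  (void)kept;
  (void)dropped;
}
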
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index 51e4b5f5150..d020c7d652f 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -16,6 +16,7 @@
#include "base/memory/scoped_vector.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_maps.h"
#include "base/trace_event/process_memory_totals.h"
@@ -67,7 +68,8 @@ class BASE_EXPORT ProcessMemoryDump {
static size_t CountResidentBytes(void* start_address, size_t mapped_size);
#endif
- ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state);
+ ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args);
~ProcessMemoryDump();
// Creates a new MemoryAllocatorDump with the given name and returns the
@@ -116,14 +118,6 @@ class BASE_EXPORT ProcessMemoryDump {
// Returns the map of the MemoryAllocatorDumps added to this dump.
const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
- // Adds a heap dump for the allocator with |absolute_name|. The |TracedValue|
- // must have the correct format. |trace_event::HeapDumper| will generate such
- // a value from a |trace_event::AllocationRegister|.
- // TODO(bashi): Remove this when WebMemoryDumpProvider is gone.
- // http://crbug.com/605822
- void AddHeapDump(const std::string& absolute_name,
- std::unique_ptr<TracedValue> heap_dump);
-
// Dumps heap usage with |allocator_name|.
void DumpHeapUsage(const base::hash_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>&
@@ -183,10 +177,16 @@ class BASE_EXPORT ProcessMemoryDump {
const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+ const MemoryDumpArgs& dump_args() const { return dump_args_; }
+
private:
+ FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+
MemoryAllocatorDump* AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad);
+ MemoryAllocatorDump* GetBlackHoleMad();
+
ProcessMemoryTotals process_totals_;
bool has_process_totals_;
@@ -202,6 +202,18 @@ class BASE_EXPORT ProcessMemoryDump {
// Keeps track of relationships between MemoryAllocatorDump(s).
std::vector<MemoryAllocatorDumpEdge> allocator_dumps_edges_;
+ // Dump args (level of detail) of the current dump.
+ const MemoryDumpArgs dump_args_;
+
+ // This allocator dump is returned when an invalid dump is created in
+ // background mode. The attributes of the dump are ignored and not added to
+ // the trace.
+ std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
+
+ // When set to true, the DCHECK(s) for invalid dump creations in
+ // background mode are disabled for testing.
+ static bool is_black_hole_non_fatal_for_testing_;
+
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
};
diff --git a/chromium/base/trace_event/process_memory_dump_unittest.cc b/chromium/base/trace_event/process_memory_dump_unittest.cc
index 3a93b2c489d..571774a10ca 100644
--- a/chromium/base/trace_event/process_memory_dump_unittest.cc
+++ b/chromium/base/trace_event/process_memory_dump_unittest.cc
@@ -7,8 +7,10 @@
#include <stddef.h>
#include "base/memory/aligned_memory.h"
+#include "base/memory/ptr_util.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/trace_event_argument.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -16,14 +18,22 @@ namespace base {
namespace trace_event {
namespace {
+
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const char* const kTestDumpNameWhitelist[] = {
+ "Whitelisted/TestName", "Whitelisted/TestName_0x?",
+ "Whitelisted/0x?/TestName", nullptr};
+
TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
auto it = pmd.heap_dumps().find(name);
return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
}
+
} // namespace
TEST(ProcessMemoryDumpTest, Clear) {
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
pmd1->CreateAllocatorDump("mad1");
pmd1->CreateAllocatorDump("mad2");
ASSERT_FALSE(pmd1->allocator_dumps().empty());
@@ -58,10 +68,10 @@ TEST(ProcessMemoryDumpTest, Clear) {
pmd1->AsValueInto(traced_value.get());
// Check that the pmd can be reused and behaves as expected.
- auto mad1 = pmd1->CreateAllocatorDump("mad1");
- auto mad3 = pmd1->CreateAllocatorDump("mad3");
- auto shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
- auto shared_mad2 =
+ auto* mad1 = pmd1->CreateAllocatorDump("mad1");
+ auto* mad3 = pmd1->CreateAllocatorDump("mad3");
+ auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+ auto* shared_mad2 =
pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
ASSERT_EQ(4u, pmd1->allocator_dumps().size());
ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
@@ -80,35 +90,36 @@ TEST(ProcessMemoryDumpTest, Clear) {
TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
std::unique_ptr<TracedValue> traced_value(new TracedValue);
- TracedValue* heap_dumps_ptr[4];
- std::unique_ptr<TracedValue> heap_dump;
-
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
- auto mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
- auto mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
+ hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+ metrics_by_context[AllocationContext()] = { 1, 1 };
+ TraceEventMemoryOverhead overhead;
+
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetStackFrameDeduplicator(
+ WrapUnique(new StackFrameDeduplicator));
+ session_state->SetTypeNameDeduplicator(
+ WrapUnique(new TypeNameDeduplicator));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+ auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
+ auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[0] = heap_dump.get();
- pmd1->AddHeapDump("pmd1/heap_dump1", std::move(heap_dump));
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[1] = heap_dump.get();
- pmd1->AddHeapDump("pmd1/heap_dump2", std::move(heap_dump));
-
- std::unique_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
- auto mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
- auto mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
+ pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
+ pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
+
+ std::unique_ptr<ProcessMemoryDump> pmd2(
+ new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+ auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
+ auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[2] = heap_dump.get();
- pmd2->AddHeapDump("pmd2/heap_dump1", std::move(heap_dump));
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[3] = heap_dump.get();
- pmd2->AddHeapDump("pmd2/heap_dump2", std::move(heap_dump));
+ pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
+ pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
MemoryAllocatorDumpGuid shared_mad_guid1(1);
MemoryAllocatorDumpGuid shared_mad_guid2(2);
- auto shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
- auto shared_mad2 =
+ auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+ auto* shared_mad2 =
pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
pmd1->TakeAllDumpsFrom(pmd2.get());
@@ -141,10 +152,10 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
ASSERT_EQ(4u, pmd1->heap_dumps().size());
- ASSERT_EQ(heap_dumps_ptr[0], GetHeapDump(*pmd1, "pmd1/heap_dump1"));
- ASSERT_EQ(heap_dumps_ptr[1], GetHeapDump(*pmd1, "pmd1/heap_dump2"));
- ASSERT_EQ(heap_dumps_ptr[2], GetHeapDump(*pmd1, "pmd2/heap_dump1"));
- ASSERT_EQ(heap_dumps_ptr[3], GetHeapDump(*pmd1, "pmd2/heap_dump2"));
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
// Check that calling AsValueInto() doesn't cause a crash.
traced_value.reset(new TracedValue);
@@ -154,17 +165,18 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
}
TEST(ProcessMemoryDumpTest, Suballocations) {
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
const std::string allocator_dump_name = "fakealloc/allocated_objects";
pmd->CreateAllocatorDump(allocator_dump_name);
// Create one allocation with an auto-assigned guid and mark it as a
// suballocation of "fakealloc/allocated_objects".
- auto pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
+ auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);
// Same here, but this time create an allocation with an explicit guid.
- auto pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
+ auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
MemoryAllocatorDumpGuid(0x42));
pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);
@@ -198,29 +210,75 @@ TEST(ProcessMemoryDumpTest, Suballocations) {
}
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
MemoryAllocatorDumpGuid shared_mad_guid(1);
- auto shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad2);
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad3);
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad4);
ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
- auto shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad5);
ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
}
+TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
+ MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, background_args));
+ ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
+ SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
+ MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+
+ // Invalid dump names.
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
+ EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
+
+ // Global dumps.
+ MemoryAllocatorDumpGuid guid(1);
+ EXPECT_EQ(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
+ EXPECT_EQ(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
+ EXPECT_EQ(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
+
+ // Suballocations.
+ pmd->AddSuballocation(guid, "malloc/allocated_objects");
+ EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
+ EXPECT_EQ(0u, pmd->allocator_dumps_.size());
+
+ // Valid dump names.
+ EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
+ EXPECT_NE(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
+ EXPECT_NE(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
+
+ // GetAllocatorDump is consistent.
+ EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
+}
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
TEST(ProcessMemoryDumpTest, CountResidentBytes) {
const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 25a0cd6d40b..d4dc2cc2e4e 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -47,6 +47,7 @@ const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
// String parameters used to parse the memory dump config in a trace config
// string.
const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
const char kPeriodicIntervalParam[] = "periodic_interval_ms";
const char kModeParam[] = "mode";
@@ -75,6 +76,15 @@ class ConvertableTraceConfigToTraceFormat
const TraceConfig trace_config_;
};
+std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
+ std::set<MemoryDumpLevelOfDetail> all_modes;
+ for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
+ mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+ all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
+ }
+ return all_modes;
+}
+
} // namespace
@@ -85,6 +95,12 @@ void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
}
+void TraceConfig::ResetMemoryDumpConfig(
+ const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+ memory_dump_config_.Clear();
+ memory_dump_config_ = memory_dump_config;
+}
+
TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {};
TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
@@ -93,6 +109,7 @@ TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {};
void TraceConfig::MemoryDumpConfig::Clear() {
+ allowed_dump_modes.clear();
triggers.clear();
heap_profiler_options.Clear();
}
@@ -311,8 +328,6 @@ void TraceConfig::InitializeDefault() {
enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
- excluded_categories_.push_back("*Debug");
- excluded_categories_.push_back("*Test");
}
void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
@@ -361,7 +376,7 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
// category enabled. So, use the default periodic dump config.
const base::DictionaryValue* memory_dump_config = nullptr;
if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
- SetMemoryDumpConfig(*memory_dump_config);
+ SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
else
SetDefaultMemoryDumpConfig();
}
@@ -509,11 +524,25 @@ void TraceConfig::AddCategoryToDict(base::DictionaryValue& dict,
dict.Set(param, std::move(list));
}
-void TraceConfig::SetMemoryDumpConfig(
+void TraceConfig::SetMemoryDumpConfigFromConfigDict(
const base::DictionaryValue& memory_dump_config) {
+ // Set allowed dump modes.
+ memory_dump_config_.allowed_dump_modes.clear();
+ const base::ListValue* allowed_modes_list;
+ if (memory_dump_config.GetList(kAllowedDumpModesParam, &allowed_modes_list)) {
+ for (size_t i = 0; i < allowed_modes_list->GetSize(); ++i) {
+ std::string level_of_detail_str;
+ allowed_modes_list->GetString(i, &level_of_detail_str);
+ memory_dump_config_.allowed_dump_modes.insert(
+ StringToMemoryDumpLevelOfDetail(level_of_detail_str));
+ }
+ } else {
+ // If the allowed modes param is not given, allow all modes by default.
+ memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+ }
+
// Set triggers
memory_dump_config_.triggers.clear();
-
const base::ListValue* trigger_list = nullptr;
if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
trigger_list->GetSize() > 0) {
@@ -559,6 +588,7 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.Clear();
memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
+ memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
void TraceConfig::ToDict(base::DictionaryValue& dict) const {
@@ -605,6 +635,15 @@ void TraceConfig::ToDict(base::DictionaryValue& dict) const {
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
std::unique_ptr<base::DictionaryValue> memory_dump_config(
new base::DictionaryValue());
+ std::unique_ptr<base::ListValue> allowed_modes_list(new base::ListValue());
+ for (MemoryDumpLevelOfDetail dump_mode :
+ memory_dump_config_.allowed_dump_modes) {
+ allowed_modes_list->AppendString(
+ MemoryDumpLevelOfDetailToString(dump_mode));
+ }
+ memory_dump_config->Set(kAllowedDumpModesParam,
+ std::move(allowed_modes_list));
+
std::unique_ptr<base::ListValue> triggers_list(new base::ListValue());
for (const MemoryDumpConfig::Trigger& config
: memory_dump_config_.triggers) {
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index 5b119eae98c..c3a940e2592 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -7,6 +7,7 @@
#include <stdint.h>
+#include <set>
#include <string>
#include <vector>
@@ -42,7 +43,7 @@ class BASE_EXPORT TraceConfig {
// Specifies the memory dump config for tracing.
// Used only when "memory-infra" category is enabled.
- struct MemoryDumpConfig {
+ struct BASE_EXPORT MemoryDumpConfig {
MemoryDumpConfig();
MemoryDumpConfig(const MemoryDumpConfig& other);
~MemoryDumpConfig();
@@ -69,6 +70,11 @@ class BASE_EXPORT TraceConfig {
// Reset the values in the config.
void Clear();
+ // Set of memory dump modes allowed for the tracing session. Explicitly
+ // triggered dumps succeed only if their dump mode is allowed by the
+ // config.
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
+
std::vector<Trigger> triggers;
HeapProfiler heap_profiler_options;
};
@@ -139,7 +145,7 @@ class BASE_EXPORT TraceConfig {
// "inc_pattern*",
// "disabled-by-default-memory-infra"],
// "excluded_categories": ["excluded", "exc_pattern*"],
- // "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"]
+ // "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"],
// "memory_dump_config": {
// "triggers": [
// {
@@ -188,7 +194,8 @@ class BASE_EXPORT TraceConfig {
std::string ToCategoryFilterString() const;
// Returns true if at least one category in the list is enabled by this
- // trace config.
+ // trace config. This is used to determine if the category filters are
+ // enabled in the TRACE_* macros.
bool IsCategoryGroupEnabled(const char* category_group) const;
// Merges config with the current TraceConfig
@@ -196,6 +203,9 @@ class BASE_EXPORT TraceConfig {
void Clear();
+ // Clears and resets the memory dump config.
+ void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
@@ -204,7 +214,6 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, ConstructDefaultTraceConfig);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -212,6 +221,8 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
+ FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+ EmptyAndAsteriskCategoryFilterString);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -235,7 +246,8 @@ class BASE_EXPORT TraceConfig {
const char* param,
const StringList& categories) const;
- void SetMemoryDumpConfig(const base::DictionaryValue& memory_dump_config);
+ void SetMemoryDumpConfigFromConfigDict(
+ const base::DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
// Convert TraceConfig to the dict representation of the TraceConfig.
@@ -249,7 +261,10 @@ class BASE_EXPORT TraceConfig {
void WriteCategoryFilterString(const StringList& delays,
std::string* out) const;
- // Returns true if category is enable according to this trace config.
+ // Returns true if the category is enabled according to this trace config.
+ // This tells whether a category is enabled from the TraceConfig's
+ // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+ // category is enabled from the tracing runtime's perspective.
bool IsCategoryEnabled(const char* category_name) const;
static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
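Two additions in this header are worth calling out: MemoryDumpConfig now carries the allowed_dump_modes set (and is tagged BASE_EXPORT), and the new public ResetMemoryDumpConfig() lets callers install a whole replacement dump config. A short usage sketch, assuming only the declarations above; the variable names and the chosen category are illustrative:

  #include "base/trace_event/memory_dump_request_args.h"
  #include "base/trace_event/trace_config.h"

  using base::trace_event::MemoryDumpLevelOfDetail;
  using base::trace_event::TraceConfig;

  TraceConfig config("disabled-by-default-memory-infra", "record-until-full");

  // Build a replacement dump config that only allows LIGHT explicit dumps.
  TraceConfig::MemoryDumpConfig dump_config;
  dump_config.allowed_dump_modes.insert(MemoryDumpLevelOfDetail::LIGHT);
  config.ResetMemoryDumpConfig(dump_config);

  // IsCategoryGroupEnabled() is the public check used on the TRACE_* macro
  // path; the private IsCategoryEnabled() only reflects the config's own
  // filter list, as the comments above describe.
  bool enabled =
      config.IsCategoryGroupEnabled("disabled-by-default-memory-infra");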
diff --git a/chromium/base/trace_event/trace_config_memory_test_util.h b/chromium/base/trace_event/trace_config_memory_test_util.h
index 1acc62b9cec..6b47f8dc550 100644
--- a/chromium/base/trace_event/trace_config_memory_test_util.h
+++ b/chromium/base/trace_event/trace_config_memory_test_util.h
@@ -24,6 +24,7 @@ class TraceConfigMemoryTestUtil {
"\"%s\""
"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":2048"
"},"
@@ -52,6 +53,7 @@ class TraceConfigMemoryTestUtil {
"\"%s\""
"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"triggers\":["
"]"
"},"
@@ -71,6 +73,28 @@ class TraceConfigMemoryTestUtil {
"\"record_mode\":\"record-until-full\""
"}", MemoryDumpManager::kTraceCategory);
}
+
+ static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\"],"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"background\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}", MemoryDumpManager::kTraceCategory, period_ms);
+ }
};
} // namespace trace_event
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index a17337619b2..4b46b2fefdd 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -21,7 +21,6 @@ const char kDefaultTraceConfigString[] =
"\"enable_argument_filter\":false,"
"\"enable_sampling\":false,"
"\"enable_systrace\":false,"
- "\"excluded_categories\":[\"*Debug\",\"*Test\"],"
"\"record_mode\":\"record-until-full\""
"}";
@@ -36,6 +35,7 @@ const char kCustomTraceConfigString[] =
"\"disabled-by-default-cc\","
"\"disabled-by-default-memory-infra\"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":10240"
"},"
@@ -48,6 +48,24 @@ const char kCustomTraceConfigString[] =
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
"}";
+void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+ // Default trace config enables every category filter except the
+ // disabled-by-default-* ones.
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,not-excluded-category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,disabled-by-default-cc"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled(
+ "disabled-by-default-cc,disabled-by-default-cc2"));
+}
+
} // namespace
TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -155,9 +173,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config.ToTraceOptionsString().c_str());
// From category filter strings
- config = TraceConfig("-*Debug,-*Test", "");
- EXPECT_STREQ("-*Debug,-*Test", config.ToCategoryFilterString().c_str());
-
config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*", "");
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
@@ -257,38 +272,79 @@ TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
}
TEST(TraceConfigTest, ConstructDefaultTraceConfig) {
- // Make sure that upon an empty string, we fall back to the default config.
TraceConfig tc;
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
- EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
- EXPECT_FALSE(tc.IsSystraceEnabled());
- EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ // Constructors from category filter string and trace option string.
+ TraceConfig tc_asterisk("*", "");
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ TraceConfig tc_empty_category_filter("", "");
+ EXPECT_STREQ("", tc_empty_category_filter.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_category_filter.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_category_filter);
+
+  // Constructor from a JSON-formatted config string.
+ TraceConfig tc_empty_json_string("");
+ EXPECT_STREQ("", tc_empty_json_string.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_json_string.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_json_string);
+
+ // Constructor from dictionary value.
+ DictionaryValue dict;
+ TraceConfig tc_dict(dict);
+ EXPECT_STREQ("", tc_dict.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc_dict.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_dict);
+}
- EXPECT_FALSE(tc.IsCategoryEnabled("Category1"));
- EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryEnabled("CategoryTest"));
- EXPECT_FALSE(tc.IsCategoryEnabled("CategoryDebug"));
- EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-cc"));
+TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
+ TraceConfig tc_empty("", "");
+ TraceConfig tc_asterisk("*", "");
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryTest"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_STREQ("", tc_empty.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+
+ // Both fall back to default config.
+ CheckDefaultTraceConfigBehavior(tc_empty);
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ // They differ only for internal checking.
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+}
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,CategoryDebug"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryDebug,Category1"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryTest,not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug,CategoryTest"));
+TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
+ TraceConfig tc("foo,disabled-by-default-foo", "");
+ EXPECT_STREQ("foo,disabled-by-default-foo",
+ tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ tc = TraceConfig("disabled-by-default-foo", "");
+ EXPECT_STREQ("disabled-by-default-foo", tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
}
TEST(TraceConfigTest, TraceConfigFromDict) {
- // Passing in empty dictionary will not result in default trace config.
+  // Passing in an empty dictionary will result in the default trace config.
DictionaryValue dict;
TraceConfig tc(dict);
- EXPECT_STRNE(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
@@ -307,7 +363,7 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
EXPECT_FALSE(default_tc.IsSamplingEnabled());
EXPECT_FALSE(default_tc.IsSystraceEnabled());
EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", default_tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
std::unique_ptr<Value> custom_value(
JSONReader::Read(kCustomTraceConfigString));
@@ -405,7 +461,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("This is an invalid config string.");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -413,7 +470,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -421,7 +479,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -429,7 +488,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
  // If the config string is a dictionary formatted as a JSON string, it will
// initialize TraceConfig with best effort.
@@ -439,6 +499,7 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
@@ -446,6 +507,7 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
const char invalid_config_string[] =
"{"
@@ -487,9 +549,7 @@ TEST(TraceConfigTest, MergingTraceConfigs) {
"\"enable_argument_filter\":false,"
"\"enable_sampling\":false,"
"\"enable_systrace\":false,"
- "\"excluded_categories\":["
- "\"*Debug\",\"*Test\",\"excluded\",\"exc_pattern*\""
- "],"
+ "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"record_mode\":\"record-until-full\""
"}",
tc.ToString().c_str());
@@ -568,22 +628,34 @@ TEST(TraceConfigTest, SetTraceOptionValues) {
}
TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
- std::string tc_str =
+ std::string tc_str1 =
TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
- TraceConfig tc(tc_str);
- EXPECT_EQ(tc_str, tc.ToString());
- EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(2u, tc.memory_dump_config_.triggers.size());
+ TraceConfig tc1(tc_str1);
+ EXPECT_EQ(tc_str1, tc1.ToString());
+ EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
- EXPECT_EQ(200u, tc.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
- tc.memory_dump_config_.triggers[0].level_of_detail);
+ tc1.memory_dump_config_.triggers[0].level_of_detail);
- EXPECT_EQ(2000u, tc.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc.memory_dump_config_.triggers[1].level_of_detail);
- EXPECT_EQ(2048u, tc.memory_dump_config_.heap_profiler_options.
- breakdown_threshold_bytes);
+ tc1.memory_dump_config_.triggers[1].level_of_detail);
+ EXPECT_EQ(
+ 2048u,
+ tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+
+ std::string tc_str2 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */);
+ TraceConfig tc2(tc_str2);
+ EXPECT_EQ(tc_str2, tc2.ToString());
+ EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+ tc2.memory_dump_config_.triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/chromium/base/trace_event/trace_event.gypi b/chromium/base/trace_event/trace_event.gypi
index 4335ea1b981..f915780de5f 100644
--- a/chromium/base/trace_event/trace_event.gypi
+++ b/chromium/base/trace_event/trace_event.gypi
@@ -35,6 +35,8 @@
'trace_event/memory_dump_request_args.h',
'trace_event/memory_dump_session_state.cc',
'trace_event/memory_dump_session_state.h',
+ 'trace_event/memory_infra_background_whitelist.cc',
+ 'trace_event/memory_infra_background_whitelist.h',
'trace_event/process_memory_dump.cc',
'trace_event/process_memory_dump.h',
'trace_event/process_memory_maps.cc',
diff --git a/chromium/base/trace_event/trace_event_android.cc b/chromium/base/trace_event/trace_event_android.cc
index d406d2cafae..a28c54a8b90 100644
--- a/chromium/base/trace_event/trace_event_android.cc
+++ b/chromium/base/trace_event/trace_event_android.cc
@@ -128,7 +128,8 @@ void TraceLog::StopATrace() {
// TraceLog::Flush() requires the current thread to have a message loop, but
// this thread called from Java may not have one, so flush in another thread.
Thread end_chrome_tracing_thread("end_chrome_tracing");
- WaitableEvent complete_event(false, false);
+ WaitableEvent complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
end_chrome_tracing_thread.Start();
end_chrome_tracing_thread.task_runner()->PostTask(
FROM_HERE, base::Bind(&EndChromeTracing, Unretained(this),
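The WaitableEvent change above is the first of many identical mechanical updates in this patch (the trace_event_unittest.cc and trace_sampling_thread.cc hunks below repeat it): the old two-bool constructor gives way to explicit enums. A small sketch of the migration; the meaning of the old booleans is recalled from the pre-patch API rather than shown in this diff:

  #include "base/synchronization/waitable_event.h"

  // Before: base::WaitableEvent event(false /* manual_reset */,
  //                                   false /* initially_signaled */);
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);

  // Signal/Wait semantics are unchanged; AUTOMATIC means the event resets
  // itself after waking a single waiter.
  event.Signal();
  event.Wait();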
diff --git a/chromium/base/trace_event/trace_event_argument.cc b/chromium/base/trace_event/trace_event_argument.cc
index 8babf3b47fa..336d964bff4 100644
--- a/chromium/base/trace_event/trace_event_argument.cc
+++ b/chromium/base/trace_event/trace_event_argument.cc
@@ -288,7 +288,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
- for (base::Value* base_value : *list_value)
+ for (const auto& base_value : *list_value)
AppendBaseValue(*base_value);
EndArray();
} break;
@@ -342,7 +342,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArray();
- for (base::Value* base_value : *list_value)
+ for (const auto& base_value : *list_value)
AppendBaseValue(*base_value);
EndArray();
} break;
@@ -361,7 +361,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
switch (*type) {
case kTypeStartDict: {
- auto new_dict = new DictionaryValue();
+ auto* new_dict = new DictionaryValue();
if (cur_dict) {
cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
WrapUnique(new_dict));
@@ -386,7 +386,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
} break;
case kTypeStartArray: {
- auto new_list = new ListValue();
+ auto* new_list = new ListValue();
if (cur_dict) {
cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
WrapUnique(new_list));
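The loops above switch from binding a raw base::Value* to const auto&, matching ListValue iteration now handing out smart pointers in this Chromium revision; dereferencing still yields the underlying base::Value. A small sketch of the pattern, with the visiting function purely illustrative:

  #include "base/values.h"

  void VisitList(const base::ListValue& list) {
    // const auto& binds to whatever the iterator yields here; *entry is the
    // contained base::Value either way.
    for (const auto& entry : list) {
      const base::Value& value = *entry;
      (void)value;  // inspect |value| here
    }
  }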
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index e2e250ed567..f469f2f6bcb 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -261,7 +261,7 @@ void TraceEvent::AppendValueAsJSON(unsigned char type,
// So as not to lose bits from a 64-bit pointer, output as a hex string.
StringAppendF(
out, "\"0x%" PRIx64 "\"",
- static_cast<uint64_t>(reinterpret_cast<intptr_t>(value.as_pointer)));
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value.as_pointer)));
break;
case TRACE_VALUE_TYPE_STRING:
case TRACE_VALUE_TYPE_COPY_STRING:
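The intptr_t to uintptr_t change above matters on platforms where a pointer's top bit can be set: widening a negative intptr_t to uint64_t sign-extends, so the printed hex value would gain leading 0xff bytes, while uintptr_t zero-extends. A standalone illustration; the function name is illustrative:

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>

  void PrintPointerAsJson(const void* p) {
    // uintptr_t keeps e.g. 0x80000000 (32-bit) as 0x0000000080000000 after
    // widening; going through intptr_t would yield 0xffffffff80000000.
    std::printf("\"0x%" PRIx64 "\"\n",
                static_cast<uint64_t>(reinterpret_cast<uintptr_t>(p)));
  }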
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index ba7207d6163..23579cbb22d 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -104,7 +104,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
- for (const Value* v : *list_value)
+ for (const auto& v : *list_value)
AddValue(*v);
} break;
diff --git a/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc b/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
index e834ded3700..3ec6eab8163 100644
--- a/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
+++ b/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
@@ -8,6 +8,7 @@
#include <string>
#include "base/macros.h"
+#include "base/run_loop.h"
#include "base/trace_event/trace_event_impl.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -48,12 +49,12 @@ TEST_F(TraceSystemStatsMonitorTest, TraceEventSystemStatsMonitor) {
// Simulate enabling tracing.
system_stats_monitor->StartProfiling();
- message_loop.RunUntilIdle();
+ RunLoop().RunUntilIdle();
EXPECT_TRUE(system_stats_monitor->IsTimerRunningForTest());
// Simulate disabling tracing.
system_stats_monitor->StopProfiling();
- message_loop.RunUntilIdle();
+ RunLoop().RunUntilIdle();
EXPECT_FALSE(system_stats_monitor->IsTimerRunningForTest());
// Deleting the observer removes it from the TraceLog observer list.
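The unittest change above swaps message_loop.RunUntilIdle() for RunLoop().RunUntilIdle(); a MessageLoop is still required on the calling thread, only the entry point for pumping it moves. A minimal sketch under that assumption; the helper names are illustrative:

  #include "base/bind.h"
  #include "base/location.h"
  #include "base/message_loop/message_loop.h"
  #include "base/run_loop.h"

  void DoWork() {}

  void PumpPostedTasksOnce() {
    base::MessageLoop message_loop;  // RunLoop needs one on this thread.
    message_loop.task_runner()->PostTask(FROM_HERE, base::Bind(&DoWork));
    base::RunLoop().RunUntilIdle();  // replaces message_loop.RunUntilIdle()
  }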
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index e626a779ed5..ff8ec2de788 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -10,6 +10,7 @@
#include <cstdlib>
#include <memory>
+#include <utility>
#include "base/bind.h"
#include "base/command_line.h"
@@ -96,14 +97,18 @@ class TraceEventTestFixture : public testing::Test {
}
void CancelTrace() {
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
CancelTraceAsync(&flush_complete_event);
flush_complete_event.Wait();
}
void EndTraceAndFlush() {
num_flush_callbacks_ = 0;
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EndTraceAndFlushAsync(&flush_complete_event);
flush_complete_event.Wait();
}
@@ -111,7 +116,9 @@ class TraceEventTestFixture : public testing::Test {
// Used when testing thread-local buffers which requires the thread initiating
// flush to have a message loop.
void EndTraceAndFlushInThreadWithMessageLoop() {
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Thread flush_thread("flush");
flush_thread.Start();
flush_thread.task_runner()->PostTask(
@@ -199,7 +206,7 @@ void TraceEventTestFixture::OnTraceDataCollected(
while (root_list->GetSize()) {
std::unique_ptr<Value> item;
root_list->Remove(0, &item);
- trace_parsed_.Append(item.release());
+ trace_parsed_.Append(std::move(item));
}
if (!has_more_events)
@@ -997,6 +1004,17 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
}
}
+void CheckTraceDefaultCategoryFilters(const TraceLog& trace_log) {
+ // Default enables all category filters except the disabled-by-default-* ones.
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("bar"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo,bar"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled(
+ "foo,disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log.GetCategoryGroupEnabled(
+ "disabled-by-default-foo,disabled-by-default-bar"));
+}
+
} // namespace
// Simple Test for emitting data and validating it was received.
@@ -1609,7 +1627,8 @@ TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
@@ -1631,7 +1650,9 @@ TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
WaitableEvent* task_complete_events[num_threads];
for (int i = 0; i < num_threads; i++) {
threads[i] = new Thread(StringPrintf("Thread %d", i));
- task_complete_events[i] = new WaitableEvent(false, false);
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
threads[i]->task_runner()->PostTask(
FROM_HERE, base::Bind(&TraceManyInstantEvents, i, num_events,
@@ -1678,7 +1699,9 @@ TEST_F(TraceEventTestFixture, ThreadNames) {
// Now run some trace code on these threads.
WaitableEvent* task_complete_events[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
- task_complete_events[i] = new WaitableEvent(false, false);
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
thread_ids[i] = threads[i]->GetThreadId();
threads[i]->task_runner()->PostTask(
@@ -1951,7 +1974,7 @@ TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
EXPECT_STREQ(
- "-*Debug,-*Test",
+ "",
trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
trace_log->SetDisabled();
trace_log->SetDisabled();
@@ -1988,6 +2011,48 @@ TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
trace_log->SetDisabled();
}
+TEST_F(TraceEventTestFixture, TraceWithDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("*", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig(""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig("foo,disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ trace_log->SetEnabled(TraceConfig("disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+}
+
TEST_F(TraceEventTestFixture, TraceSampling) {
TraceLog::GetInstance()->SetEnabled(
TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
@@ -2823,7 +2888,8 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
@@ -2833,8 +2899,10 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
@@ -2895,15 +2963,18 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped, &task_start_event,
&task_stop_event));
@@ -2920,7 +2991,8 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
@@ -2928,8 +3000,10 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
task_complete_event.Wait();
task_complete_event.Reset();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
@@ -3099,5 +3173,12 @@ TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
EXPECT_EQ(filter, config.ToCategoryFilterString());
}
+TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
+ BeginSpecificTrace("-*");
+ TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindNamePhase("clock_sync", "c"));
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index cc40ba98eb3..0661caf6fd9 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -86,7 +86,7 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-#define MAX_CATEGORY_GROUPS 105
+#define MAX_CATEGORY_GROUPS 200
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
@@ -402,7 +402,7 @@ void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
return;
HEAP_PROFILER_SCOPED_IGNORE;
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
if (thread_local_event_buffer &&
!CheckGeneration(thread_local_event_buffer->generation())) {
delete thread_local_event_buffer;
@@ -479,6 +479,12 @@ void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
}
#endif
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+ // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+ if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
+ enabled_flag |= ENABLED_FOR_RECORDING;
+
g_category_group_enabled[category_index] = enabled_flag;
}
@@ -1038,7 +1044,7 @@ void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
for (hash_set<MessageLoop*>::const_iterator it =
thread_message_loops_.begin();
it != thread_message_loops_.end(); ++it) {
- LOG(WARNING) << "Thread: " << (*it)->thread_name();
+ LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
}
}
FinishFlush(generation, discard_events);
@@ -1221,7 +1227,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// |thread_local_event_buffer_| can be null if the current thread doesn't have
// a message loop or the message loop is blocked.
InitializeThreadLocalEventBufferIfSupported();
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
diff --git a/chromium/base/trace_event/trace_sampling_thread.cc b/chromium/base/trace_event/trace_sampling_thread.cc
index a8d32d6ee2a..5a0d2f8a024 100644
--- a/chromium/base/trace_event/trace_sampling_thread.cc
+++ b/chromium/base/trace_event/trace_sampling_thread.cc
@@ -25,7 +25,9 @@ class TraceBucketData {
};
TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false), waitable_event_for_testing_(false, false) {}
+ : thread_running_(false),
+ waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
TraceSamplingThread::~TraceSamplingThread() {}
diff --git a/chromium/base/trace_event/winheap_dump_provider_win.cc b/chromium/base/trace_event/winheap_dump_provider_win.cc
index 80956369cca..f918aafad19 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win.cc
+++ b/chromium/base/trace_event/winheap_dump_provider_win.cc
@@ -9,7 +9,6 @@
#include "base/debug/profiler.h"
#include "base/strings/string_util.h"
#include "base/trace_event/process_memory_dump.h"
-#include "base/win/windows_version.h"
namespace base {
namespace trace_event {
@@ -56,13 +55,7 @@ bool WinHeapDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
  // then chaos should be assumed. This flakiness is acceptable for tracing.
// - The MSDN page for HeapLock says: "If the HeapLock function is called on
// a heap created with the HEAP_NO_SERIALIZATION flag, the results are
- // undefined.". This is a problem on Windows XP where some system DLLs are
- // known for creating heaps with this particular flag. For this reason
- // this function should be disabled on XP.
- //
- // See https://crbug.com/487291 for more details about this.
- if (base::win::GetVersion() < base::win::VERSION_VISTA)
- return false;
+ // undefined."
// Disable this dump provider for the SyzyASan instrumented build
// because they don't support the heap walking functions yet.
diff --git a/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc b/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
index d7488fee457..c2dc01c195c 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
+++ b/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
@@ -14,8 +14,8 @@ namespace base {
namespace trace_event {
TEST(WinHeapDumpProviderTest, OnMemoryDump) {
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
WinHeapDumpProvider* winheap_dump_provider =
WinHeapDumpProvider::GetInstance();
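This test fix reflects a wider change in the patch: ProcessMemoryDump is now constructed with the MemoryDumpArgs, and therefore its level of detail, up front instead of receiving them per dump. A minimal construction sketch based only on the constructor call shown in the hunk above:

  #include "base/trace_event/memory_dump_request_args.h"
  #include "base/trace_event/memory_dump_session_state.h"
  #include "base/trace_event/process_memory_dump.h"

  // The dump args are required at construction time in this revision.
  base::trace_event::MemoryDumpArgs dump_args = {
      base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
  base::trace_event::ProcessMemoryDump pmd(
      new base::trace_event::MemoryDumpSessionState, dump_args);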