Diffstat (limited to 'chromium/base/task/thread_pool/thread_group_impl.cc')
-rw-r--r-- chromium/base/task/thread_pool/thread_group_impl.cc | 123
1 file changed, 64 insertions(+), 59 deletions(-)
diff --git a/chromium/base/task/thread_pool/thread_group_impl.cc b/chromium/base/task/thread_pool/thread_group_impl.cc
index 378493afc78..0c17b47d31b 100644
--- a/chromium/base/task/thread_pool/thread_group_impl.cc
+++ b/chromium/base/task/thread_pool/thread_group_impl.cc
@@ -41,25 +41,6 @@
#include "base/win/windows_version.h"
#endif // defined(OS_WIN)
-// Data from deprecated UMA histograms:
-//
-// ThreadPool.NumTasksBetweenWaits.(Browser/Renderer).Foreground, August 2019
-// Number of tasks between two waits by a foreground worker thread in a
-// browser/renderer process.
-//
-// Windows (browser/renderer)
-// 1 at 87th percentile / 92nd percentile
-// 2 at 95th percentile / 98th percentile
-// 5 at 99th percentile / 100th percentile
-// Mac (browser/renderer)
-// 1 at 81st percentile / 90th percentile
-// 2 at 92nd percentile / 97th percentile
-// 5 at 98th percentile / 100th percentile
-// Android (browser/renderer)
-// 1 at 92nd percentile / 96th percentile
-// 2 at 97th percentile / 98th percentile
-// 5 at 99th percentile / 100th percentile
-
namespace base {
namespace internal {
@@ -342,40 +323,57 @@ ThreadGroupImpl::ThreadGroupImpl(StringPiece histogram_label,
priority_hint_(priority_hint),
idle_workers_stack_cv_for_testing_(lock_.CreateConditionVariable()),
// Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
- detach_duration_histogram_(Histogram::FactoryTimeGet(
- JoinString({kDetachDurationHistogramPrefix, histogram_label}, ""),
- TimeDelta::FromMilliseconds(1),
- TimeDelta::FromHours(1),
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
+ detach_duration_histogram_(
+ histogram_label.empty()
+ ? nullptr
+ : Histogram::FactoryTimeGet(
+ JoinString(
+ {kDetachDurationHistogramPrefix, histogram_label},
+ ""),
+ TimeDelta::FromMilliseconds(1),
+ TimeDelta::FromHours(1),
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
// Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
// than 1000 tasks before detaching, there is no need to know the exact
// number of tasks that ran.
- num_tasks_before_detach_histogram_(Histogram::FactoryGet(
- JoinString({kNumTasksBeforeDetachHistogramPrefix, histogram_label},
- ""),
- 1,
- 1000,
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
+ num_tasks_before_detach_histogram_(
+ histogram_label.empty()
+ ? nullptr
+ : Histogram::FactoryGet(
+ JoinString(
+ {kNumTasksBeforeDetachHistogramPrefix, histogram_label},
+ ""),
+ 1,
+ 1000,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
// Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A ThreadGroup is
// expected to run between zero and a few tens of workers.
// When it runs more than 100 workers, there is no need to know the exact
// number of workers that ran.
- num_workers_histogram_(Histogram::FactoryGet(
- JoinString({kNumWorkersHistogramPrefix, histogram_label}, ""),
- 1,
- 100,
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
- num_active_workers_histogram_(Histogram::FactoryGet(
- JoinString({kNumActiveWorkersHistogramPrefix, histogram_label}, ""),
- 1,
- 100,
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
+ num_workers_histogram_(
+ histogram_label.empty()
+ ? nullptr
+ : Histogram::FactoryGet(
+ JoinString({kNumWorkersHistogramPrefix, histogram_label},
+ ""),
+ 1,
+ 100,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ num_active_workers_histogram_(
+ histogram_label.empty()
+ ? nullptr
+ : Histogram::FactoryGet(
+ JoinString(
+ {kNumActiveWorkersHistogramPrefix, histogram_label},
+ ""),
+ 1,
+ 100,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
tracked_ref_factory_(this) {
- DCHECK(!histogram_label.empty());
DCHECK(!thread_group_label_.empty());
}
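
The constructor hunk above turns each histogram member into an optional pointer: when |histogram_label| is empty, the ternary leaves the member nullptr instead of registering a UMA histogram, and the old DCHECK(!histogram_label.empty()) is dropped to match. A minimal standalone sketch of the pattern, using only calls visible in the patch (the helper name MaybeCreateCounts1000Histogram is illustrative, not part of this change):

// Sketch: register a UMA counts histogram only when a label is provided.
// Mirrors the UMA_HISTOGRAM_COUNTS_1000 parameters (range 1-1000, 50 buckets).
HistogramBase* MaybeCreateCounts1000Histogram(StringPiece prefix,
                                              StringPiece label) {
  if (label.empty())
    return nullptr;  // Metrics are disabled for unlabeled thread groups.
  return Histogram::FactoryGet(JoinString({prefix, label}, ""), 1, 1000, 50,
                               HistogramBase::kUmaTargetedHistogramFlag);
}
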
@@ -484,10 +482,6 @@ void ThreadGroupImpl::WaitForWorkersCleanedUpForTesting(size_t n) {
}
void ThreadGroupImpl::JoinForTesting() {
-#if DCHECK_IS_ON()
- join_for_testing_started_.Set();
-#endif
-
decltype(workers_) workers_copy;
{
CheckedAutoLock auto_lock(lock_);
@@ -496,6 +490,8 @@ void ThreadGroupImpl::JoinForTesting() {
DCHECK_GT(workers_.size(), size_t(0))
<< "Joined an unstarted thread group.";
+ join_for_testing_started_ = true;
+
// Ensure WorkerThreads in |workers_| do not attempt to cleanup while
// being joined.
worker_cleanup_disallowed_for_testing_ = true;
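
Together, the two JoinForTesting() hunks retire the old AtomicFlag: instead of being set lock-free before the join, |join_for_testing_started_| becomes a plain bool written inside the CheckedAutoLock scope, so every reader that already holds |lock_| observes it consistently. A hedged before/after sketch (the member declaration is paraphrased, not copied from the header):

// Before: AtomicFlag join_for_testing_started_; set without holding |lock_|.
// After: a plain bool, assumed guarded by the lock, e.g.
//   bool join_for_testing_started_ GUARDED_BY(lock_) = false;
{
  CheckedAutoLock auto_lock(lock_);
  join_for_testing_started_ = true;  // Visible to all holders of |lock_|.
}
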
@@ -531,10 +527,13 @@ size_t ThreadGroupImpl::NumberOfIdleWorkersForTesting() const {
void ThreadGroupImpl::ReportHeartbeatMetrics() const {
CheckedAutoLock auto_lock(lock_);
- num_workers_histogram_->Add(workers_.size());
-
- num_active_workers_histogram_->Add(workers_.size() -
- idle_workers_stack_.Size());
+ if (num_workers_histogram_) {
+ num_workers_histogram_->Add(workers_.size());
+ }
+ if (num_active_workers_histogram_) {
+ num_active_workers_histogram_->Add(workers_.size() -
+ idle_workers_stack_.Size());
+ }
}
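
Because the histogram members can now be null, every recording site guards the call, as ReportHeartbeatMetrics() does above. A tiny illustrative helper (hypothetical; the patch inlines the checks instead) that would centralize the guard:

// Sketch: record a sample only if the histogram was actually created.
void MaybeAdd(HistogramBase* histogram, int sample) {
  if (histogram)
    histogram->Add(sample);
}
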
ThreadGroupImpl::WorkerThreadDelegateImpl::WorkerThreadDelegateImpl(
@@ -710,10 +709,13 @@ bool ThreadGroupImpl::WorkerThreadDelegateImpl::CanCleanupLockRequired(
void ThreadGroupImpl::WorkerThreadDelegateImpl::CleanupLockRequired(
WorkerThread* worker) {
+ DCHECK(!outer_->join_for_testing_started_);
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
- outer_->num_tasks_before_detach_histogram_->Add(
- worker_only().num_tasks_since_last_detach);
+ if (outer_->num_tasks_before_detach_histogram_) {
+ outer_->num_tasks_before_detach_histogram_->Add(
+ worker_only().num_tasks_since_last_detach);
+ }
outer_->cleanup_timestamps_.push(subtle::TimeTicksNowIgnoringOverride());
worker->Cleanup();
outer_->idle_workers_stack_.Remove(worker);
@@ -756,7 +758,7 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::OnMainExit(
// |workers_| by the time the thread is about to exit. (except in the cases
// where the thread group is no longer going to be used - in which case,
// it's fine for there to be invalid workers in the thread group.)
- if (!shutdown_complete && !outer_->join_for_testing_started_.IsSet()) {
+ if (!shutdown_complete && !outer_->join_for_testing_started_) {
DCHECK(!outer_->idle_workers_stack_.Contains(worker));
DCHECK(!ContainsWorker(outer_->workers_, worker));
}
@@ -944,6 +946,7 @@ void ThreadGroupImpl::MaintainAtLeastOneIdleWorkerLockRequired(
scoped_refptr<WorkerThread>
ThreadGroupImpl::CreateAndRegisterWorkerLockRequired(
ScopedWorkersExecutor* executor) {
+ DCHECK(!join_for_testing_started_);
DCHECK_LT(workers_.size(), max_tasks_);
DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
DCHECK(idle_workers_stack_.IsEmpty());
@@ -962,8 +965,10 @@ ThreadGroupImpl::CreateAndRegisterWorkerLockRequired(
DCHECK_LE(workers_.size(), max_tasks_);
if (!cleanup_timestamps_.empty()) {
- detach_duration_histogram_->AddTime(subtle::TimeTicksNowIgnoringOverride() -
- cleanup_timestamps_.top());
+ if (detach_duration_histogram_) {
+ detach_duration_histogram_->AddTime(
+ subtle::TimeTicksNowIgnoringOverride() - cleanup_timestamps_.top());
+ }
cleanup_timestamps_.pop();
}
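
Note how the detach-duration measurement stays balanced: CleanupLockRequired() pushes a timestamp when a worker detaches, and this hunk pops it when a replacement worker is registered. Only the AddTime() call sits behind the null-guard; the pop is unconditional, so the stack cannot drift when metrics are disabled. A condensed sketch of the pairing:

// Sketch: cleanup pushes, re-creation pops; recording alone is optional.
if (!cleanup_timestamps_.empty()) {
  if (detach_duration_histogram_) {  // May be null when metrics are off.
    detach_duration_histogram_->AddTime(
        subtle::TimeTicksNowIgnoringOverride() - cleanup_timestamps_.top());
  }
  cleanup_timestamps_.pop();  // Always pop to keep push/pop balanced.
}
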
@@ -1011,7 +1016,7 @@ void ThreadGroupImpl::DidUpdateCanRunPolicy() {
void ThreadGroupImpl::EnsureEnoughWorkersLockRequired(
BaseScopedWorkersExecutor* base_executor) {
// Don't do anything if the thread group isn't started.
- if (max_tasks_ == 0)
+ if (max_tasks_ == 0 || UNLIKELY(join_for_testing_started_))
return;
ScopedWorkersExecutor* executor =
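
Finally, EnsureEnoughWorkersLockRequired() now bails out once a join has begun, complementing the new DCHECK in CreateAndRegisterWorkerLockRequired(): no workers are woken or created while the group is being torn down. UNLIKELY() is base's branch-prediction hint (it wraps __builtin_expect on GCC/Clang and is a no-op elsewhere). A minimal sketch of the guard, with simplified surroundings:

// Sketch: early-out before doing any worker management work.
void EnsureEnoughWorkersLockRequired() {
  // Nothing to do if the group was never started, or if JoinForTesting()
  // is already tearing it down (rare in practice, hence UNLIKELY).
  if (max_tasks_ == 0 || UNLIKELY(join_for_testing_started_))
    return;
  // ... wake idle workers or create new ones as needed ...
}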