Diffstat (limited to 'chromium/base/task/thread_pool/task_tracker.cc')
-rw-r--r--  chromium/base/task/thread_pool/task_tracker.cc | 316
1 file changed, 163 insertions(+), 153 deletions(-)
diff --git a/chromium/base/task/thread_pool/task_tracker.cc b/chromium/base/task/thread_pool/task_tracker.cc
index d21422bc9fa..a74f14003b3 100644
--- a/chromium/base/task/thread_pool/task_tracker.cc
+++ b/chromium/base/task/thread_pool/task_tracker.cc
@@ -20,6 +20,7 @@
#include "base/sequence_token.h"
#include "base/synchronization/condition_variable.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
+#include "base/task/task_executor.h"
#include "base/threading/sequence_local_storage_map.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
@@ -27,6 +28,7 @@
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/values.h"
+#include "build/build_config.h"
namespace base {
namespace internal {
@@ -75,59 +77,34 @@ void TaskTracingInfo::AppendAsTraceFormat(std::string* out) const {
out->append(tmp);
}
-// Constructs a histogram to track latency which is logging to
-// "ThreadPool.{histogram_name}.{histogram_label}.{task_type_suffix}".
-HistogramBase* GetLatencyHistogram(StringPiece histogram_name,
- StringPiece histogram_label,
- StringPiece task_type_suffix) {
- DCHECK(!histogram_name.empty());
- DCHECK(!histogram_label.empty());
- DCHECK(!task_type_suffix.empty());
- // Mimics the UMA_HISTOGRAM_HIGH_RESOLUTION_CUSTOM_TIMES macro. The minimums
- // and maximums were chosen to place the 1ms mark at around the 70% range
- // coverage for buckets giving us good info for tasks that have a latency
- // below 1ms (most of them) and enough info to assess how bad the latency is
- // for tasks that exceed this threshold.
- const std::string histogram = JoinString(
- {"ThreadPool", histogram_name, histogram_label, task_type_suffix}, ".");
- return Histogram::FactoryMicrosecondsTimeGet(
- histogram, TimeDelta::FromMicroseconds(1),
- TimeDelta::FromMilliseconds(20), 50,
- HistogramBase::kUmaTargetedHistogramFlag);
-}
-
-// Constructs a histogram to track task count which is logging to
-// "ThreadPool.{histogram_name}.{histogram_label}.{task_type_suffix}".
-HistogramBase* GetCountHistogram(StringPiece histogram_name,
- StringPiece histogram_label,
- StringPiece task_type_suffix) {
- DCHECK(!histogram_name.empty());
- DCHECK(!histogram_label.empty());
- DCHECK(!task_type_suffix.empty());
- // Mimics the UMA_HISTOGRAM_CUSTOM_COUNTS macro.
- const std::string histogram = JoinString(
- {"ThreadPool", histogram_name, histogram_label, task_type_suffix}, ".");
- // 500 was chosen as the maximum number of tasks run while queuing because
- // values this high would likely indicate an error, beyond which knowing the
- // actual number of tasks is not informative.
- return Histogram::FactoryGet(histogram, 1, 500, 50,
- HistogramBase::kUmaTargetedHistogramFlag);
-}
-
-// Returns a histogram stored in a 2D array indexed by task priority and
-// whether it may block.
-// TODO(jessemckenna): use the STATIC_HISTOGRAM_POINTER_GROUP macro from
-// histogram_macros.h instead.
-HistogramBase* GetHistogramForTaskTraits(
- TaskTraits task_traits,
- HistogramBase* const (*histograms)[2]) {
- return histograms[static_cast<int>(task_traits.priority())]
- [task_traits.may_block() ||
- task_traits.with_base_sync_primitives()
- ? 1
- : 0];
+const char* GetTaskPrioritySuffix(TaskPriority priority) {
+ switch (priority) {
+ case TaskPriority::BEST_EFFORT:
+ return "BackgroundTaskPriority";
+ case TaskPriority::USER_VISIBLE:
+ return "UserVisibleTaskPriority";
+ case TaskPriority::USER_BLOCKING:
+ return "UserBlockingTaskPriority";
+ }
}
+// Records |time_sample| to the histogram |histogram_name|.|priority_suffix|,
+// where |priority_suffix| is derived from |priority|.
+//
+// The minimums and maximums were chosen to place the 1ms mark at around the 70%
+// range coverage for buckets giving us good info for tasks that have a latency
+// below 1ms (most of them) and enough info to assess how bad the latency is for
+// tasks that exceed this threshold.
+#define STATIC_LATENCY_HISTOGRAM_POINTER_GROUP(histogram_name, priority, \
+ time_sample) \
+ STATIC_HISTOGRAM_POINTER_GROUP( \
+ histogram_name, static_cast<int>(priority), \
+ static_cast<int>(TaskPriority::HIGHEST) + 1, AddTime(time_sample), \
+ Histogram::FactoryMicrosecondsTimeGet( \
+ histogram_name, TimeDelta::FromMicroseconds(1), \
+ TimeDelta::FromMilliseconds(20), 50, \
+ HistogramBase::kUmaTargetedHistogramFlag));
+
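Note on the macro above: it builds on STATIC_HISTOGRAM_POINTER_GROUP from base/metrics/histogram_macros.h, which keeps one lazily created histogram pointer per index. For a given priority it behaves roughly like this simplified sketch (illustration only; the real macro caches the pointers atomically and DCHECKs the index range):

  // One cached histogram per TaskPriority value, created on first use.
  static HistogramBase* cached[static_cast<int>(TaskPriority::HIGHEST) + 1] = {};
  HistogramBase*& slot = cached[static_cast<int>(priority)];
  if (!slot) {
    slot = Histogram::FactoryMicrosecondsTimeGet(
        histogram_name, TimeDelta::FromMicroseconds(1),
        TimeDelta::FromMilliseconds(20), 50,
        HistogramBase::kUmaTargetedHistogramFlag);
  }
  slot->AddTime(time_sample);  // |time_sample| is the measured latency.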
bool HasLogBestEffortTasksSwitch() {
// The CommandLine might not be initialized if ThreadPool is initialized in a
// dynamic library which doesn't have access to argc/argv.
@@ -136,6 +113,84 @@ bool HasLogBestEffortTasksSwitch() {
switches::kLogBestEffortTasks);
}
+// Needed for PostTaskHere and CurrentThread. This executor lives for the
+// duration of a threadpool task invocation.
+class EphemeralTaskExecutor : public TaskExecutor {
+ public:
+ // |sequenced_task_runner| and |single_thread_task_runner| must outlive this
+ // EphemeralTaskExecutor.
+ EphemeralTaskExecutor(SequencedTaskRunner* sequenced_task_runner,
+ SingleThreadTaskRunner* single_thread_task_runner,
+ const TaskTraits* sequence_traits)
+ : sequenced_task_runner_(sequenced_task_runner),
+ single_thread_task_runner_(single_thread_task_runner),
+ sequence_traits_(sequence_traits) {
+ SetTaskExecutorForCurrentThread(this);
+ }
+
+ ~EphemeralTaskExecutor() override {
+ SetTaskExecutorForCurrentThread(nullptr);
+ }
+
+ // TaskExecutor:
+ bool PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) override {
+ CheckTraitsCompatibleWithSequenceTraits(traits);
+ return sequenced_task_runner_->PostDelayedTask(from_here, std::move(task),
+ delay);
+ }
+
+ scoped_refptr<TaskRunner> CreateTaskRunner(
+ const TaskTraits& traits) override {
+ CheckTraitsCompatibleWithSequenceTraits(traits);
+ return sequenced_task_runner_;
+ }
+
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
+ const TaskTraits& traits) override {
+ CheckTraitsCompatibleWithSequenceTraits(traits);
+ return sequenced_task_runner_;
+ }
+
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) override {
+ CheckTraitsCompatibleWithSequenceTraits(traits);
+ return single_thread_task_runner_;
+ }
+
+#if defined(OS_WIN)
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) override {
+ CheckTraitsCompatibleWithSequenceTraits(traits);
+ return single_thread_task_runner_;
+ }
+#endif // defined(OS_WIN)
+
+ private:
+ // Currently ignores |traits.priority()|.
+ void CheckTraitsCompatibleWithSequenceTraits(const TaskTraits& traits) {
+ if (traits.shutdown_behavior_set_explicitly()) {
+ DCHECK_EQ(traits.shutdown_behavior(),
+ sequence_traits_->shutdown_behavior());
+ }
+
+ DCHECK(!traits.may_block() ||
+ traits.may_block() == sequence_traits_->may_block());
+
+ DCHECK(!traits.with_base_sync_primitives() ||
+ traits.with_base_sync_primitives() ==
+ sequence_traits_->with_base_sync_primitives());
+ }
+
+ SequencedTaskRunner* const sequenced_task_runner_;
+ SingleThreadTaskRunner* const single_thread_task_runner_;
+ const TaskTraits* const sequence_traits_;
+};
+
} // namespace
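A note on the EphemeralTaskExecutor added in this anonymous namespace: its constructor registers it as the current thread's TaskExecutor via SetTaskExecutorForCurrentThread() and its destructor clears that registration, so it is meant to live on the stack for exactly one task invocation. Minimal usage sketch (RunOneSequencedTask is a made-up helper, not part of this patch):

  // While |task| runs, traits-based posting from inside it resolves through
  // the ephemeral executor back to the same sequence.
  void RunOneSequencedTask(SequencedTaskRunner* task_runner,
                           const TaskTraits& sequence_traits,
                           OnceClosure task) {
    EphemeralTaskExecutor executor(task_runner,
                                   /*single_thread_task_runner=*/nullptr,
                                   &sequence_traits);
    std::move(task).Run();
  }  // ~EphemeralTaskExecutor() clears the thread-local registration.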
// Atomic internal state used by TaskTracker to track items that are blocking
@@ -228,78 +283,14 @@ class TaskTracker::State {
DISALLOW_COPY_AND_ASSIGN(State);
};
-// TODO(jessemckenna): Write a helper function to avoid code duplication below.
TaskTracker::TaskTracker(StringPiece histogram_label)
- : has_log_best_effort_tasks_switch_(HasLogBestEffortTasksSwitch()),
+ : histogram_label_(histogram_label),
+ has_log_best_effort_tasks_switch_(HasLogBestEffortTasksSwitch()),
state_(new State),
can_run_policy_(CanRunPolicy::kAll),
flush_cv_(flush_lock_.CreateConditionVariable()),
shutdown_lock_(&flush_lock_),
- task_latency_histograms_{
- {GetLatencyHistogram("TaskLatencyMicroseconds",
- histogram_label,
- "BackgroundTaskPriority"),
- GetLatencyHistogram("TaskLatencyMicroseconds",
- histogram_label,
- "BackgroundTaskPriority_MayBlock")},
- {GetLatencyHistogram("TaskLatencyMicroseconds",
- histogram_label,
- "UserVisibleTaskPriority"),
- GetLatencyHistogram("TaskLatencyMicroseconds",
- histogram_label,
- "UserVisibleTaskPriority_MayBlock")},
- {GetLatencyHistogram("TaskLatencyMicroseconds",
- histogram_label,
- "UserBlockingTaskPriority"),
- GetLatencyHistogram("TaskLatencyMicroseconds",
- histogram_label,
- "UserBlockingTaskPriority_MayBlock")}},
- heartbeat_latency_histograms_{
- {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
- histogram_label,
- "BackgroundTaskPriority"),
- GetLatencyHistogram("HeartbeatLatencyMicroseconds",
- histogram_label,
- "BackgroundTaskPriority_MayBlock")},
- {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
- histogram_label,
- "UserVisibleTaskPriority"),
- GetLatencyHistogram("HeartbeatLatencyMicroseconds",
- histogram_label,
- "UserVisibleTaskPriority_MayBlock")},
- {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
- histogram_label,
- "UserBlockingTaskPriority"),
- GetLatencyHistogram("HeartbeatLatencyMicroseconds",
- histogram_label,
- "UserBlockingTaskPriority_MayBlock")}},
- num_tasks_run_while_queuing_histograms_{
- {GetCountHistogram("NumTasksRunWhileQueuing",
- histogram_label,
- "BackgroundTaskPriority"),
- GetCountHistogram("NumTasksRunWhileQueuing",
- histogram_label,
- "BackgroundTaskPriority_MayBlock")},
- {GetCountHistogram("NumTasksRunWhileQueuing",
- histogram_label,
- "UserVisibleTaskPriority"),
- GetCountHistogram("NumTasksRunWhileQueuing",
- histogram_label,
- "UserVisibleTaskPriority_MayBlock")},
- {GetCountHistogram("NumTasksRunWhileQueuing",
- histogram_label,
- "UserBlockingTaskPriority"),
- GetCountHistogram("NumTasksRunWhileQueuing",
- histogram_label,
- "UserBlockingTaskPriority_MayBlock")}},
- tracked_ref_factory_(this) {
- // Confirm that all |task_latency_histograms_| have been initialized above.
- for (TaskPriorityType i = 0; i < kNumTaskPriorities; ++i) {
- for (uint8_t j = 0; j < kNumBlockingModes; ++j) {
- DCHECK(task_latency_histograms_[i][j]);
- }
- }
-}
+ tracked_ref_factory_(this) {}
TaskTracker::~TaskTracker() = default;
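The constructor no longer pre-creates the 3x2 arrays of latency and count histograms (including the "_MayBlock" variants); it only stores |histogram_label_|, and the Record* methods below return early when the label is empty, assembling histogram names on demand. For illustration, with a hypothetical "Browser" label the lazy path builds names like this:

  // Illustration only; "Browser" is a made-up histogram label.
  std::string name = JoinString(
      {"ThreadPool.TaskLatencyMicroseconds", "Browser",
       GetTaskPrioritySuffix(TaskPriority::USER_VISIBLE)},
      ".");
  // name == "ThreadPool.TaskLatencyMicroseconds.Browser.UserVisibleTaskPriority"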
@@ -441,7 +432,7 @@ RegisteredTaskSource TaskTracker::RunAndPopNextTask(
RegisteredTaskSource task_source) {
DCHECK(task_source);
- const bool can_run_worker_task =
+ const bool task_is_worker_task =
BeforeRunTask(task_source->shutdown_behavior());
// Run the next task in |task_source|.
@@ -449,7 +440,7 @@ RegisteredTaskSource TaskTracker::RunAndPopNextTask(
TaskTraits traits{ThreadPool()};
{
auto transaction = task_source->BeginTransaction();
- task = can_run_worker_task ? task_source.TakeTask(&transaction)
+ task = task_is_worker_task ? task_source.TakeTask(&transaction)
: task_source.Clear(&transaction);
traits = transaction.traits();
}
@@ -458,13 +449,12 @@ RegisteredTaskSource TaskTracker::RunAndPopNextTask(
// Run the |task| (whether it's a worker task or the Clear() closure).
RunTask(std::move(task.value()), task_source.get(), traits);
}
- if (can_run_worker_task) {
+ if (task_is_worker_task)
AfterRunTask(task_source->shutdown_behavior());
- const bool task_source_must_be_queued = task_source.DidProcessTask();
- // |task_source| should be reenqueued iff requested by DidProcessTask().
- if (task_source_must_be_queued)
- return task_source;
- }
+ const bool task_source_must_be_queued = task_source.DidProcessTask();
+ // |task_source| should be reenqueued iff requested by DidProcessTask().
+ if (task_source_must_be_queued)
+ return task_source;
return nullptr;
}
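Behavioral note on RunAndPopNextTask(): DidProcessTask() and the potential re-enqueue are now reached even when the task source was cleared rather than run as a worker task; previously they only happened inside the can_run_worker_task branch. The resulting flow, summarized as a sketch (not verbatim code):

  // task_is_worker_task = BeforeRunTask(shutdown_behavior);
  // task = task_is_worker_task ? TakeTask() : Clear();
  // RunTask(task);                        // worker task or Clear() closure
  // if (task_is_worker_task)
  //   AfterRunTask(shutdown_behavior);
  // return task_source.DidProcessTask() ? task_source : nullptr;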
@@ -477,37 +467,50 @@ bool TaskTracker::IsShutdownComplete() const {
return shutdown_event_ && shutdown_event_->IsSignaled();
}
-void TaskTracker::RecordLatencyHistogram(
- LatencyHistogramType latency_histogram_type,
- TaskTraits task_traits,
- TimeTicks posted_time) const {
- const TimeDelta task_latency = TimeTicks::Now() - posted_time;
+void TaskTracker::RecordLatencyHistogram(TaskPriority priority,
+ TimeTicks posted_time) const {
+ if (histogram_label_.empty())
+ return;
- DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
- latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
- const auto& histograms =
- latency_histogram_type == LatencyHistogramType::TASK_LATENCY
- ? task_latency_histograms_
- : heartbeat_latency_histograms_;
- GetHistogramForTaskTraits(task_traits, histograms)
- ->AddTimeMicrosecondsGranularity(task_latency);
+ auto get_latency_histogram_name = [this, priority]() {
+ return JoinString({"ThreadPool.TaskLatencyMicroseconds", histogram_label_,
+ GetTaskPrioritySuffix(priority)},
+ ".");
+ };
+ STATIC_LATENCY_HISTOGRAM_POINTER_GROUP(get_latency_histogram_name(), priority,
+ TimeTicks::Now() - posted_time);
}
void TaskTracker::RecordHeartbeatLatencyAndTasksRunWhileQueuingHistograms(
- TaskPriority task_priority,
- bool may_block,
+ TaskPriority priority,
TimeTicks posted_time,
int num_tasks_run_when_posted) const {
- TaskTraits task_traits{ThreadPool()};
- if (may_block)
- task_traits = TaskTraits(ThreadPool(), task_priority, MayBlock());
- else
- task_traits = TaskTraits(ThreadPool(), task_priority);
- RecordLatencyHistogram(LatencyHistogramType::HEARTBEAT_LATENCY, task_traits,
- posted_time);
- GetHistogramForTaskTraits(task_traits,
- num_tasks_run_while_queuing_histograms_)
- ->Add(GetNumTasksRun() - num_tasks_run_when_posted);
+ if (histogram_label_.empty())
+ return;
+
+ auto get_heartbeat_latency_histogram_name = [this, priority]() {
+ return JoinString({"ThreadPool.HeartbeatLatencyMicroseconds",
+ histogram_label_, GetTaskPrioritySuffix(priority)},
+ ".");
+ };
+ STATIC_LATENCY_HISTOGRAM_POINTER_GROUP(get_heartbeat_latency_histogram_name(),
+ priority,
+ TimeTicks::Now() - posted_time);
+
+ auto get_num_tasks_run_while_queuing_histogram_name = [this, priority]() {
+ return JoinString({"ThreadPool.NumTasksRunWhileQueuing", histogram_label_,
+ GetTaskPrioritySuffix(priority)},
+ ".");
+ };
+ STATIC_HISTOGRAM_POINTER_GROUP(
+ get_num_tasks_run_while_queuing_histogram_name(),
+ static_cast<int>(priority), static_cast<int>(TaskPriority::HIGHEST) + 1,
+ Add(GetNumTasksRun() - num_tasks_run_when_posted),
+ // 500 was chosen as the maximum number of tasks run while queuing because
+ // values this high would likely indicate an error, beyond which knowing
+ // the actual number of tasks is not informative.
+ Histogram::FactoryGet(get_num_tasks_run_while_queuing_histogram_name(), 1,
+ 500, 50, HistogramBase::kUmaTargetedHistogramFlag));
}
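The count histogram recorded just above captures how many thread-pool tasks ran between the heartbeat task being posted and being run. A worked example with hypothetical numbers:

  // The heartbeat task was posted when 12000 tasks had run and executes when
  // 12048 have run, so 48 is recorded to
  // "ThreadPool.NumTasksRunWhileQueuing.<label>.<priority suffix>".
  const int num_tasks_run_when_posted = 12000;
  const int num_tasks_run_now = 12048;  // GetNumTasksRun() at execution time
  const int sample = num_tasks_run_now - num_tasks_run_when_posted;  // == 48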
int TaskTracker::GetNumTasksRun() const {
@@ -522,8 +525,7 @@ void TaskTracker::RunTask(Task task,
TaskSource* task_source,
const TaskTraits& traits) {
DCHECK(task_source);
- RecordLatencyHistogram(LatencyHistogramType::TASK_LATENCY, traits,
- task.queue_time);
+ RecordLatencyHistogram(traits.priority(), task.queue_time);
const auto environment = task_source->GetExecutionEnvironment();
@@ -557,6 +559,7 @@ void TaskTracker::RunTask(Task task,
// Set up TaskRunnerHandle as expected for the scope of the task.
Optional<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
Optional<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
+ Optional<EphemeralTaskExecutor> ephemeral_task_executor;
switch (task_source->execution_mode()) {
case TaskSourceExecutionMode::kJob:
case TaskSourceExecutionMode::kParallel:
@@ -565,11 +568,18 @@ void TaskTracker::RunTask(Task task,
DCHECK(task_source->task_runner());
sequenced_task_runner_handle.emplace(
static_cast<SequencedTaskRunner*>(task_source->task_runner()));
+ ephemeral_task_executor.emplace(
+ static_cast<SequencedTaskRunner*>(task_source->task_runner()),
+ nullptr, &traits);
break;
case TaskSourceExecutionMode::kSingleThread:
DCHECK(task_source->task_runner());
single_thread_task_runner_handle.emplace(
static_cast<SingleThreadTaskRunner*>(task_source->task_runner()));
+ ephemeral_task_executor.emplace(
+ static_cast<SequencedTaskRunner*>(task_source->task_runner()),
+ static_cast<SingleThreadTaskRunner*>(task_source->task_runner()),
+ &traits);
break;
}
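With the EphemeralTaskExecutor in place for the scope of the task, a traits-based repost from inside a sequenced or single-threaded thread-pool task can resolve back to the task's own runner instead of requiring an explicit task-runner reference. Rough sketch of the kind of call the "Needed for PostTaskHere and CurrentThread" comment refers to (DoMoreWork is a made-up function; the exact routing lives in base/task/post_task.h):

  // From inside a running sequenced thread-pool task:
  base::PostTask(FROM_HERE, {base::TaskPriority::USER_VISIBLE},
                 base::BindOnce(&DoMoreWork));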