author     Allan Sandfeld Jensen <allan.jensen@theqtcompany.com>  2016-08-01 12:59:39 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>             2016-08-04 12:40:43 +0000
commit     28b1110370900897ab652cb420c371fab8857ad4 (patch)
tree       41b32127d23b0df4f2add2a27e12dc87bddb260e /chromium/base
parent     399c965b6064c440ddcf4015f5f8e9d131c7a0a6 (diff)
BASELINE: Update Chromium to 53.0.2785.41
Also adds a few extra files for extensions.

Change-Id: Iccdd55d98660903331cf8b7b29188da781830af4
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/base')
-rw-r--r--  chromium/base/BUILD.gn | 107
-rw-r--r--  chromium/base/OWNERS | 32
-rw-r--r--  chromium/base/allocator/BUILD.gn | 12
-rw-r--r--  chromium/base/allocator/allocator.gyp | 16
-rw-r--r--  chromium/base/allocator/allocator_shim_unittest.cc | 24
-rw-r--r--  chromium/base/allocator/tcmalloc_unittest.cc | 17
-rw-r--r--  chromium/base/android/application_status_listener_unittest.cc | 6
-rw-r--r--  chromium/base/android/build_info.cc | 2
-rw-r--r--  chromium/base/android/build_info.h | 5
-rw-r--r--  chromium/base/android/callback_android.cc | 35
-rw-r--r--  chromium/base/android/callback_android.h | 33
-rw-r--r--  chromium/base/android/trace_event_binding.cc | 8
-rw-r--r--  chromium/base/base.gyp | 71
-rw-r--r--  chromium/base/base.gypi | 30
-rw-r--r--  chromium/base/base_paths.h | 5
-rw-r--r--  chromium/base/base_paths_android.cc | 6
-rw-r--r--  chromium/base/base_paths_win.cc | 10
-rw-r--r--  chromium/base/base_paths_win.h | 13
-rw-r--r--  chromium/base/big_endian.h | 4
-rw-r--r--  chromium/base/bind.h | 49
-rw-r--r--  chromium/base/bind_helpers.h | 137
-rw-r--r--  chromium/base/bind_internal.h | 162
-rw-r--r--  chromium/base/bind_internal_win.h | 4
-rw-r--r--  chromium/base/bind_unittest.cc | 22
-rw-r--r--  chromium/base/bind_unittest.nc | 2
-rw-r--r--  chromium/base/callback.h | 21
-rw-r--r--  chromium/base/callback_helpers.cc | 15
-rw-r--r--  chromium/base/callback_helpers.h | 15
-rw-r--r--  chromium/base/callback_helpers_unittest.cc | 43
-rw-r--r--  chromium/base/callback_unittest.cc | 53
-rw-r--r--  chromium/base/chromeos/logging.h | 28
-rw-r--r--  chromium/base/command_line.cc | 11
-rw-r--r--  chromium/base/command_line.h | 7
-rw-r--r--  chromium/base/debug/profiler.cc | 4
-rw-r--r--  chromium/base/debug/stack_trace.cc | 55
-rw-r--r--  chromium/base/debug/stack_trace_win.cc | 5
-rw-r--r--  chromium/base/deferred_sequenced_task_runner_unittest.cc | 19
-rw-r--r--  chromium/base/environment.cc | 33
-rw-r--r--  chromium/base/environment.h | 16
-rw-r--r--  chromium/base/feature_list.cc | 19
-rw-r--r--  chromium/base/feature_list.h | 4
-rw-r--r--  chromium/base/feature_list_unittest.cc | 11
-rw-r--r--  chromium/base/file_version_info_win.cc | 121
-rw-r--r--  chromium/base/file_version_info_win.h | 30
-rw-r--r--  chromium/base/file_version_info_win_unittest.cc (renamed from chromium/base/file_version_info_unittest.cc) | 116
-rw-r--r--  chromium/base/files/file.h | 13
-rw-r--r--  chromium/base/files/file_path.cc | 8
-rw-r--r--  chromium/base/files/file_path_watcher_unittest.cc | 11
-rw-r--r--  chromium/base/files/file_proxy_unittest.cc | 31
-rw-r--r--  chromium/base/files/file_util.h | 6
-rw-r--r--  chromium/base/files/file_util_posix.cc | 21
-rw-r--r--  chromium/base/files/file_util_proxy_unittest.cc | 7
-rw-r--r--  chromium/base/files/file_util_unittest.cc | 47
-rw-r--r--  chromium/base/files/important_file_writer_unittest.cc | 6
-rw-r--r--  chromium/base/files/scoped_file.cc | 12
-rw-r--r--  chromium/base/i18n/icu_util.cc | 6
-rw-r--r--  chromium/base/i18n/number_formatting.cc | 10
-rw-r--r--  chromium/base/i18n/number_formatting.h | 7
-rw-r--r--  chromium/base/i18n/number_formatting_unittest.cc | 26
-rw-r--r--  chromium/base/i18n/time_formatting_unittest.cc | 15
-rw-r--r--  chromium/base/ios/ios_util.h | 3
-rw-r--r--  chromium/base/ios/ios_util.mm | 4
-rw-r--r--  chromium/base/json/json_writer.cc | 4
-rw-r--r--  chromium/base/json/json_writer_unittest.cc | 9
-rw-r--r--  chromium/base/logging.cc | 24
-rw-r--r--  chromium/base/logging.h | 25
-rw-r--r--  chromium/base/logging_unittest.cc | 7
-rw-r--r--  chromium/base/mac/bind_objc_block.h | 7
-rw-r--r--  chromium/base/mac/bind_objc_block_unittest.mm | 8
-rw-r--r--  chromium/base/mac/bind_objc_block_unittest_arc.mm | 109
-rw-r--r--  chromium/base/mac/call_with_eh_frame.cc | 46
-rw-r--r--  chromium/base/mac/libdispatch_task_runner.cc | 3
-rw-r--r--  chromium/base/mac/libdispatch_task_runner_unittest.cc | 20
-rw-r--r--  chromium/base/mac/mac_logging.mm | 4
-rw-r--r--  chromium/base/mac/mac_util.h | 48
-rw-r--r--  chromium/base/mac/mac_util.mm | 34
-rw-r--r--  chromium/base/mac/mac_util_unittest.mm | 56
-rw-r--r--  chromium/base/mac/mach_port_broker_unittest.cc | 3
-rw-r--r--  chromium/base/mac/objc_property_releaser.h | 4
-rw-r--r--  chromium/base/mac/scoped_block.h | 17
-rw-r--r--  chromium/base/mac/scoped_nsobject.h | 159
-rw-r--r--  chromium/base/mac/scoped_nsobject.mm | 23
-rw-r--r--  chromium/base/mac/scoped_nsobject_unittest.mm | 8
-rw-r--r--  chromium/base/mac/scoped_nsobject_unittest_arc.mm | 137
-rw-r--r--  chromium/base/mac/scoped_typeref.h | 42
-rw-r--r--  chromium/base/mac/sdk_forward_declarations.h | 377
-rw-r--r--  chromium/base/macros.h | 8
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_mac.cc | 3
-rw-r--r--  chromium/base/memory/ref_counted.h | 2
-rw-r--r--  chromium/base/memory/scoped_vector.h | 6
-rw-r--r--  chromium/base/memory/shared_memory.h | 8
-rw-r--r--  chromium/base/memory/shared_memory_mac.cc | 7
-rw-r--r--  chromium/base/memory/shared_memory_unittest.cc | 5
-rw-r--r--  chromium/base/memory/shared_memory_win.cc | 98
-rw-r--r--  chromium/base/memory/weak_ptr.cc | 6
-rw-r--r--  chromium/base/memory/weak_ptr.h | 64
-rw-r--r--  chromium/base/memory/weak_ptr_unittest.cc | 44
-rw-r--r--  chromium/base/memory/weak_ptr_unittest.nc | 12
-rw-r--r--  chromium/base/message_loop/incoming_task_queue.cc | 109
-rw-r--r--  chromium/base/message_loop/incoming_task_queue.h | 11
-rw-r--r--  chromium/base/message_loop/message_loop.cc | 42
-rw-r--r--  chromium/base/message_loop/message_loop.h | 39
-rw-r--r--  chromium/base/message_loop/message_loop_task_runner_unittest.cc | 16
-rw-r--r--  chromium/base/message_loop/message_loop_test.cc | 341
-rw-r--r--  chromium/base/message_loop/message_loop_unittest.cc | 44
-rw-r--r--  chromium/base/message_loop/message_pump.cc | 7
-rw-r--r--  chromium/base/message_loop/message_pump.h | 9
-rw-r--r--  chromium/base/message_loop/message_pump_default.cc | 30
-rw-r--r--  chromium/base/message_loop/message_pump_libevent_unittest.cc | 28
-rw-r--r--  chromium/base/message_loop/message_pump_perftest.cc | 10
-rw-r--r--  chromium/base/message_loop/message_pump_win.cc | 338
-rw-r--r--  chromium/base/message_loop/message_pump_win.h | 59
-rw-r--r--  chromium/base/metrics/OWNERS | 1
-rw-r--r--  chromium/base/metrics/field_trial.cc | 111
-rw-r--r--  chromium/base/metrics/field_trial.h | 14
-rw-r--r--  chromium/base/metrics/histogram.cc | 7
-rw-r--r--  chromium/base/metrics/histogram_base_unittest.cc | 2
-rw-r--r--  chromium/base/metrics/histogram_delta_serialization_unittest.cc | 3
-rw-r--r--  chromium/base/metrics/histogram_snapshot_manager_unittest.cc | 5
-rw-r--r--  chromium/base/metrics/histogram_unittest.cc | 2
-rw-r--r--  chromium/base/metrics/persistent_histogram_allocator.cc | 107
-rw-r--r--  chromium/base/metrics/persistent_histogram_allocator.h | 35
-rw-r--r--  chromium/base/metrics/persistent_histogram_allocator_unittest.cc | 87
-rw-r--r--  chromium/base/metrics/persistent_memory_allocator.cc | 117
-rw-r--r--  chromium/base/metrics/persistent_memory_allocator.h | 28
-rw-r--r--  chromium/base/metrics/persistent_memory_allocator_unittest.cc | 111
-rw-r--r--  chromium/base/metrics/sparse_histogram.cc | 1
-rw-r--r--  chromium/base/metrics/sparse_histogram_unittest.cc | 2
-rw-r--r--  chromium/base/metrics/statistics_recorder.cc | 9
-rw-r--r--  chromium/base/metrics/statistics_recorder.h | 21
-rw-r--r--  chromium/base/metrics/statistics_recorder_unittest.cc | 2
-rw-r--r--  chromium/base/metrics/user_metrics.h | 6
-rw-r--r--  chromium/base/metrics/user_metrics_action.h | 13
-rw-r--r--  chromium/base/move.h | 44
-rw-r--r--  chromium/base/native_library.h | 16
-rw-r--r--  chromium/base/native_library_ios.mm | 9
-rw-r--r--  chromium/base/native_library_mac.mm | 36
-rw-r--r--  chromium/base/native_library_posix.cc | 12
-rw-r--r--  chromium/base/native_library_unittest.cc | 22
-rw-r--r--  chromium/base/native_library_win.cc | 13
-rw-r--r--  chromium/base/nix/xdg_util_unittest.cc | 38
-rw-r--r--  chromium/base/numerics/safe_numerics_unittest.cc | 140
-rw-r--r--  chromium/base/observer_list_threadsafe.h | 9
-rw-r--r--  chromium/base/observer_list_unittest.cc | 2
-rw-r--r--  chromium/base/optional.h | 122
-rw-r--r--  chromium/base/optional_unittest.cc | 81
-rw-r--r--  chromium/base/pending_task.cc | 4
-rw-r--r--  chromium/base/pending_task.h | 9
-rw-r--r--  chromium/base/posix/global_descriptors.h | 7
-rw-r--r--  chromium/base/posix/unix_domain_socket_linux_unittest.cc | 3
-rw-r--r--  chromium/base/process/kill_win.cc | 6
-rw-r--r--  chromium/base/process/launch.h | 4
-rw-r--r--  chromium/base/process/launch_win.cc | 12
-rw-r--r--  chromium/base/process/memory_mac.mm | 16
-rw-r--r--  chromium/base/process/process.h | 6
-rw-r--r--  chromium/base/process/process_metrics.h | 8
-rw-r--r--  chromium/base/process/process_metrics_linux.cc | 10
-rw-r--r--  chromium/base/process/process_util_unittest.cc | 8
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler.cc | 3
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler_unittest.cc | 91
-rw-r--r--  chromium/base/run_loop.cc | 8
-rw-r--r--  chromium/base/run_loop.h | 28
-rw-r--r--  chromium/base/run_loop_unittest.cc | 116
-rw-r--r--  chromium/base/scoped_generic.h | 5
-rw-r--r--  chromium/base/scoped_native_library_unittest.cc | 9
-rw-r--r--  chromium/base/sequence_checker_unittest.cc | 5
-rw-r--r--  chromium/base/sha1.cc (renamed from chromium/base/sha1_portable.cc) | 9
-rw-r--r--  chromium/base/sha1_win.cc | 67
-rw-r--r--  chromium/base/strings/string16.h | 2
-rw-r--r--  chromium/base/strings/string_number_conversions.cc | 2
-rw-r--r--  chromium/base/strings/string_number_conversions.h | 8
-rw-r--r--  chromium/base/strings/string_number_conversions_unittest.cc | 51
-rw-r--r--  chromium/base/strings/string_util.cc | 1
-rw-r--r--  chromium/base/sync_socket_win.cc | 14
-rw-r--r--  chromium/base/synchronization/condition_variable_win.cc | 4
-rw-r--r--  chromium/base/synchronization/read_write_lock.h | 105
-rw-r--r--  chromium/base/synchronization/read_write_lock_nacl.cc | 49
-rw-r--r--  chromium/base/synchronization/read_write_lock_posix.cc | 40
-rw-r--r--  chromium/base/synchronization/read_write_lock_unittest.cc | 225
-rw-r--r--  chromium/base/synchronization/read_write_lock_win.cc | 31
-rw-r--r--  chromium/base/synchronization/waitable_event.h | 19
-rw-r--r--  chromium/base/synchronization/waitable_event_posix.cc | 22
-rw-r--r--  chromium/base/synchronization/waitable_event_unittest.cc | 26
-rw-r--r--  chromium/base/synchronization/waitable_event_watcher_unittest.cc | 18
-rw-r--r--  chromium/base/synchronization/waitable_event_win.cc | 11
-rw-r--r--  chromium/base/sys_byteorder.h | 30
-rw-r--r--  chromium/base/sys_byteorder_unittest.cc | 122
-rw-r--r--  chromium/base/sys_info.h | 10
-rw-r--r--  chromium/base/sys_info_android.cc | 2
-rw-r--r--  chromium/base/sys_info_linux.cc | 24
-rw-r--r--  chromium/base/sys_info_posix.cc | 30
-rw-r--r--  chromium/base/sys_info_unittest.cc | 17
-rw-r--r--  chromium/base/sys_info_win.cc | 43
-rw-r--r--  chromium/base/task_runner.h | 5
-rw-r--r--  chromium/base/task_scheduler/delayed_task_manager.cc | 30
-rw-r--r--  chromium/base/task_scheduler/delayed_task_manager.h | 17
-rw-r--r--  chromium/base/task_scheduler/delayed_task_manager_unittest.cc | 50
-rw-r--r--  chromium/base/task_scheduler/priority_queue.h | 10
-rw-r--r--  chromium/base/task_scheduler/priority_queue_unittest.cc | 3
-rw-r--r--  chromium/base/task_scheduler/scheduler_lock_unittest.cc | 7
-rw-r--r--  chromium/base/task_scheduler/scheduler_service_thread.cc | 33
-rw-r--r--  chromium/base/task_scheduler/scheduler_service_thread.h | 10
-rw-r--r--  chromium/base/task_scheduler/scheduler_service_thread_unittest.cc | 33
-rw-r--r--  chromium/base/task_scheduler/scheduler_thread_pool.h | 67
-rw-r--r--  chromium/base/task_scheduler/scheduler_thread_pool_impl.h | 164
-rw-r--r--  chromium/base/task_scheduler/scheduler_unique_stack.h | 91
-rw-r--r--  chromium/base/task_scheduler/scheduler_unique_stack_unittest.cc | 110
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker.cc | 222
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker.h | 152
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool.h | 66
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_impl.cc (renamed from chromium/base/task_scheduler/scheduler_thread_pool_impl.cc) | 346
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_impl.h | 164
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc (renamed from chromium/base/task_scheduler/scheduler_thread_pool_impl_unittest.cc) | 164
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_stack.cc | 39
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_stack.h | 56
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc (renamed from chromium/base/task_scheduler/scheduler_worker_thread_stack_unittest.cc) | 106
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_thread.cc | 110
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_thread.h | 114
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_thread_stack.cc | 40
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_thread_stack.h | 56
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_unittest.cc (renamed from chromium/base/task_scheduler/scheduler_worker_thread_unittest.cc) | 219
-rw-r--r--  chromium/base/task_scheduler/sequence.h | 4
-rw-r--r--  chromium/base/task_scheduler/task_scheduler.cc | 6
-rw-r--r--  chromium/base/task_scheduler/task_scheduler.h | 23
-rw-r--r--  chromium/base/task_scheduler/task_scheduler_impl.cc | 98
-rw-r--r--  chromium/base/task_scheduler/task_scheduler_impl.h | 65
-rw-r--r--  chromium/base/task_scheduler/task_scheduler_impl_unittest.cc | 89
-rw-r--r--  chromium/base/task_scheduler/task_tracker.cc | 4
-rw-r--r--  chromium/base/task_scheduler/task_tracker_unittest.cc | 6
-rw-r--r--  chromium/base/task_scheduler/task_traits.h | 2
-rw-r--r--  chromium/base/template_util.h | 46
-rw-r--r--  chromium/base/template_util_unittest.cc | 77
-rw-r--r--  chromium/base/third_party/libevent/README.chromium | 2
-rw-r--r--  chromium/base/third_party/libevent/kqueue.c | 22
-rw-r--r--  chromium/base/threading/platform_thread.h | 8
-rw-r--r--  chromium/base/threading/platform_thread_posix.cc | 5
-rw-r--r--  chromium/base/threading/platform_thread_unittest.cc | 56
-rw-r--r--  chromium/base/threading/platform_thread_win.cc | 19
-rw-r--r--  chromium/base/threading/sequenced_task_runner_handle.cc | 1
-rw-r--r--  chromium/base/threading/sequenced_task_runner_handle.h | 3
-rw-r--r--  chromium/base/threading/sequenced_task_runner_handle_unittest.cc | 6
-rw-r--r--  chromium/base/threading/sequenced_worker_pool_unittest.cc | 6
-rw-r--r--  chromium/base/threading/simple_thread.cc | 25
-rw-r--r--  chromium/base/threading/simple_thread_unittest.cc | 9
-rw-r--r--  chromium/base/threading/thread.cc | 10
-rw-r--r--  chromium/base/threading/thread_local_unittest.cc | 3
-rw-r--r--  chromium/base/threading/thread_perftest.cc | 27
-rw-r--r--  chromium/base/threading/thread_restrictions.h | 12
-rw-r--r--  chromium/base/threading/thread_task_runner_handle.cc | 1
-rw-r--r--  chromium/base/threading/thread_task_runner_handle.h | 3
-rw-r--r--  chromium/base/threading/thread_unittest.cc | 3
-rw-r--r--  chromium/base/threading/worker_pool_posix_unittest.cc | 3
-rw-r--r--  chromium/base/threading/worker_pool_unittest.cc | 11
-rw-r--r--  chromium/base/time/OWNERS | 1
-rw-r--r--  chromium/base/time/time.cc | 13
-rw-r--r--  chromium/base/time/time.h | 52
-rw-r--r--  chromium/base/time/time_mac.cc | 28
-rw-r--r--  chromium/base/time/time_posix.cc | 24
-rw-r--r--  chromium/base/time/time_unittest.cc | 64
-rw-r--r--  chromium/base/time/time_win.cc | 14
-rw-r--r--  chromium/base/timer/timer_unittest.cc | 26
-rw-r--r--  chromium/base/trace_event/etw_manifest/BUILD.gn | 10
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context.cc | 11
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context.h | 6
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc | 4
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 4
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register.cc | 275
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register.h | 392
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register_posix.cc | 9
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc | 97
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_register_win.cc | 9
-rw-r--r--  chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc | 3
-rw-r--r--  chromium/base/trace_event/malloc_dump_provider.cc | 2
-rw-r--r--  chromium/base/trace_event/memory_allocator_dump.cc | 7
-rw-r--r--  chromium/base/trace_event/memory_allocator_dump_unittest.cc | 5
-rw-r--r--  chromium/base/trace_event/memory_dump_manager.cc | 252
-rw-r--r--  chromium/base/trace_event/memory_dump_manager.h | 44
-rw-r--r--  chromium/base/trace_event/memory_dump_manager_unittest.cc | 90
-rw-r--r--  chromium/base/trace_event/memory_dump_provider.h | 6
-rw-r--r--  chromium/base/trace_event/memory_dump_request_args.cc | 4
-rw-r--r--  chromium/base/trace_event/memory_dump_request_args.h | 33
-rw-r--r--  chromium/base/trace_event/memory_infra_background_whitelist.cc | 131
-rw-r--r--  chromium/base/trace_event/memory_infra_background_whitelist.h | 33
-rw-r--r--  chromium/base/trace_event/process_memory_dump.cc | 51
-rw-r--r--  chromium/base/trace_event/process_memory_dump.h | 30
-rw-r--r--  chromium/base/trace_event/process_memory_dump_unittest.cc | 142
-rw-r--r--  chromium/base/trace_event/trace_config.cc | 49
-rw-r--r--  chromium/base/trace_event/trace_config.h | 27
-rw-r--r--  chromium/base/trace_event/trace_config_memory_test_util.h | 24
-rw-r--r--  chromium/base/trace_event/trace_config_unittest.cc | 162
-rw-r--r--  chromium/base/trace_event/trace_event.gypi | 2
-rw-r--r--  chromium/base/trace_event/trace_event_android.cc | 3
-rw-r--r--  chromium/base/trace_event/trace_event_argument.cc | 8
-rw-r--r--  chromium/base/trace_event/trace_event_impl.cc | 2
-rw-r--r--  chromium/base/trace_event/trace_event_memory_overhead.cc | 2
-rw-r--r--  chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc | 5
-rw-r--r--  chromium/base/trace_event/trace_event_unittest.cc | 115
-rw-r--r--  chromium/base/trace_event/trace_log.cc | 14
-rw-r--r--  chromium/base/trace_event/trace_sampling_thread.cc | 4
-rw-r--r--  chromium/base/trace_event/winheap_dump_provider_win.cc | 9
-rw-r--r--  chromium/base/trace_event/winheap_dump_provider_win_unittest.cc | 2
-rw-r--r--  chromium/base/tracked_objects.cc | 9
-rw-r--r--  chromium/base/tracked_objects.h | 2
-rw-r--r--  chromium/base/tuple.h | 94
-rw-r--r--  chromium/base/tuple_unittest.cc | 81
-rw-r--r--  chromium/base/values.cc | 133
-rw-r--r--  chromium/base/values.h | 18
-rw-r--r--  chromium/base/values_unittest.cc | 2
-rw-r--r--  chromium/base/win/BUILD.gn | 23
-rw-r--r--  chromium/base/win/registry.cc | 5
-rw-r--r--  chromium/base/win/scoped_handle.cc | 3
-rw-r--r--  chromium/base/win/scoped_handle.h | 5
-rw-r--r--  chromium/base/win/scoped_handle_test_dll.cc | 3
-rw-r--r--  chromium/base/win/win_util.cc | 140
-rw-r--r--  chromium/base/win/win_util.h | 11
315 files changed, 8008 insertions, 4719 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index fcd6803a6e0..afba0abaa67 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -54,6 +54,7 @@ config("base_flags") {
config("base_implementation") {
defines = [ "BASE_IMPLEMENTATION" ]
+ configs = [ "//build/config/compiler:wexit_time_destructors" ]
}
if (is_win) {
@@ -124,12 +125,14 @@ config("android_system_libs") {
# test code (test support and anything in the test directory) which should use
# source_set as is recommended for GN targets).
component("base") {
- # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
- # hacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
- # reasons for this seem to involve obscure toolchain bugs. This should be
- # fixed and this target should always be a static_library in the
- # non-component case.
- component_never_use_source_set = !is_nacl_nonsfi
+ if (is_nacl_nonsfi) {
+ # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
+ # nacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
+ # reasons for this seem to involve obscure toolchain bugs. This should be
+ # fixed and this target should always be a static_library in the
+ # non-component case.
+ static_component_type = "source_set"
+ }
sources = [
"allocator/allocator_check.cc",
@@ -148,6 +151,8 @@ component("base") {
"android/base_jni_registrar.h",
"android/build_info.cc",
"android/build_info.h",
+ "android/callback_android.cc",
+ "android/callback_android.h",
"android/command_line_android.cc",
"android/command_line_android.h",
"android/content_uri_utils.cc",
@@ -392,6 +397,7 @@ component("base") {
"mac/call_with_eh_frame.cc",
"mac/call_with_eh_frame.h",
"mac/call_with_eh_frame_asm.S",
+ "mac/close_nocancel.cc",
"mac/cocoa_protocols.h",
"mac/dispatch_source_mach.cc",
"mac/dispatch_source_mach.h",
@@ -431,6 +437,7 @@ component("base") {
"mac/scoped_nsautorelease_pool.h",
"mac/scoped_nsautorelease_pool.mm",
"mac/scoped_nsobject.h",
+ "mac/scoped_nsobject.mm",
"mac/scoped_objc_class_swizzler.h",
"mac/scoped_objc_class_swizzler.mm",
"mac/scoped_sending_event.h",
@@ -540,7 +547,6 @@ component("base") {
"metrics/user_metrics.cc",
"metrics/user_metrics.h",
"metrics/user_metrics_action.h",
- "move.h",
"native_library.h",
"native_library_ios.mm",
"native_library_mac.mm",
@@ -675,9 +681,8 @@ component("base") {
"sequenced_task_runner.cc",
"sequenced_task_runner.h",
"sequenced_task_runner_helpers.h",
+ "sha1.cc",
"sha1.h",
- "sha1_portable.cc",
- "sha1_win.cc",
"single_thread_task_runner.h",
"stl_util.h",
"strings/latin1_string_conversions.cc",
@@ -730,6 +735,10 @@ component("base") {
"synchronization/lock_impl.h",
"synchronization/lock_impl_posix.cc",
"synchronization/lock_impl_win.cc",
+ "synchronization/read_write_lock.h",
+ "synchronization/read_write_lock_nacl.cc",
+ "synchronization/read_write_lock_posix.cc",
+ "synchronization/read_write_lock_win.cc",
"synchronization/spin_wait.h",
"synchronization/waitable_event.h",
"synchronization/waitable_event_posix.cc",
@@ -767,13 +776,13 @@ component("base") {
"task_scheduler/scheduler_lock_impl.h",
"task_scheduler/scheduler_service_thread.cc",
"task_scheduler/scheduler_service_thread.h",
- "task_scheduler/scheduler_thread_pool.h",
- "task_scheduler/scheduler_thread_pool_impl.cc",
- "task_scheduler/scheduler_thread_pool_impl.h",
- "task_scheduler/scheduler_worker_thread.cc",
- "task_scheduler/scheduler_worker_thread.h",
- "task_scheduler/scheduler_worker_thread_stack.cc",
- "task_scheduler/scheduler_worker_thread_stack.h",
+ "task_scheduler/scheduler_worker.cc",
+ "task_scheduler/scheduler_worker.h",
+ "task_scheduler/scheduler_worker_pool.h",
+ "task_scheduler/scheduler_worker_pool_impl.cc",
+ "task_scheduler/scheduler_worker_pool_impl.h",
+ "task_scheduler/scheduler_worker_stack.cc",
+ "task_scheduler/scheduler_worker_stack.h",
"task_scheduler/sequence.cc",
"task_scheduler/sequence.h",
"task_scheduler/sequence_sort_key.cc",
@@ -897,6 +906,8 @@ component("base") {
"trace_event/memory_dump_request_args.h",
"trace_event/memory_dump_session_state.cc",
"trace_event/memory_dump_session_state.h",
+ "trace_event/memory_infra_background_whitelist.cc",
+ "trace_event/memory_infra_background_whitelist.h",
"trace_event/process_memory_dump.cc",
"trace_event/process_memory_dump.h",
"trace_event/process_memory_maps.cc",
@@ -1122,6 +1133,7 @@ component("base") {
"process/process_posix.cc",
"scoped_native_library.cc",
"sync_socket_posix.cc",
+ "synchronization/read_write_lock_posix.cc",
"sys_info.cc",
"sys_info_posix.cc",
"trace_event/trace_event_system_stats_monitor.cc",
@@ -1156,6 +1168,7 @@ component("base") {
"os_compat_nacl.cc",
"os_compat_nacl.h",
"rand_util_nacl.cc",
+ "synchronization/read_write_lock_nacl.cc",
]
}
@@ -1169,13 +1182,12 @@ component("base") {
sources -= [
"message_loop/message_pump_libevent.cc",
"strings/string16.cc",
-
- # Not using sha1_win.cc because it may have caused a
- # regression to page cycler moz.
- "sha1_win.cc",
]
- deps += [ "//base/trace_event/etw_manifest:chrome_events_win" ]
+ deps += [
+ "//base/trace_event/etw_manifest:chrome_events_win",
+ "//base/win:base_win_features",
+ ]
if (is_component_build) {
# Copy the VS runtime DLLs into the isolate so that they don't have to be
@@ -1254,7 +1266,6 @@ component("base") {
libs = [
"cfgmgr32.lib",
- "netapi32.lib",
"powrprof.lib",
"setupapi.lib",
"userenv.lib",
@@ -1395,6 +1406,7 @@ component("base") {
"mac/mach_logging.h",
"mac/objc_property_releaser.h",
"mac/objc_property_releaser.mm",
+ "mac/scoped_block.h",
"mac/scoped_mach_port.cc",
"mac/scoped_mach_port.h",
"mac/scoped_mach_vm.cc",
@@ -1402,8 +1414,10 @@ component("base") {
"mac/scoped_nsautorelease_pool.h",
"mac/scoped_nsautorelease_pool.mm",
"mac/scoped_nsobject.h",
+ "mac/scoped_nsobject.mm",
"mac/scoped_objc_class_swizzler.h",
"mac/scoped_objc_class_swizzler.mm",
+ "mac/scoped_typeref.h",
"memory/shared_memory_posix.cc",
"message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm",
@@ -1629,10 +1643,11 @@ if (is_win) {
]
deps = [
":base",
+ "//base/win:base_win_features",
]
}
- if (target_cpu == "x64") {
+ if (current_cpu == "x64") {
# Must be a shared library so that it can be unloaded during testing.
shared_library("base_profiler_test_support_library") {
sources = [
@@ -1676,6 +1691,23 @@ bundle_data("base_unittests_bundle_data") {
]
}
+if (is_ios || is_mac) {
+ source_set("base_unittests_arc") {
+ testonly = true
+ set_sources_assignment_filter([])
+ sources = [
+ "mac/bind_objc_block_unittest_arc.mm",
+ "mac/scoped_nsobject_unittest_arc.mm",
+ ]
+ set_sources_assignment_filter(sources_assignment_filter)
+ configs += [ "//build/config/compiler:enable_arc" ]
+ deps = [
+ ":base",
+ "//testing/gtest",
+ ]
+ }
+}
+
test("base_unittests") {
sources = [
"allocator/tcmalloc_unittest.cc",
@@ -1720,7 +1752,7 @@ test("base_unittests") {
"deferred_sequenced_task_runner_unittest.cc",
"environment_unittest.cc",
"feature_list_unittest.cc",
- "file_version_info_unittest.cc",
+ "file_version_info_win_unittest.cc",
"files/dir_reader_posix_unittest.cc",
"files/file_locking_unittest.cc",
"files/file_path_unittest.cc",
@@ -1825,6 +1857,7 @@ test("base_unittests") {
"profiler/stack_sampling_profiler_unittest.cc",
"profiler/tracked_time_unittest.cc",
"rand_util_unittest.cc",
+ "run_loop_unittest.cc",
"scoped_clear_errno_unittest.cc",
"scoped_generic_unittest.cc",
"scoped_native_library_unittest.cc",
@@ -1852,8 +1885,10 @@ test("base_unittests") {
"synchronization/cancellation_flag_unittest.cc",
"synchronization/condition_variable_unittest.cc",
"synchronization/lock_unittest.cc",
+ "synchronization/read_write_lock_unittest.cc",
"synchronization/waitable_event_unittest.cc",
"synchronization/waitable_event_watcher_unittest.cc",
+ "sys_byteorder_unittest.cc",
"sys_info_unittest.cc",
"system_monitor/system_monitor_unittest.cc",
"task/cancelable_task_tracker_unittest.cc",
@@ -1862,9 +1897,9 @@ test("base_unittests") {
"task_scheduler/priority_queue_unittest.cc",
"task_scheduler/scheduler_lock_unittest.cc",
"task_scheduler/scheduler_service_thread_unittest.cc",
- "task_scheduler/scheduler_thread_pool_impl_unittest.cc",
- "task_scheduler/scheduler_worker_thread_stack_unittest.cc",
- "task_scheduler/scheduler_worker_thread_unittest.cc",
+ "task_scheduler/scheduler_worker_pool_impl_unittest.cc",
+ "task_scheduler/scheduler_worker_stack_unittest.cc",
+ "task_scheduler/scheduler_worker_unittest.cc",
"task_scheduler/sequence_sort_key_unittest.cc",
"task_scheduler/sequence_unittest.cc",
"task_scheduler/task_scheduler_impl_unittest.cc",
@@ -1959,6 +1994,10 @@ test("base_unittests") {
"//third_party/icu",
]
+ if (is_ios || is_mac) {
+ deps += [ ":base_unittests_arc" ]
+ }
+
public_deps = [
":base_unittests_bundle_data",
]
@@ -2019,8 +2058,6 @@ test("base_unittests") {
}
if (is_linux) {
- sources -= [ "file_version_info_unittest.cc" ]
-
if (is_desktop_linux) {
sources += [ "nix/xdg_util_unittest.cc" ]
}
@@ -2058,7 +2095,7 @@ test("base_unittests") {
if (is_win) {
deps += [ "//base:scoped_handle_test_dll" ]
- if (target_cpu == "x64") {
+ if (current_cpu == "x64") {
sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
deps += [ ":base_profiler_test_support_library" ]
}
@@ -2132,6 +2169,7 @@ if (is_android) {
"android/java/src/org/chromium/base/ApkAssets.java",
"android/java/src/org/chromium/base/ApplicationStatus.java",
"android/java/src/org/chromium/base/BuildInfo.java",
+ "android/java/src/org/chromium/base/Callback.java",
"android/java/src/org/chromium/base/CommandLine.java",
"android/java/src/org/chromium/base/ContentUriUtils.java",
"android/java/src/org/chromium/base/ContextUtils.java",
@@ -2213,6 +2251,7 @@ if (is_android) {
"android/java/src/org/chromium/base/PerfTraceEvent.java",
"android/java/src/org/chromium/base/PowerMonitor.java",
"android/java/src/org/chromium/base/PowerStatusReceiver.java",
+ "android/java/src/org/chromium/base/Promise.java",
"android/java/src/org/chromium/base/ResourceExtractor.java",
"android/java/src/org/chromium/base/SecureRandomInitializer.java",
"android/java/src/org/chromium/base/StreamUtil.java",
@@ -2277,6 +2316,7 @@ if (is_android) {
]
java_files = [
"test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
@@ -2297,6 +2337,7 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/util/PerfTest.java",
"test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
"test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java",
"test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
"test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
"test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
@@ -2330,6 +2371,7 @@ if (is_android) {
java_files = [
"android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
"android/junit/src/org/chromium/base/LogTest.java",
+ "android/junit/src/org/chromium/base/PromiseTest.java",
"test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
"test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
"test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
@@ -2361,6 +2403,11 @@ if (is_android) {
"android/java/templates/BuildConfig.template",
]
package_name = "org/chromium/base"
+
+ defines = []
+ if (!is_java_debug) {
+ defines += [ "NDEBUG" ]
+ }
}
# GYP: //base/base.gyp:base_native_libraries_gen
diff --git a/chromium/base/OWNERS b/chromium/base/OWNERS
index 4d4a2391a4c..b6cfce4dfd2 100644
--- a/chromium/base/OWNERS
+++ b/chromium/base/OWNERS
@@ -1,8 +1,5 @@
-mark@chromium.org
-thakis@chromium.org
-danakj@chromium.org
-thestig@chromium.org
-
+# About src/base:
+#
# Chromium is a very mature project, most things that are generally useful are
# already here, and that things not here aren't generally useful.
#
@@ -15,11 +12,20 @@ thestig@chromium.org
# Adding a new logging macro DPVELOG_NE is not more clear than just
# writing the stuff you want to log in a regular logging statement, even
# if it makes your calling code longer. Just add it to your own code.
+#
+# If the code in question does not need to be used inside base, but will have
+# multiple consumers across the codebase, consider placing it in a new directory
+# under components/ instead.
-per-file *.isolate=maruel@chromium.org
-per-file *.isolate=tandrii@chromium.org
-per-file *.isolate=vadimsh@chromium.org
-per-file security_unittest.cc=jln@chromium.org
+mark@chromium.org
+thakis@chromium.org
+danakj@chromium.org
+thestig@chromium.org
+dcheng@chromium.org
+
+# For Bind/Callback:
+per-file bind*=tzik@chromium.org
+per-file callback*=tzik@chromium.org
# For Android-specific changes:
per-file *android*=nyquist@chromium.org
@@ -30,3 +36,11 @@ per-file *android*=yfriedman@chromium.org
# For FeatureList API:
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
+
+# For bot infrastructure:
+per-file *.isolate=maruel@chromium.org
+per-file *.isolate=tandrii@chromium.org
+per-file *.isolate=vadimsh@chromium.org
+
+# For TCMalloc tests:
+per-file security_unittest.cc=jln@chromium.org
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index 96ccad239a5..d25239ae6f8 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -236,8 +236,16 @@ if (use_allocator == "tcmalloc") {
"-Wno-unused-result",
]
- configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
- configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ # Compiling tcmalloc with -fvisibility=default is only necessary when
+ # not using the allocator shim, which provides the correct visibility
+ # annotations for those symbols which need to be exported (see
+ # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+ # //base/allocator/allocator_shim_internals.h for the definition of
+ # SHIM_ALWAYS_EXPORT).
+ if (!use_experimental_allocator_shim) {
+ configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+ configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ }
ldflags = [
# Don't let linker rip this symbol out, otherwise the heap&cpu
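For context, a minimal sketch (not part of the diff) of the kind of per-symbol annotation the comment above refers to: with the shim enabled, individual allocator entry points carry default visibility even though the target is built with -fvisibility=hidden. The macro and function names here are illustrative assumptions; the real definition lives in //base/allocator/allocator_shim_internals.h as SHIM_ALWAYS_EXPORT.

    #include <stddef.h>

    // Sketch only: keep one symbol exported from a translation unit that is
    // otherwise compiled with -fvisibility=hidden.
    #define EXAMPLE_ALWAYS_EXPORT __attribute__((visibility("default")))

    // An entry point annotated this way remains visible to the dynamic
    // linker even though the rest of the target stays hidden.
    extern "C" EXAMPLE_ALWAYS_EXPORT void* example_shim_malloc(size_t size);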
diff --git a/chromium/base/allocator/allocator.gyp b/chromium/base/allocator/allocator.gyp
index 3844c08add8..674d4d645f7 100644
--- a/chromium/base/allocator/allocator.gyp
+++ b/chromium/base/allocator/allocator.gyp
@@ -301,9 +301,6 @@
'-Wno-sign-compare',
'-Wno-unused-result',
],
- 'cflags!': [
- '-fvisibility=hidden',
- ],
'link_settings': {
'ldflags': [
# Don't let linker rip this symbol out, otherwise the heap&cpu
@@ -315,6 +312,19 @@
'-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv',
],
},
+ # Compiling tcmalloc with -fvisibility=default is only necessary when
+ # not using the allocator shim, which provides the correct visibility
+ # annotations for those symbols which need to be exported (see
+ # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+ # //base/allocator/allocator_shim_internals.h for the definition of
+ # SHIM_ALWAYS_EXPORT).
+ 'conditions': [
+ ['use_experimental_allocator_shim==0', {
+ 'cflags!': [
+ '-fvisibility=hidden',
+ ],
+ }],
+ ],
}],
['profiling!=1', {
'sources!': [
diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc
index e61dd73af7f..e6abc698360 100644
--- a/chromium/base/allocator/allocator_shim_unittest.cc
+++ b/chromium/base/allocator/allocator_shim_unittest.cc
@@ -76,15 +76,15 @@ class AllocatorShimTest : public testing::Test {
void* address,
size_t size) {
if (instance_) {
- // Address 0x42 is a special sentinel for the NewHandlerConcurrency test.
+ // Address 0x420 is a special sentinel for the NewHandlerConcurrency test.
// The first time (but only the first one) it is hit it fails, causing the
// invocation of the std::new_handler.
- if (address == reinterpret_cast<void*>(0x42)) {
- if (!instance_->did_fail_realloc_0x42_once->Get()) {
- instance_->did_fail_realloc_0x42_once->Set(true);
+ if (address == reinterpret_cast<void*>(0x420)) {
+ if (!instance_->did_fail_realloc_0x420_once->Get()) {
+ instance_->did_fail_realloc_0x420_once->Set(true);
return nullptr;
} else {
- return reinterpret_cast<void*>(0x42ul);
+ return reinterpret_cast<void*>(0x420ul);
}
}
@@ -120,7 +120,7 @@ class AllocatorShimTest : public testing::Test {
memset(&aligned_allocs_intercepted_by_alignment, 0, array_size);
memset(&reallocs_intercepted_by_size, 0, array_size);
memset(&frees_intercepted_by_addr, 0, array_size);
- did_fail_realloc_0x42_once.reset(new ThreadLocalBoolean());
+ did_fail_realloc_0x420_once.reset(new ThreadLocalBoolean());
subtle::Release_Store(&num_new_handler_calls, 0);
instance_ = this;
}
@@ -135,7 +135,7 @@ class AllocatorShimTest : public testing::Test {
size_t reallocs_intercepted_by_size[kMaxSizeTracked];
size_t reallocs_intercepted_by_addr[kMaxSizeTracked];
size_t frees_intercepted_by_addr[kMaxSizeTracked];
- std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0x42_once;
+ std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0x420_once;
subtle::Atomic32 num_new_handler_calls;
private:
@@ -158,8 +158,8 @@ class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate {
void ThreadMain() override {
event_->Wait();
- void* res = realloc(reinterpret_cast<void*>(0x42ul), 1);
- EXPECT_EQ(0x42u, reinterpret_cast<uintptr_t>(res));
+ void* res = realloc(reinterpret_cast<void*>(0x420ul), 1);
+ EXPECT_EQ(reinterpret_cast<void*>(0x420ul), res);
}
private:
@@ -292,7 +292,8 @@ TEST_F(AllocatorShimTest, InterceptCppSymbols) {
// This test exercises the case of concurrent OOM failure, which would end up
// invoking std::new_handler concurrently. This is to cover the CallNewHandler()
// paths of allocator_shim.cc and smoke-test its thread safey.
-// The test creates kNumThreads threads. Each of them does just a realloc(0x42).
+// The test creates kNumThreads threads. Each of them does just a
+// realloc(0x420).
// The shim intercepts such realloc and makes it fail only once on each thread.
// We expect to see excactly kNumThreads invocations of the new_handler.
TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
@@ -301,7 +302,8 @@ TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
// The WaitableEvent here is used to attempt to trigger all the threads at
// the same time, after they have been initialized.
- WaitableEvent event(/*manual_reset=*/true, /*initially_signaled=*/false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
ThreadDelegateForNewHandlerTest mock_thread_main(&event);
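As an aside on the WaitableEvent change above, a minimal sketch of the new constructor style this update applies across the tree; the two enums replace the old pair of bools shown in the removed line.

    #include "base/synchronization/waitable_event.h"

    // Manual-reset event that starts unsignaled; equivalent to the old
    // WaitableEvent(true /* manual_reset */, false /* initially_signaled */).
    base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                              base::WaitableEvent::InitialState::NOT_SIGNALED);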
diff --git a/chromium/base/allocator/tcmalloc_unittest.cc b/chromium/base/allocator/tcmalloc_unittest.cc
index 37c0ef61eff..5313bfdf652 100644
--- a/chromium/base/allocator/tcmalloc_unittest.cc
+++ b/chromium/base/allocator/tcmalloc_unittest.cc
@@ -5,28 +5,27 @@
#include <stddef.h>
#include <stdio.h>
+#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(USE_TCMALLOC)
-extern "C" {
-void* tc_malloc(size_t size);
-void tc_free(void*);
-}
-
namespace {
using std::min;
#ifdef NDEBUG
-void* TCMallocDoMallocForTest(size_t size) {
- return tc_malloc(size);
+// We wrap malloc and free in noinline functions to ensure that we test the real
+// implementation of the allocator. Otherwise, the compiler may specifically
+// recognize the calls to malloc and free in our tests and optimize them away.
+NOINLINE void* TCMallocDoMallocForTest(size_t size) {
+ return malloc(size);
}
-void TCMallocDoFreeForTest(void* ptr) {
- tc_free(ptr);
+NOINLINE void TCMallocDoFreeForTest(void* ptr) {
+ free(ptr);
}
#endif
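The NOINLINE rationale above can be illustrated with a sketch (a hypothetical test body, not code from the diff): a paired malloc/free whose result is never observed may be elided by the optimizer, so the allocator under test would never actually run.

    #include <stdlib.h>

    // Without an opaque (noinline) wrapper, the compiler is free to treat
    // this allocation as dead and remove both calls, so nothing in tcmalloc
    // is exercised.
    void MayBeOptimizedAway() {
      void* p = malloc(16);
      free(p);
    }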
diff --git a/chromium/base/android/application_status_listener_unittest.cc b/chromium/base/android/application_status_listener_unittest.cc
index 896bbe89389..803dedb1280 100644
--- a/chromium/base/android/application_status_listener_unittest.cc
+++ b/chromium/base/android/application_status_listener_unittest.cc
@@ -43,10 +43,10 @@ class MultiThreadedTest {
public:
MultiThreadedTest()
: state_(kInvalidApplicationState),
- event_(false, false),
+ event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
thread_("ApplicationStatusTest thread"),
- main_() {
- }
+ main_() {}
void Run() {
// Start the thread and tell it to register for events.
diff --git a/chromium/base/android/build_info.cc b/chromium/base/android/build_info.cc
index 368d53c7427..ef8f572497d 100644
--- a/chromium/base/android/build_info.cc
+++ b/chromium/base/android/build_info.cc
@@ -51,6 +51,8 @@ BuildInfo::BuildInfo(JNIEnv* env)
android_build_id_(StrDupJString(Java_BuildInfo_getAndroidBuildId(env))),
android_build_fp_(StrDupJString(
Java_BuildInfo_getAndroidBuildFingerprint(env))),
+ gms_version_code_(StrDupJString(Java_BuildInfo_getGMSVersionCode(
+ env, GetApplicationContext()))),
package_version_code_(StrDupJString(Java_BuildInfo_getPackageVersionCode(
env, GetApplicationContext()))),
package_version_name_(StrDupJString(Java_BuildInfo_getPackageVersionName(
diff --git a/chromium/base/android/build_info.h b/chromium/base/android/build_info.h
index 07dba43de79..838d6f802ca 100644
--- a/chromium/base/android/build_info.h
+++ b/chromium/base/android/build_info.h
@@ -74,6 +74,10 @@ class BASE_EXPORT BuildInfo {
return android_build_fp_;
}
+ const char* gms_version_code() const {
+ return gms_version_code_;
+ }
+
const char* package_version_code() const {
return package_version_code_;
}
@@ -123,6 +127,7 @@ class BASE_EXPORT BuildInfo {
const char* const brand_;
const char* const android_build_id_;
const char* const android_build_fp_;
+ const char* const gms_version_code_;
const char* const package_version_code_;
const char* const package_version_name_;
const char* const package_label_;
diff --git a/chromium/base/android/callback_android.cc b/chromium/base/android/callback_android.cc
new file mode 100644
index 00000000000..abcec2f85a9
--- /dev/null
+++ b/chromium/base/android/callback_android.cc
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/android/callback_android.h"
+
+#include "jni/Callback_jni.h"
+
+namespace base {
+namespace android {
+
+void RunCallbackAndroid(const JavaRef<jobject>& callback,
+ const JavaRef<jobject>& arg) {
+ Java_Callback_onResultFromNativeV_JLO(base::android::AttachCurrentThread(),
+ callback.obj(),
+ arg.obj());
+}
+
+void RunCallbackAndroid(const JavaRef<jobject>& callback, bool arg) {
+ Java_Callback_onResultFromNativeV_Z(base::android::AttachCurrentThread(),
+ callback.obj(),
+ static_cast<jboolean>(arg));
+}
+
+void RunCallbackAndroid(const JavaRef<jobject>& callback, int arg) {
+ Java_Callback_onResultFromNativeV_I(base::android::AttachCurrentThread(),
+ callback.obj(), arg);
+}
+
+bool RegisterCallbackAndroid(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace android
+} // namespace base
diff --git a/chromium/base/android/callback_android.h b/chromium/base/android/callback_android.h
new file mode 100644
index 00000000000..20254ac7435
--- /dev/null
+++ b/chromium/base/android/callback_android.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ANDROID_CALLBACK_ANDROID_H_
+#define BASE_ANDROID_CALLBACK_ANDROID_H_
+
+#include <jni.h>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/base_export.h"
+
+namespace base {
+namespace android {
+
+// Runs the given |callback| with the specified |arg|.
+void BASE_EXPORT RunCallbackAndroid(const JavaRef<jobject>& callback,
+ const JavaRef<jobject>& arg);
+
+// Runs the given |callback| with the specified |arg|.
+void BASE_EXPORT RunCallbackAndroid(const JavaRef<jobject>& callback,
+ bool arg);
+
+// Runs the given |callback| with the specified |arg|.
+void BASE_EXPORT RunCallbackAndroid(const JavaRef<jobject>& callback, int arg);
+
+// JNI registration boilerplate.
+bool RegisterCallbackAndroid(JNIEnv* env);
+
+} // namespace android
+} // namespace base
+
+#endif // BASE_ANDROID_CALLBACK_ANDROID_H_
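A brief usage sketch for the new helpers (hypothetical caller, not part of the diff); it assumes a Java org.chromium.base.Callback reference obtained elsewhere:

    #include "base/android/callback_android.h"
    #include "base/android/scoped_java_ref.h"

    namespace {

    // Reports a native boolean result back to a Java Callback<Boolean>.
    void ReportResultToJava(
        const base::android::ScopedJavaGlobalRef<jobject>& java_callback,
        bool succeeded) {
      // Dispatches through Java_Callback_onResultFromNativeV_Z.
      base::android::RunCallbackAndroid(java_callback, succeeded);
    }

    }  // namespace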
diff --git a/chromium/base/android/trace_event_binding.cc b/chromium/base/android/trace_event_binding.cc
index 534d8552fcd..8551f99f961 100644
--- a/chromium/base/android/trace_event_binding.cc
+++ b/chromium/base/android/trace_event_binding.cc
@@ -28,10 +28,7 @@ const char kLooperDispatchMessage[] = "Looper.dispatchMessage";
class TraceEventDataConverter {
public:
TraceEventDataConverter(JNIEnv* env, jstring jname, jstring jarg)
- : env_(env),
- jname_(jname),
- jarg_(jarg),
- name_(ConvertJavaStringToUTF8(env, jname)),
+ : name_(ConvertJavaStringToUTF8(env, jname)),
has_arg_(jarg != nullptr),
arg_(jarg ? ConvertJavaStringToUTF8(env, jarg) : "") {}
~TraceEventDataConverter() {
@@ -43,9 +40,6 @@ class TraceEventDataConverter {
const char* arg() { return has_arg_ ? arg_.c_str() : nullptr; }
private:
- JNIEnv* env_;
- jstring jname_;
- jstring jarg_;
std::string name_;
bool has_arg_;
std::string arg_;
diff --git a/chromium/base/base.gyp b/chromium/base/base.gyp
index 7c9417d013c..7e98715e466 100644
--- a/chromium/base/base.gyp
+++ b/chromium/base/base.gyp
@@ -24,6 +24,7 @@
'allocator/allocator.gyp:allocator',
'allocator/allocator.gyp:allocator_features#target',
'base_debugging_flags#target',
+ 'base_win_features#target',
'base_static',
'base_build_date#target',
'../testing/gtest.gyp:gtest_prod',
@@ -406,7 +407,7 @@
'deferred_sequenced_task_runner_unittest.cc',
'environment_unittest.cc',
'feature_list_unittest.cc',
- 'file_version_info_unittest.cc',
+ 'file_version_info_win_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'files/file_locking_unittest.cc',
'files/file_path_unittest.cc',
@@ -514,6 +515,7 @@
'profiler/stack_sampling_profiler_unittest.cc',
'profiler/tracked_time_unittest.cc',
'rand_util_unittest.cc',
+ 'run_loop_unittest.cc',
'scoped_clear_errno_unittest.cc',
'scoped_generic_unittest.cc',
'scoped_native_library_unittest.cc',
@@ -541,8 +543,10 @@
'synchronization/cancellation_flag_unittest.cc',
'synchronization/condition_variable_unittest.cc',
'synchronization/lock_unittest.cc',
+ 'synchronization/read_write_lock_unittest.cc',
'synchronization/waitable_event_unittest.cc',
'synchronization/waitable_event_watcher_unittest.cc',
+ 'sys_byteorder_unittest.cc',
'sys_info_unittest.cc',
'system_monitor/system_monitor_unittest.cc',
'task/cancelable_task_tracker_unittest.cc',
@@ -551,9 +555,9 @@
'task_scheduler/priority_queue_unittest.cc',
'task_scheduler/scheduler_lock_unittest.cc',
'task_scheduler/scheduler_service_thread_unittest.cc',
- 'task_scheduler/scheduler_thread_pool_impl_unittest.cc',
- 'task_scheduler/scheduler_worker_thread_stack_unittest.cc',
- 'task_scheduler/scheduler_worker_thread_unittest.cc',
+ 'task_scheduler/scheduler_worker_unittest.cc',
+ 'task_scheduler/scheduler_worker_pool_impl_unittest.cc',
+ 'task_scheduler/scheduler_worker_stack_unittest.cc',
'task_scheduler/sequence_sort_key_unittest.cc',
'task_scheduler/sequence_unittest.cc',
'task_scheduler/task_scheduler_impl_unittest.cc',
@@ -636,6 +640,11 @@
'module_dir': 'base'
},
'conditions': [
+ ['OS == "ios" or OS == "mac"', {
+ 'dependencies': [
+ 'base_unittests_arc',
+ ],
+ }],
['OS == "android"', {
'dependencies': [
'android/jni_generator/jni_generator.gyp:jni_generator_tests',
@@ -675,9 +684,6 @@
'defines': [
'USE_SYMBOLIZE',
],
- 'sources!': [
- 'file_version_info_unittest.cc',
- ],
'conditions': [
[ 'desktop_linux==1', {
'sources': [
@@ -1019,7 +1025,7 @@
},
{
# GN version: //base/debug:debugging_flags
- # Since this generates a file, it most only be referenced in the target
+ # Since this generates a file, it must only be referenced in the target
# toolchain or there will be multiple rules that generate the header.
# When referenced from a target that might be compiled in the host
# toolchain, always refer to 'base_debugging_flags#target'.
@@ -1033,6 +1039,27 @@
},
},
{
+ # GN version: //base/win:base_win_features
+ # Since this generates a file, it must only be referenced in the target
+ # toolchain or there will be multiple rules that generate the header.
+ # When referenced from a target that might be compiled in the host
+ # toolchain, always refer to 'base_win_features#target'.
+ 'target_name': 'base_win_features',
+ 'conditions': [
+ ['OS=="win"', {
+ 'includes': [ '../build/buildflag_header.gypi' ],
+ 'variables': {
+ 'buildflag_header_path': 'base/win/base_features.h',
+ 'buildflag_flags': [
+ 'SINGLE_MODULE_MODE_HANDLE_VERIFIER=<(single_module_mode_handle_verifier)',
+ ],
+ },
+ }, {
+ 'type': 'none',
+ }],
+ ],
+ },
+ {
'type': 'none',
'target_name': 'base_build_date',
'hard_dependency': 1,
@@ -1383,6 +1410,7 @@
'android/java/src/org/chromium/base/ApplicationStatus.java',
'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
'android/java/src/org/chromium/base/BuildInfo.java',
+ 'android/java/src/org/chromium/base/Callback.java',
'android/java/src/org/chromium/base/CommandLine.java',
'android/java/src/org/chromium/base/ContentUriUtils.java',
'android/java/src/org/chromium/base/ContextUtils.java',
@@ -1735,5 +1763,32 @@
},
],
}],
+ ['OS == "ios" or OS == "mac"', {
+ 'targets': [
+ {
+ 'target_name': 'base_unittests_arc',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'mac/bind_objc_block_unittest_arc.mm',
+ 'mac/scoped_nsobject_unittest_arc.mm'
+ ],
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ },
+ 'target_conditions': [
+ ['OS == "ios" and _toolset != "host"', {
+ 'sources/': [
+ ['include', 'mac/bind_objc_block_unittest_arc\\.mm$'],
+ ['include', 'mac/scoped_nsobject_unittest_arc\\.mm$'],
+ ],
+ }]
+ ],
+ },
+ ],
+ }],
],
}
diff --git a/chromium/base/base.gypi b/chromium/base/base.gypi
index 368348d736e..750a081743f 100644
--- a/chromium/base/base.gypi
+++ b/chromium/base/base.gypi
@@ -32,6 +32,8 @@
'android/base_jni_registrar.h',
'android/build_info.cc',
'android/build_info.h',
+ 'android/callback_android.cc',
+ 'android/callback_android.h',
'android/command_line_android.cc',
'android/command_line_android.h',
'android/content_uri_utils.cc',
@@ -326,6 +328,7 @@
'mac/scoped_nsautorelease_pool.h',
'mac/scoped_nsautorelease_pool.mm',
'mac/scoped_nsobject.h',
+ 'mac/scoped_nsobject.mm',
'mac/scoped_objc_class_swizzler.h',
'mac/scoped_objc_class_swizzler.mm',
'mac/scoped_sending_event.h',
@@ -427,7 +430,6 @@
'metrics/user_metrics.cc',
'metrics/user_metrics.h',
'metrics/user_metrics_action.h',
- 'move.h',
'native_library.h',
'native_library_ios.mm',
'native_library_mac.mm',
@@ -556,9 +558,8 @@
'sequenced_task_runner.cc',
'sequenced_task_runner.h',
'sequenced_task_runner_helpers.h',
+ 'sha1.cc',
'sha1.h',
- 'sha1_portable.cc',
- 'sha1_win.cc',
'single_thread_task_runner.h',
'stl_util.h',
'strings/latin1_string_conversions.cc',
@@ -608,6 +609,10 @@
'synchronization/lock_impl.h',
'synchronization/lock_impl_posix.cc',
'synchronization/lock_impl_win.cc',
+ 'synchronization/read_write_lock.h',
+ 'synchronization/read_write_lock_nacl.cc',
+ 'synchronization/read_write_lock_posix.cc',
+ 'synchronization/read_write_lock_win.cc',
'synchronization/spin_wait.h',
'synchronization/waitable_event.h',
'synchronization/waitable_event_posix.cc',
@@ -644,13 +649,13 @@
'task_scheduler/scheduler_lock_impl.h',
'task_scheduler/scheduler_service_thread.cc',
'task_scheduler/scheduler_service_thread.h',
- 'task_scheduler/scheduler_thread_pool.h',
- 'task_scheduler/scheduler_thread_pool_impl.cc',
- 'task_scheduler/scheduler_thread_pool_impl.h',
- 'task_scheduler/scheduler_worker_thread.cc',
- 'task_scheduler/scheduler_worker_thread.h',
- 'task_scheduler/scheduler_worker_thread_stack.cc',
- 'task_scheduler/scheduler_worker_thread_stack.h',
+ 'task_scheduler/scheduler_worker.cc',
+ 'task_scheduler/scheduler_worker.h',
+ 'task_scheduler/scheduler_worker_pool.h',
+ 'task_scheduler/scheduler_worker_pool_impl.cc',
+ 'task_scheduler/scheduler_worker_pool_impl.h',
+ 'task_scheduler/scheduler_worker_stack.cc',
+ 'task_scheduler/scheduler_worker_stack.h',
'task_scheduler/sequence.cc',
'task_scheduler/sequence.h',
'task_scheduler/sequence_sort_key.cc',
@@ -875,6 +880,7 @@
'process/process_posix.cc',
'rand_util_posix.cc',
'scoped_native_library.cc',
+ 'synchronization/read_write_lock_posix.cc',
'sys_info.cc',
'sys_info_posix.cc',
'third_party/dynamic_annotations/dynamic_annotations.c',
@@ -932,6 +938,7 @@
['include', '^mac/mac_logging\\.'],
['include', '^mac/mach_logging\\.'],
['include', '^mac/objc_property_releaser\\.'],
+ ['include', '^mac/scoped_block\\.'],
['include', '^mac/scoped_mach_port\\.'],
['include', '^mac/scoped_mach_vm\\.'],
['include', '^mac/scoped_nsautorelease_pool\\.'],
@@ -1000,9 +1007,6 @@
'files/file_path_watcher_stub.cc',
'message_loop/message_pump_libevent.cc',
'posix/file_descriptor_shuffle.cc',
- # Not using sha1_win.cc because it may have caused a
- # regression to page cycler moz.
- 'sha1_win.cc',
'strings/string16.cc',
],
},],
diff --git a/chromium/base/base_paths.h b/chromium/base/base_paths.h
index 26b2fd4c9a1..ef6aa828362 100644
--- a/chromium/base/base_paths.h
+++ b/chromium/base/base_paths.h
@@ -32,9 +32,8 @@ enum BasePathKey {
DIR_MODULE, // Directory containing FILE_MODULE.
DIR_TEMP, // Temporary directory.
DIR_HOME, // User's root home directory. On Windows this will look
- // like "C:\Users\you" (or on XP
- // "C:\Document and Settings\you") which isn't necessarily
- // a great place to put files.
+ // like "C:\Users\<user>" which isn't necessarily a great
+ // place to put files.
FILE_EXE, // Path and filename of the current executable.
FILE_MODULE, // Path and filename of the module containing the code for
// the PathService (which could differ from FILE_EXE if the
diff --git a/chromium/base/base_paths_android.cc b/chromium/base/base_paths_android.cc
index ca58179b678..8a400e5bd14 100644
--- a/chromium/base/base_paths_android.cc
+++ b/chromium/base/base_paths_android.cc
@@ -38,8 +38,10 @@ bool PathProviderAndroid(int key, FilePath* result) {
case base::DIR_MODULE:
return base::android::GetNativeLibraryDirectory(result);
case base::DIR_SOURCE_ROOT:
- // This const is only used for tests.
- return base::android::GetExternalStorageDirectory(result);
+ // Used only by tests.
+ // In that context, hooked up via base/test/test_support_android.cc.
+ NOTIMPLEMENTED();
+ return false;
case base::DIR_USER_DESKTOP:
// Android doesn't support GetUserDesktop.
NOTIMPLEMENTED();
diff --git a/chromium/base/base_paths_win.cc b/chromium/base/base_paths_win.cc
index 86ca4502f53..03adb1e1c95 100644
--- a/chromium/base/base_paths_win.cc
+++ b/chromium/base/base_paths_win.cc
@@ -166,15 +166,15 @@ bool PathProviderWin(int key, FilePath* result) {
// Windows.
// http://stackoverflow.com/questions/76080/how-do-you-reliably-get-the-quick-
// http://www.microsoft.com/technet/scriptcenter/resources/qanda/sept05/hey0901.mspx
- cur = cur.AppendASCII("Microsoft")
- .AppendASCII("Internet Explorer")
- .AppendASCII("Quick Launch");
+ cur = cur.Append(FILE_PATH_LITERAL("Microsoft"))
+ .Append(FILE_PATH_LITERAL("Internet Explorer"))
+ .Append(FILE_PATH_LITERAL("Quick Launch"));
break;
case base::DIR_TASKBAR_PINS:
if (!PathService::Get(base::DIR_USER_QUICK_LAUNCH, &cur))
return false;
- cur = cur.AppendASCII("User Pinned");
- cur = cur.AppendASCII("TaskBar");
+ cur = cur.Append(FILE_PATH_LITERAL("User Pinned"));
+ cur = cur.Append(FILE_PATH_LITERAL("TaskBar"));
break;
case base::DIR_WINDOWS_FONTS:
if (FAILED(SHGetFolderPath(
diff --git a/chromium/base/base_paths_win.h b/chromium/base/base_paths_win.h
index d9dbc39f99a..761226e0add 100644
--- a/chromium/base/base_paths_win.h
+++ b/chromium/base/base_paths_win.h
@@ -26,24 +26,21 @@ enum {
DIR_PROGRAM_FILES6432, // See table above.
DIR_IE_INTERNET_CACHE, // Temporary Internet Files directory.
- DIR_COMMON_START_MENU, // Usually "C:\Documents and Settings\All Users\
- // Start Menu\Programs"
- DIR_START_MENU, // Usually "C:\Documents and Settings\<user>\
+ DIR_COMMON_START_MENU, // Usually "C:\ProgramData\Microsoft\Windows\
// Start Menu\Programs"
+ DIR_START_MENU, // Usually "C:\Users\<user>\AppData\Roaming\Microsoft\
+ // Windows\Start Menu\Programs"
DIR_APP_DATA, // Application Data directory under the user profile.
DIR_LOCAL_APP_DATA, // "Local Settings\Application Data" directory under
// the user profile.
- DIR_COMMON_APP_DATA, // W2K, XP, W2K3: "C:\Documents and Settings\
- // All Users\Application Data".
- // Vista, W2K8 and above: "C:\ProgramData".
+ DIR_COMMON_APP_DATA, // Usually "C:\ProgramData".
DIR_APP_SHORTCUTS, // Where tiles on the start screen are stored, only
// for Windows 8. Maps to "Local\AppData\Microsoft\
// Windows\Application Shortcuts\".
DIR_COMMON_DESKTOP, // Directory for the common desktop (visible
// on all user's Desktop).
DIR_USER_QUICK_LAUNCH, // Directory for the quick launch shortcuts.
- DIR_TASKBAR_PINS, // Directory for the shortcuts pinned to taskbar
- // (Win7-8) via base::win::PinShortcutToTaskbar().
+ DIR_TASKBAR_PINS, // Directory for the shortcuts pinned to taskbar.
DIR_WINDOWS_FONTS, // Usually C:\Windows\Fonts.
PATH_WIN_END
diff --git a/chromium/base/big_endian.h b/chromium/base/big_endian.h
index 868b0442173..5684c6758da 100644
--- a/chromium/base/big_endian.h
+++ b/chromium/base/big_endian.h
@@ -15,8 +15,8 @@ namespace base {
// Read an integer (signed or unsigned) from |buf| in Big Endian order.
// Note: this loop is unrolled with -O1 and above.
-// NOTE(szym): glibc dns-canon.c and SpdyFrameReader use
-// ntohs(*(uint16_t*)ptr) which is potentially unaligned.
+// NOTE(szym): glibc dns-canon.c uses ntohs(*(uint16_t*)ptr) which is
+// potentially unaligned.
// This would cause SIGBUS on ARMv5 or earlier and ARMv6-M.
template<typename T>
inline void ReadBigEndian(const char buf[], T* out) {
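
A minimal sketch of the helper above: decoding a 16-bit length prefix from a possibly unaligned buffer (the function name is illustrative):

#include <stdint.h>

#include "base/big_endian.h"

uint16_t DecodeLengthPrefix(const char* buf) {
  uint16_t length = 0;
  base::ReadBigEndian(buf, &length);  // Safe even if |buf| is unaligned.
  return length;
}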
diff --git a/chromium/base/bind.h b/chromium/base/bind.h
index 46dbb913bee..ed5a94a8c2f 100644
--- a/chromium/base/bind.h
+++ b/chromium/base/bind.h
@@ -46,47 +46,11 @@
namespace base {
-namespace internal {
-
-// Don't use Alias Template directly here to avoid a compile error on MSVC2013.
template <typename Functor, typename... Args>
-struct MakeUnboundRunTypeImpl {
- using Type =
- typename BindState<
- typename FunctorTraits<Functor>::RunnableType,
- typename FunctorTraits<Functor>::RunType,
- Args...>::UnboundRunType;
-};
-
-} // namespace internal
-
-template <typename Functor, typename... Args>
-using MakeUnboundRunType =
- typename internal::MakeUnboundRunTypeImpl<Functor, Args...>::Type;
-
-template <typename Functor, typename... Args>
-base::Callback<MakeUnboundRunType<Functor, Args...>>
+inline base::Callback<MakeUnboundRunType<Functor, Args...>>
Bind(Functor functor, Args&&... args) {
// Type aliases for how to store and run the functor.
using RunnableType = typename internal::FunctorTraits<Functor>::RunnableType;
- using RunType = typename internal::FunctorTraits<Functor>::RunType;
-
- // Use RunnableType::RunType instead of RunType above because our
- // checks below for bound references need to know what the actual
- // functor is going to interpret the argument as.
- using BoundRunType = typename RunnableType::RunType;
-
- using BoundArgs =
- internal::TakeTypeListItem<sizeof...(Args),
- internal::ExtractArgs<BoundRunType>>;
-
- // Do not allow binding a non-const reference parameter. Non-const reference
- // parameters are disallowed by the Google style guide. Also, binding a
- // non-const reference parameter can make for subtle bugs because the
- // invoked function will receive a reference to the stored copy of the
- // argument and not the original.
- static_assert(!internal::HasNonConstReferenceItem<BoundArgs>::value,
- "do not bind functions with nonconst ref");
const bool is_method = internal::HasIsMethodTag<RunnableType>::value;
@@ -100,11 +64,14 @@ Bind(Functor functor, Args&&... args) {
!internal::HasRefCountedParamAsRawPtr<is_method, Args...>::value,
"a parameter is a refcounted type and needs scoped_refptr");
- using BindState = internal::BindState<RunnableType, RunType, Args...>;
+ using BindState = internal::BindState<RunnableType, Args...>;
+ using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+ using CallbackType = Callback<UnboundRunType>;
+ using Invoker = internal::Invoker<BindState, UnboundRunType>;
- return Callback<typename BindState::UnboundRunType>(
- new BindState(internal::MakeRunnable(functor),
- std::forward<Args>(args)...));
+ return CallbackType(new BindState(internal::MakeRunnable(functor),
+ std::forward<Args>(args)...),
+ &Invoker::Run);
}
} // namespace base
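
A sketch of the behavior the reworked Bind() preserves: binding the first argument of a two-argument function yields a Callback over the remaining argument, matching MakeUnboundRunType (names are illustrative):

#include "base/bind.h"
#include "base/callback.h"

int Add(int x, int y) { return x + y; }

void Example() {
  // MakeUnboundRunType<int (*)(int, int), int> is int(int).
  base::Callback<int(int)> add_two = base::Bind(&Add, 2);
  int result = add_two.Run(3);  // result == 5.
  (void)result;
}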
diff --git a/chromium/base/bind_helpers.h b/chromium/base/bind_helpers.h
index 590d788b96e..29057baef20 100644
--- a/chromium/base/bind_helpers.h
+++ b/chromium/base/bind_helpers.h
@@ -170,6 +170,10 @@
#include "build/build_config.h"
namespace base {
+
+template <typename T>
+struct IsWeakReceiver;
+
namespace internal {
// Use the Substitution Failure Is Not An Error (SFINAE) trick to inspect T
@@ -236,64 +240,6 @@ namespace internal {
//
// Works on gcc-4.2, gcc-4.4, and Visual Studio 2008.
//
-// TODO(ajwong): Move to ref_counted.h or template_util.h when we've vetted
-// this works well.
-//
-// TODO(ajwong): Make this check for Release() as well.
-// See http://crbug.com/82038.
-template <typename T>
-class SupportsAddRefAndRelease {
- using Yes = char[1];
- using No = char[2];
-
- struct BaseMixin {
- void AddRef();
- };
-
-// MSVC warns when you try to use Base if T has a private destructor, the
-// common pattern for refcounted types. It does this even though no attempt to
-// instantiate Base is made. We disable the warning for this definition.
-#if defined(OS_WIN)
-#pragma warning(push)
-#pragma warning(disable:4624)
-#endif
- struct Base : public T, public BaseMixin {
- };
-#if defined(OS_WIN)
-#pragma warning(pop)
-#endif
-
- template <void(BaseMixin::*)()> struct Helper {};
-
- template <typename C>
- static No& Check(Helper<&C::AddRef>*);
-
- template <typename >
- static Yes& Check(...);
-
- public:
- enum { value = sizeof(Check<Base>(0)) == sizeof(Yes) };
-};
-
-// Helpers to assert that arguments of a recounted type are bound with a
-// scoped_refptr.
-template <bool IsClasstype, typename T>
-struct UnsafeBindtoRefCountedArgHelper : std::false_type {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArgHelper<true, T>
- : std::integral_constant<bool, SupportsAddRefAndRelease<T>::value> {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArg : std::false_type {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArg<T*>
- : UnsafeBindtoRefCountedArgHelper<std::is_class<T>::value, T> {
-};
template <typename T>
class HasIsMethodTag {
@@ -340,18 +286,11 @@ class RetainedRefWrapper {
template <typename T>
struct IgnoreResultHelper {
- explicit IgnoreResultHelper(T functor) : functor_(functor) {}
+ explicit IgnoreResultHelper(T functor) : functor_(std::move(functor)) {}
T functor_;
};
-template <typename T>
-struct IgnoreResultHelper<Callback<T> > {
- explicit IgnoreResultHelper(const Callback<T>& functor) : functor_(functor) {}
-
- const Callback<T>& functor_;
-};
-
// An alternate implementation is to avoid the destructive copy, and instead
// specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
// a class that is essentially a std::unique_ptr<>.
@@ -417,17 +356,17 @@ class PassedWrapper {
// Unwrap the stored parameters for the wrappers above.
template <typename T>
-const T& Unwrap(const T& o) {
- return o;
+T&& Unwrap(T&& o) {
+ return std::forward<T>(o);
}
template <typename T>
-T* Unwrap(UnretainedWrapper<T> unretained) {
+T* Unwrap(const UnretainedWrapper<T>& unretained) {
return unretained.get();
}
template <typename T>
-const T& Unwrap(ConstRefWrapper<T> const_ref) {
+const T& Unwrap(const ConstRefWrapper<T>& const_ref) {
return const_ref.get();
}
@@ -437,17 +376,12 @@ T* Unwrap(const RetainedRefWrapper<T>& o) {
}
template <typename T>
-const WeakPtr<T>& Unwrap(const WeakPtr<T>& o) {
- return o;
-}
-
-template <typename T>
T* Unwrap(const OwnedWrapper<T>& o) {
return o.get();
}
template <typename T>
-T Unwrap(PassedWrapper<T>& o) {
+T Unwrap(const PassedWrapper<T>& o) {
return o.Take();
}
@@ -458,16 +392,11 @@ T Unwrap(PassedWrapper<T>& o) {
//
// The first argument should be the type of the object that will be received by
// the method.
-template <bool IsMethod, typename... Args>
-struct IsWeakMethod : public std::false_type {};
-
-template <typename T, typename... Args>
-struct IsWeakMethod<true, WeakPtr<T>, Args...> : public std::true_type {};
+template <bool is_method, typename... Args>
+struct IsWeakMethod : std::false_type {};
template <typename T, typename... Args>
-struct IsWeakMethod<true, ConstRefWrapper<WeakPtr<T>>, Args...>
- : public std::true_type {};
-
+struct IsWeakMethod<true, T, Args...> : IsWeakReceiver<T> {};
// Packs a list of types to hold them in a single type.
template <typename... Types>
@@ -550,19 +479,25 @@ struct MakeFunctionTypeImpl<R, TypeList<Args...>> {
template <typename R, typename ArgList>
using MakeFunctionType = typename MakeFunctionTypeImpl<R, ArgList>::Type;
-// Used for ExtractArgs.
+// Used for ExtractArgs and ExtractReturnType.
template <typename Signature>
struct ExtractArgsImpl;
template <typename R, typename... Args>
struct ExtractArgsImpl<R(Args...)> {
- using Type = TypeList<Args...>;
+ using ReturnType = R;
+ using ArgsList = TypeList<Args...>;
};
// A type-level function that extracts function arguments into a TypeList.
// E.g. ExtractArgs<R(A, B, C)> is evaluated to TypeList<A, B, C>.
template <typename Signature>
-using ExtractArgs = typename ExtractArgsImpl<Signature>::Type;
+using ExtractArgs = typename ExtractArgsImpl<Signature>::ArgsList;
+
+// A type-level function that extracts the return type of a function.
+// E.g. ExtractReturnType<R(A, B, C)> is evaluated to R.
+template <typename Signature>
+using ExtractReturnType = typename ExtractArgsImpl<Signature>::ReturnType;
} // namespace internal
@@ -611,13 +546,7 @@ static inline internal::PassedWrapper<T> Passed(T* scoper) {
template <typename T>
static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
- return internal::IgnoreResultHelper<T>(data);
-}
-
-template <typename T>
-static inline internal::IgnoreResultHelper<Callback<T> >
-IgnoreResult(const Callback<T>& data) {
- return internal::IgnoreResultHelper<Callback<T> >(data);
+ return internal::IgnoreResultHelper<T>(std::move(data));
}
BASE_EXPORT void DoNothing();
@@ -627,6 +556,26 @@ void DeletePointer(T* obj) {
delete obj;
}
+// An injection point to control |this| pointer behavior on a method invocation.
+// If IsWeakReceiver<> is true_type for |T| and |T| is used for a receiver of a
+// method, base::Bind cancels the method invocation if the receiver is tested as
+// false.
+// E.g. Foo::bar() is not called:
+// struct Foo : base::SupportsWeakPtr<Foo> {
+// void bar() {}
+// };
+//
+// WeakPtr<Foo> oo = nullptr;
+// base::Bind(&Foo::bar, oo).Run();
+template <typename T>
+struct IsWeakReceiver : std::false_type {};
+
+template <typename T>
+struct IsWeakReceiver<internal::ConstRefWrapper<T>> : IsWeakReceiver<T> {};
+
+template <typename T>
+struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+
} // namespace base
#endif // BASE_BIND_HELPERS_H_
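
A sketch of the new customization point: a hypothetical weak-handle type (|MyWeakHandle| is illustrative, not a real base/ type) opts into the cancel-if-gone behavior by specializing IsWeakReceiver. The handle must also be boolean-testable and dereferenceable, since InvokeHelper<true, void> tests it and then forwards it to Run():

#include <type_traits>

#include "base/bind_helpers.h"

template <typename T>
class MyWeakHandle {
 public:
  explicit MyWeakHandle(T* ptr) : ptr_(ptr) {}
  explicit operator bool() const { return ptr_ != nullptr; }
  T& operator*() const { return *ptr_; }
  T* get() const { return ptr_; }

 private:
  T* ptr_;
};

namespace base {

// Methods bound with a MyWeakHandle<T> receiver are skipped when the handle
// tests false.
template <typename T>
struct IsWeakReceiver<MyWeakHandle<T>> : std::true_type {};

}  // namespace base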
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index 6e0a425eab0..c7d6c3b4adc 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -7,6 +7,7 @@
#include <stddef.h>
+#include <tuple>
#include <type_traits>
#include "base/bind_helpers.h"
@@ -63,22 +64,6 @@ namespace internal {
// into the Bind() system, doing most of the type resolution.
// There are ARITY BindState types.
-// HasNonConstReferenceParam selects true_type when any of the parameters in
-// |Sig| is a non-const reference.
-// Implementation note: This non-specialized case handles zero-arity case only.
-// Non-zero-arity cases should be handled by the specialization below.
-template <typename List>
-struct HasNonConstReferenceItem : std::false_type {};
-
-// Implementation note: Select true_type if the first parameter is a non-const
-// reference. Otherwise, skip the first parameter and check rest of parameters
-// recursively.
-template <typename T, typename... Args>
-struct HasNonConstReferenceItem<TypeList<T, Args...>>
- : std::conditional<is_non_const_reference<T>::value,
- std::true_type,
- HasNonConstReferenceItem<TypeList<Args...>>>::type {};
-
// HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
// pointer to a RefCounted type.
// Implementation note: This non-specialized case handles zero-arity case only.
@@ -155,7 +140,7 @@ class RunnableAdapter<R(*)(Args...)> {
}
template <typename... RunArgs>
- R Run(RunArgs&&... args) {
+ R Run(RunArgs&&... args) const {
return function_(std::forward<RunArgs>(args)...);
}
@@ -177,7 +162,7 @@ class RunnableAdapter<R(T::*)(Args...)> {
}
template <typename Receiver, typename... RunArgs>
- R Run(Receiver&& receiver_ptr, RunArgs&&... args) {
+ R Run(Receiver&& receiver_ptr, RunArgs&&... args) const {
// Clang skips CV qualifier check on a method pointer invocation when the
// receiver is a subclass. Store the receiver into a const reference to
// T to ensure the CV check works.
@@ -202,7 +187,7 @@ class RunnableAdapter<R(T::*)(Args...) const> {
}
template <typename Receiver, typename... RunArgs>
- R Run(Receiver&& receiver_ptr, RunArgs&&... args) {
+ R Run(Receiver&& receiver_ptr, RunArgs&&... args) const {
// Clang skips CV qualifier check on a method pointer invocation when the
    // receiver is a subclass. Store the receiver into an unqualified reference
// to T to ensure the CV check works.
@@ -293,42 +278,43 @@ MakeRunnable(const Callback<T>& t) {
//
// WeakCalls similarly need special syntax that is applied to the first
// argument to check if they should no-op themselves.
-template <bool IsWeakCall, typename ReturnType, typename Runnable>
+template <bool is_weak_call, typename ReturnType>
struct InvokeHelper;
-template <typename ReturnType, typename Runnable>
-struct InvokeHelper<false, ReturnType, Runnable> {
- template <typename... RunArgs>
- static ReturnType MakeItSo(Runnable runnable, RunArgs&&... args) {
- return runnable.Run(std::forward<RunArgs>(args)...);
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+ template <typename Runnable, typename... RunArgs>
+ static inline ReturnType MakeItSo(Runnable&& runnable, RunArgs&&... args) {
+ return std::forward<Runnable>(runnable).Run(std::forward<RunArgs>(args)...);
}
};
-template <typename Runnable>
-struct InvokeHelper<false, void, Runnable> {
- template <typename... RunArgs>
- static void MakeItSo(Runnable runnable, RunArgs&&... args) {
- runnable.Run(std::forward<RunArgs>(args)...);
+template <>
+struct InvokeHelper<false, void> {
+ template <typename Runnable, typename... RunArgs>
+ static inline void MakeItSo(Runnable&& runnable, RunArgs&&... args) {
+ std::forward<Runnable>(runnable).Run(std::forward<RunArgs>(args)...);
}
};
-template <typename Runnable>
-struct InvokeHelper<true, void, Runnable> {
- template <typename BoundWeakPtr, typename... RunArgs>
- static void MakeItSo(Runnable runnable,
- BoundWeakPtr weak_ptr,
+template <>
+struct InvokeHelper<true, void> {
+ template <typename Runnable, typename BoundWeakPtr, typename... RunArgs>
+ static void MakeItSo(Runnable&& runnable,
+ BoundWeakPtr&& weak_ptr,
RunArgs&&... args) {
- if (!weak_ptr.get()) {
+ if (!weak_ptr) {
return;
}
- runnable.Run(weak_ptr.get(), std::forward<RunArgs>(args)...);
+ std::forward<Runnable>(runnable).Run(
+ std::forward<BoundWeakPtr>(weak_ptr), std::forward<RunArgs>(args)...);
}
};
#if !defined(_MSC_VER)
-template <typename ReturnType, typename Runnable>
-struct InvokeHelper<true, ReturnType, Runnable> {
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
// WeakCalls are only supported for functions with a void return type.
  // Otherwise, the function result would be undefined if the WeakPtr<>
// is invalidated.
@@ -341,26 +327,42 @@ struct InvokeHelper<true, ReturnType, Runnable> {
// Invoker<>
//
// See description at the top of the file.
-template <typename BoundIndices, typename StorageType,
- typename InvokeHelperType, typename UnboundForwardRunType>
+template <typename StorageType, typename UnboundRunType>
struct Invoker;
-template <size_t... bound_indices,
- typename StorageType,
- typename InvokeHelperType,
- typename R,
- typename... UnboundArgs>
-struct Invoker<IndexSequence<bound_indices...>,
- StorageType,
- InvokeHelperType,
- R(UnboundArgs...)> {
+template <typename StorageType, typename R, typename... UnboundArgs>
+struct Invoker<StorageType, R(UnboundArgs...)> {
static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
- StorageType* storage = static_cast<StorageType*>(base);
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
// InvokeHelper<>::MakeItSo() call below.
- return InvokeHelperType::MakeItSo(
- storage->runnable_, Unwrap(get<bound_indices>(storage->bound_args_))...,
+ const StorageType* storage = static_cast<StorageType*>(base);
+ static constexpr size_t num_bound_args =
+ std::tuple_size<decltype(storage->bound_args_)>::value;
+ return RunImpl(storage->runnable_,
+ storage->bound_args_,
+ MakeIndexSequence<num_bound_args>(),
+ std::forward<UnboundArgs>(unbound_args)...);
+ }
+
+ template <typename Runnable, typename BoundArgsTuple, size_t... indices>
+ static inline R RunImpl(Runnable&& runnable,
+ BoundArgsTuple&& bound,
+ IndexSequence<indices...>,
+ UnboundArgs&&... unbound_args) {
+ static constexpr bool is_method =
+ HasIsMethodTag<typename std::decay<Runnable>::type>::value;
+
+ using DecayedArgsTuple = typename std::decay<BoundArgsTuple>::type;
+ static constexpr bool is_weak_call =
+ IsWeakMethod<is_method,
+ typename std::tuple_element<
+ indices,
+ DecayedArgsTuple>::type...>::value;
+
+ return InvokeHelper<is_weak_call, R>::MakeItSo(
+ std::forward<Runnable>(runnable),
+ Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
std::forward<UnboundArgs>(unbound_args)...);
}
};
@@ -384,51 +386,36 @@ template <bool is_method, typename... BoundArgs>
using MakeArgsStorage = typename MakeArgsStorageImpl<
is_method, typename std::decay<BoundArgs>::type...>::Type;
+// Used to implement MakeUnboundRunType.
+template <typename Functor, typename... BoundArgs>
+struct MakeUnboundRunTypeImpl {
+ using RunType = typename FunctorTraits<Functor>::RunType;
+ using ReturnType = ExtractReturnType<RunType>;
+ using Args = ExtractArgs<RunType>;
+ using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), Args>;
+ using Type = MakeFunctionType<ReturnType, UnboundArgs>;
+};
+
// BindState<>
//
// This stores all the state passed into Bind() and is also where most
// of the template resolution magic occurs.
//
// Runnable is the functor we are binding arguments to.
-// RunType is type of the Run() function that the Invoker<> should use.
-// Normally, this is the same as the RunType of the Runnable, but it can
-// be different if an adapter like IgnoreResult() has been used.
//
// BoundArgs contains the storage type for all the bound arguments.
-template <typename Runnable, typename RunType, typename... BoundArgs>
-struct BindState;
-
-template <typename Runnable,
- typename R,
- typename... Args,
- typename... BoundArgs>
-struct BindState<Runnable, R(Args...), BoundArgs...> final
- : public BindStateBase {
+template <typename Runnable, typename... BoundArgs>
+struct BindState final : public BindStateBase {
private:
- using StorageType = BindState<Runnable, R(Args...), BoundArgs...>;
- using RunnableType = Runnable;
-
- enum { is_method = HasIsMethodTag<Runnable>::value };
-
- // true_type if Runnable is a method invocation and the first bound argument
- // is a WeakPtr.
- using IsWeakCall =
- IsWeakMethod<is_method, typename std::decay<BoundArgs>::type...>;
+ using RunnableType = typename std::decay<Runnable>::type;
- using BoundIndices = MakeIndexSequence<sizeof...(BoundArgs)>;
- using InvokeHelperType = InvokeHelper<IsWeakCall::value, R, Runnable>;
-
- using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), TypeList<Args...>>;
+ static constexpr bool is_method = HasIsMethodTag<RunnableType>::value;
public:
- using UnboundRunType = MakeFunctionType<R, UnboundArgs>;
- using InvokerType =
- Invoker<BoundIndices, StorageType, InvokeHelperType, UnboundRunType>;
-
template <typename... ForwardArgs>
- BindState(const Runnable& runnable, ForwardArgs&&... bound_args)
+ explicit BindState(RunnableType runnable, ForwardArgs&&... bound_args)
: BindStateBase(&Destroy),
- runnable_(runnable),
+ runnable_(std::move(runnable)),
bound_args_(std::forward<ForwardArgs>(bound_args)...) {}
RunnableType runnable_;
@@ -443,6 +430,13 @@ struct BindState<Runnable, R(Args...), BoundArgs...> final
};
} // namespace internal
+
+// Returns a RunType of bound functor.
+// E.g. MakeUnboundRunType<R(A, B, C), A, B> is evaluated to R(C).
+template <typename Functor, typename... BoundArgs>
+using MakeUnboundRunType =
+ typename internal::MakeUnboundRunTypeImpl<Functor, BoundArgs...>::Type;
+
} // namespace base
#endif // BASE_BIND_INTERNAL_H_
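
A compile-time sketch of MakeUnboundRunType as defined above: binding the first two arguments of a three-argument function leaves a run type over the last one:

#include <type_traits>

#include "base/bind_internal.h"

static_assert(
    std::is_same<base::MakeUnboundRunType<void (*)(int, double, char),
                                          int, double>,
                 void(char)>::value,
    "two bound arguments leave void(char) unbound");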
diff --git a/chromium/base/bind_internal_win.h b/chromium/base/bind_internal_win.h
index 2def8743823..1d14717509e 100644
--- a/chromium/base/bind_internal_win.h
+++ b/chromium/base/bind_internal_win.h
@@ -36,7 +36,7 @@ class RunnableAdapter<R(__stdcall *)(Args...)> {
}
template <typename... RunArgs>
- R Run(RunArgs&&... args) {
+ R Run(RunArgs&&... args) const {
return function_(std::forward<RunArgs>(args)...);
}
@@ -57,7 +57,7 @@ class RunnableAdapter<R(__fastcall *)(Args...)> {
}
template <typename... RunArgs>
- R Run(RunArgs&&... args) {
+ R Run(RunArgs&&... args) const {
return function_(std::forward<RunArgs>(args)...);
}
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index 615ad330a92..5a7ce38627a 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -650,28 +650,6 @@ TEST_F(BindTest, ArrayArgumentBinding) {
EXPECT_EQ(3, const_array_cb.Run());
}
-// Verify SupportsAddRefAndRelease correctly introspects the class type for
-// AddRef() and Release().
-// - Class with AddRef() and Release()
-// - Class without AddRef() and Release()
-// - Derived Class with AddRef() and Release()
-// - Derived Class without AddRef() and Release()
-// - Derived Class with AddRef() and Release() and a private destructor.
-TEST_F(BindTest, SupportsAddRefAndRelease) {
- EXPECT_TRUE(internal::SupportsAddRefAndRelease<HasRef>::value);
- EXPECT_FALSE(internal::SupportsAddRefAndRelease<NoRef>::value);
-
- // StrictMock<T> is a derived class of T. So, we use StrictMock<HasRef> and
- // StrictMock<NoRef> to test that SupportsAddRefAndRelease works over
- // inheritance.
- EXPECT_TRUE(internal::SupportsAddRefAndRelease<StrictMock<HasRef> >::value);
- EXPECT_FALSE(internal::SupportsAddRefAndRelease<StrictMock<NoRef> >::value);
-
- // This matters because the implementation creates a dummy class that
- // inherits from the template type.
- EXPECT_TRUE(internal::SupportsAddRefAndRelease<HasRefPrivateDtor>::value);
-}
-
// Unretained() wrapper support.
// - Method bound to Unretained() non-const object.
// - Const method bound to Unretained() non-const object.
diff --git a/chromium/base/bind_unittest.nc b/chromium/base/bind_unittest.nc
index 5e9cff82bde..70743f91779 100644
--- a/chromium/base/bind_unittest.nc
+++ b/chromium/base/bind_unittest.nc
@@ -146,7 +146,7 @@ void WontCompile() {
ref_arg_cb.Run(p);
}
-#elif defined(NCTEST_DISALLOW_BIND_TO_NON_CONST_REF_PARAM) // [r"fatal error: static_assert failed \"do not bind functions with nonconst ref\""]
+#elif defined(NCTEST_DISALLOW_BIND_TO_NON_CONST_REF_PARAM) // [r"fatal error: binding value of type 'const base::Parent' to reference to type 'base::Parent' drops 'const' qualifier"]
// Binding functions with reference parameters, unsupported.
//
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index abb907bef95..e087c731d18 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -345,14 +345,13 @@
// please include "base/callback_forward.h" instead.
namespace base {
-namespace internal {
-template <typename Runnable, typename RunType, typename... BoundArgsType>
-struct BindState;
-} // namespace internal
template <typename R, typename... Args, internal::CopyMode copy_mode>
class Callback<R(Args...), copy_mode>
: public internal::CallbackBase<copy_mode> {
+ private:
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+
public:
// MSVC 2013 doesn't support Type Alias of function types.
// Revisit this after we update it to newer version.
@@ -360,16 +359,9 @@ class Callback<R(Args...), copy_mode>
Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
- template <typename Runnable, typename BindRunType, typename... BoundArgs>
- explicit Callback(
- internal::BindState<Runnable, BindRunType, BoundArgs...>* bind_state)
+ Callback(internal::BindStateBase* bind_state,
+ PolymorphicInvoke invoke_func)
: internal::CallbackBase<copy_mode>(bind_state) {
- // Force the assignment to a local variable of PolymorphicInvoke
- // so the compiler will typecheck that the passed in Run() method has
- // the correct type.
- PolymorphicInvoke invoke_func =
- &internal::BindState<Runnable, BindRunType, BoundArgs...>
- ::InvokerType::Run;
using InvokeFuncStorage =
typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
this->polymorphic_invoke_ =
@@ -396,9 +388,6 @@ class Callback<R(Args...), copy_mode>
reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
return f(this->bind_state_.get(), std::forward<Args>(args)...);
}
-
- private:
- using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
};
} // namespace base
diff --git a/chromium/base/callback_helpers.cc b/chromium/base/callback_helpers.cc
index ef02b2bde0c..8fd3dde6ca4 100644
--- a/chromium/base/callback_helpers.cc
+++ b/chromium/base/callback_helpers.cc
@@ -8,18 +8,25 @@
namespace base {
-ScopedClosureRunner::ScopedClosureRunner() {
-}
+ScopedClosureRunner::ScopedClosureRunner() {}
ScopedClosureRunner::ScopedClosureRunner(const Closure& closure)
- : closure_(closure) {
-}
+ : closure_(closure) {}
ScopedClosureRunner::~ScopedClosureRunner() {
if (!closure_.is_null())
closure_.Run();
}
+ScopedClosureRunner::ScopedClosureRunner(ScopedClosureRunner&& other)
+ : closure_(other.Release()) {}
+
+ScopedClosureRunner& ScopedClosureRunner::operator=(
+ ScopedClosureRunner&& other) {
+ Reset(other.Release());
+ return *this;
+}
+
void ScopedClosureRunner::Reset() {
Closure old_closure = Release();
if (!old_closure.is_null())
diff --git a/chromium/base/callback_helpers.h b/chromium/base/callback_helpers.h
index 860803989f4..c4b0d867634 100644
--- a/chromium/base/callback_helpers.h
+++ b/chromium/base/callback_helpers.h
@@ -27,16 +27,27 @@ base::Callback<Sig> ResetAndReturn(base::Callback<Sig>* cb) {
return ret;
}
-// ScopedClosureRunner is akin to scoped_ptr for Closures. It ensures that the
-// Closure is executed and deleted no matter how the current scope exits.
+// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
+// that the Closure is executed no matter how the current scope exits.
class BASE_EXPORT ScopedClosureRunner {
public:
ScopedClosureRunner();
explicit ScopedClosureRunner(const Closure& closure);
~ScopedClosureRunner();
+ ScopedClosureRunner(ScopedClosureRunner&& other);
+
+ // Calls the current closure if it's set and replaces it with the closure from
+ // |other|.
+ ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
+
+  // Calls the current closure and resets it, so it won't be called again.
void Reset();
+
+ // Calls the current closure and replaces it with the new one.
void Reset(const Closure& closure);
+
+  // Releases the Closure without calling it.
Closure Release() WARN_UNUSED_RESULT;
private:
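
A minimal usage sketch of the API above (LogDone() is an illustrative closure target): the closure runs exactly once, and ownership can now be transferred between runners:

#include <utility>

#include "base/bind.h"
#include "base/callback_helpers.h"

void LogDone() {}  // Illustrative closure target.

void DoWork() {
  base::ScopedClosureRunner on_exit(base::Bind(&LogDone));
  base::ScopedClosureRunner moved = std::move(on_exit);
  // LogDone() runs when |moved| goes out of scope; |on_exit| no longer owns
  // the closure.
}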
diff --git a/chromium/base/callback_helpers_unittest.cc b/chromium/base/callback_helpers_unittest.cc
index 3b17a6b754c..0e42852f673 100644
--- a/chromium/base/callback_helpers_unittest.cc
+++ b/chromium/base/callback_helpers_unittest.cc
@@ -58,4 +58,47 @@ TEST(BindHelpersTest, TestScopedClosureRunnerReset) {
EXPECT_EQ(1, run_count_3);
}
+TEST(BindHelpersTest, TestScopedClosureRunnerMoveConstructor) {
+ int run_count = 0;
+ {
+ std::unique_ptr<base::ScopedClosureRunner> runner(
+ new base::ScopedClosureRunner(base::Bind(&Increment, &run_count)));
+ base::ScopedClosureRunner runner2(std::move(*runner));
+ runner.reset();
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(BindHelpersTest, TestScopedClosureRunnerMoveAssignment) {
+ int run_count = 0;
+ {
+ base::ScopedClosureRunner runner;
+ {
+ base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count));
+ runner = std::move(runner2);
+ }
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(BindHelpersTest, TestScopedClosureRunnerRunOnReplace) {
+ int run_count1 = 0;
+ int run_count2 = 0;
+ {
+ base::ScopedClosureRunner runner1(base::Bind(&Increment, &run_count1));
+ {
+ base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count2));
+ runner1 = std::move(runner2);
+ EXPECT_EQ(1, run_count1);
+ EXPECT_EQ(0, run_count2);
+ }
+ EXPECT_EQ(1, run_count1);
+ EXPECT_EQ(0, run_count2);
+ }
+ EXPECT_EQ(1, run_count1);
+ EXPECT_EQ(1, run_count2);
+}
+
} // namespace
diff --git a/chromium/base/callback_unittest.cc b/chromium/base/callback_unittest.cc
index 176ea0650ab..ce453a10758 100644
--- a/chromium/base/callback_unittest.cc
+++ b/chromium/base/callback_unittest.cc
@@ -14,63 +14,38 @@
namespace base {
-namespace {
-
-struct FakeInvoker {
- // MSVC 2013 doesn't support Type Alias of function types.
- // Revisit this after we update it to newer version.
- typedef void RunType(internal::BindStateBase*);
- static void Run(internal::BindStateBase*) {
- }
-};
-
-} // namespace
-
-namespace internal {
+void NopInvokeFunc(internal::BindStateBase*) {}
// White-box testpoints to inject into a Callback<> object for checking
// comparators and emptiness APIs. Use a BindState that is specialized
// based on a type we declared in the anonymous namespace above to remove any
// chance of colliding with another instantiation and breaking the
// one-definition-rule.
-template <>
-struct BindState<void(), void(), FakeInvoker>
- : public BindStateBase {
- public:
- BindState() : BindStateBase(&Destroy) {}
- using InvokerType = FakeInvoker;
+struct FakeBindState1 : internal::BindStateBase {
+ FakeBindState1() : BindStateBase(&Destroy) {}
private:
- ~BindState() {}
- static void Destroy(BindStateBase* self) {
- delete static_cast<BindState*>(self);
+ ~FakeBindState1() {}
+ static void Destroy(internal::BindStateBase* self) {
+ delete static_cast<FakeBindState1*>(self);
}
};
-template <>
-struct BindState<void(), void(), FakeInvoker, FakeInvoker>
- : public BindStateBase {
- public:
- BindState() : BindStateBase(&Destroy) {}
- using InvokerType = FakeInvoker;
+struct FakeBindState2 : internal::BindStateBase {
+ FakeBindState2() : BindStateBase(&Destroy) {}
private:
- ~BindState() {}
- static void Destroy(BindStateBase* self) {
- delete static_cast<BindState*>(self);
+ ~FakeBindState2() {}
+ static void Destroy(internal::BindStateBase* self) {
+ delete static_cast<FakeBindState2*>(self);
}
};
-} // namespace internal
namespace {
-using FakeBindState1 = internal::BindState<void(), void(), FakeInvoker>;
-using FakeBindState2 =
- internal::BindState<void(), void(), FakeInvoker, FakeInvoker>;
-
class CallbackTest : public ::testing::Test {
public:
CallbackTest()
- : callback_a_(new FakeBindState1()),
- callback_b_(new FakeBindState2()) {
+ : callback_a_(new FakeBindState1(), &NopInvokeFunc),
+ callback_b_(new FakeBindState2(), &NopInvokeFunc) {
}
~CallbackTest() override {}
@@ -113,7 +88,7 @@ TEST_F(CallbackTest, Equals) {
EXPECT_FALSE(callback_b_.Equals(callback_a_));
// We should compare based on instance, not type.
- Callback<void()> callback_c(new FakeBindState1());
+ Callback<void()> callback_c(new FakeBindState1(), &NopInvokeFunc);
Callback<void()> callback_a2 = callback_a_;
EXPECT_TRUE(callback_a_.Equals(callback_a2));
EXPECT_FALSE(callback_a_.Equals(callback_c));
diff --git a/chromium/base/chromeos/logging.h b/chromium/base/chromeos/logging.h
new file mode 100644
index 00000000000..558d8d302d9
--- /dev/null
+++ b/chromium/base/chromeos/logging.h
@@ -0,0 +1,28 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CHROMEOS_LOGGING_H_
+#define BASE_CHROMEOS_LOGGING_H_
+
+#include "base/logging.h"
+
+namespace logging {
+
+#if defined(OS_CHROMEOS)
+
+// These macros are used to log events on Chrome OS that we want included in
+// the device's system log.
+#define CHROMEOS_SYSLOG(severity) LOG(severity)
+#define CHROMEOS_SYSLOG_IF(severity, condition) LOG_IF(severity, condition)
+
+#else // Not defined(OS_CHROMEOS)
+
+#define CHROMEOS_SYSLOG(severity) LOG_IF(severity, false)
+#define CHROMEOS_SYSLOG_IF(severity, condition) LOG_IF(severity, false)
+
+#endif // defined(OS_CHROMEOS)
+
+} // namespace logging
+
+#endif // BASE_CHROMEOS_LOGGING_H_
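
An illustrative use of the new macros (the function and messages are hypothetical): on Chrome OS they log to the system log, elsewhere they compile to disabled LOG_IF statements:

#include "base/chromeos/logging.h"

void ReportMountResult(int error_code) {
  CHROMEOS_SYSLOG_IF(ERROR, error_code != 0)
      << "Mount failed with error " << error_code;
  CHROMEOS_SYSLOG(INFO) << "Mount attempt finished";
}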
diff --git a/chromium/base/command_line.cc b/chromium/base/command_line.cc
index c991959d691..99ea2b00032 100644
--- a/chromium/base/command_line.cc
+++ b/chromium/base/command_line.cc
@@ -197,6 +197,17 @@ void CommandLine::set_slash_is_not_a_switch() {
DCHECK_EQ(wcscmp(kSwitchPrefixes[arraysize(kSwitchPrefixes) - 1], L"/"), 0);
switch_prefix_count = arraysize(kSwitchPrefixes) - 1;
}
+
+// static
+void CommandLine::InitUsingArgvForTesting(int argc, const char* const* argv) {
+ DCHECK(!current_process_commandline_);
+ current_process_commandline_ = new CommandLine(NO_PROGRAM);
+ // On Windows we need to convert the command line arguments to string16.
+ base::CommandLine::StringVector argv_vector;
+ for (int i = 0; i < argc; ++i)
+ argv_vector.push_back(UTF8ToUTF16(argv[i]));
+ current_process_commandline_->InitFromArgv(argv_vector);
+}
#endif
// static
diff --git a/chromium/base/command_line.h b/chromium/base/command_line.h
index 3de8873e26a..860d1c2c06b 100644
--- a/chromium/base/command_line.h
+++ b/chromium/base/command_line.h
@@ -69,6 +69,13 @@ class BASE_EXPORT CommandLine {
// object and the behavior will be the same as Posix systems (only hyphens
// begin switches, everything else will be an arg).
static void set_slash_is_not_a_switch();
+
+ // Normally when the CommandLine singleton is initialized it gets the command
+ // line via the GetCommandLineW API and then uses the shell32 API
+ // CommandLineToArgvW to parse the command line and convert it back to
+  // argc and argv. Tests that don't want this dependency on shell32 and need
+ // to honor the arguments passed in should use this function.
+ static void InitUsingArgvForTesting(int argc, const char* const* argv);
#endif
// Initialize the current process CommandLine singleton. On Windows, ignores
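
A sketch of how a Windows test could honor its own arguments via the new hook, per the comment above (the arguments shown are illustrative):

#include "base/command_line.h"

void InitCommandLineForTest() {
  const char* argv[] = {"test.exe", "--enable-foo", "bar"};
  base::CommandLine::InitUsingArgvForTesting(3, argv);
}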
diff --git a/chromium/base/debug/profiler.cc b/chromium/base/debug/profiler.cc
index a4426ab3072..b19e7ecd00f 100644
--- a/chromium/base/debug/profiler.cc
+++ b/chromium/base/debug/profiler.cc
@@ -154,8 +154,8 @@ bool FindResolutionFunctionInImports(
FunctionSearchContext* context =
reinterpret_cast<FunctionSearchContext*>(cookie);
- DCHECK_NE(nullptr, context);
- DCHECK_EQ(nullptr, context->function);
+ DCHECK(context);
+ DCHECK(!context->function);
// Our import address table contains pointers to the functions we import
// at this point. Let's retrieve the first such function and use it to
diff --git a/chromium/base/debug/stack_trace.cc b/chromium/base/debug/stack_trace.cc
index 1c96a569d97..ac0ead76be2 100644
--- a/chromium/base/debug/stack_trace.cc
+++ b/chromium/base/debug/stack_trace.cc
@@ -11,6 +11,12 @@
#include "base/macros.h"
+#if HAVE_TRACE_STACK_FRAME_POINTERS && defined(OS_ANDROID)
+#include <pthread.h>
+#include "base/process/process_handle.h"
+#include "base/threading/platform_thread.h"
+#endif
+
namespace base {
namespace debug {
@@ -41,6 +47,44 @@ std::string StackTrace::ToString() const {
#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if defined(OS_ANDROID)
+
+static uintptr_t GetStackEnd() {
+  // Bionic reads /proc/maps on every call to pthread_getattr_np() when called
+  // from the main thread, so we cache the end of the main thread's stack to
+  // keep performance acceptable.
+  // For all other threads pthread_getattr_np() is fast enough, as it just
+  // reads values from its pthread_t argument.
+ static uintptr_t main_stack_end = 0;
+
+ bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+
+ if (is_main_thread && main_stack_end) {
+ return main_stack_end;
+ }
+
+ uintptr_t stack_begin = 0;
+ size_t stack_size = 0;
+ pthread_attr_t attributes;
+ int error = pthread_getattr_np(pthread_self(), &attributes);
+ if (!error) {
+ error = pthread_attr_getstack(
+ &attributes,
+ reinterpret_cast<void**>(&stack_begin),
+ &stack_size);
+ pthread_attr_destroy(&attributes);
+ }
+ DCHECK(!error);
+
+ uintptr_t stack_end = stack_begin + stack_size;
+ if (is_main_thread) {
+ main_stack_end = stack_end;
+ }
+ return stack_end;
+}
+
+#endif // defined(OS_ANDROID)
+
size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial) {
@@ -49,6 +93,10 @@ size_t TraceStackFramePointers(const void** out_trace,
// be valid.
uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+#if defined(OS_ANDROID)
+ uintptr_t stack_end = GetStackEnd();
+#endif
+
size_t depth = 0;
while (depth < max_depth) {
#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
@@ -58,6 +106,13 @@ size_t TraceStackFramePointers(const void** out_trace,
sp -= sizeof(uintptr_t);
#endif
+#if defined(OS_ANDROID)
+  // Both sp[0] and sp[1] must be valid.
+ if (sp + 2 * sizeof(uintptr_t) > stack_end) {
+ break;
+ }
+#endif
+
if (skip_initial != 0) {
skip_initial--;
} else {
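
A sketch of a caller of TraceStackFramePointers() (available only when HAVE_TRACE_STACK_FRAME_POINTERS is set): capture up to 32 frames, skipping the capturing helper itself:

#include <stddef.h>

#include "base/debug/stack_trace.h"

size_t CaptureFrames(const void* frames[32]) {
  return base::debug::TraceStackFramePointers(frames, 32, /*skip_initial=*/1);
}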
diff --git a/chromium/base/debug/stack_trace_win.cc b/chromium/base/debug/stack_trace_win.cc
index 95da513e66c..9369663b95a 100644
--- a/chromium/base/debug/stack_trace_win.cc
+++ b/chromium/base/debug/stack_trace_win.cc
@@ -9,14 +9,13 @@
#include <stddef.h>
#include <iostream>
+#include <memory>
+#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/singleton.h"
-#include "base/process/launch.h"
-#include "base/strings/string_util.h"
#include "base/synchronization/lock.h"
-#include "base/win/windows_version.h"
namespace base {
namespace debug {
diff --git a/chromium/base/deferred_sequenced_task_runner_unittest.cc b/chromium/base/deferred_sequenced_task_runner_unittest.cc
index 1ddc3542496..e34827ab9ee 100644
--- a/chromium/base/deferred_sequenced_task_runner_unittest.cc
+++ b/chromium/base/deferred_sequenced_task_runner_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind_helpers.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
+#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/non_thread_safe.h"
#include "base/threading/thread.h"
@@ -69,14 +70,14 @@ class DeferredSequencedTaskRunnerTest : public testing::Test,
TEST_F(DeferredSequencedTaskRunnerTest, Stopped) {
PostExecuteTask(1);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
}
TEST_F(DeferredSequencedTaskRunnerTest, Start) {
StartRunner();
PostExecuteTask(1);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1));
}
@@ -85,34 +86,34 @@ TEST_F(DeferredSequencedTaskRunnerTest, StartWithMultipleElements) {
for (int i = 1; i < 5; ++i)
PostExecuteTask(i);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2, 3, 4));
}
TEST_F(DeferredSequencedTaskRunnerTest, DeferredStart) {
PostExecuteTask(1);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
StartRunner();
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1));
PostExecuteTask(2);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2));
}
TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleElements) {
for (int i = 1; i < 5; ++i)
PostExecuteTask(i);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
StartRunner();
for (int i = 5; i < 9; ++i)
PostExecuteTask(i);
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
}
@@ -139,7 +140,7 @@ TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleThreads) {
}
}
- loop_.RunUntilIdle();
+ base::RunLoop().RunUntilIdle();
EXPECT_THAT(executed_task_ids_,
testing::WhenSorted(testing::ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
diff --git a/chromium/base/environment.cc b/chromium/base/environment.cc
index 9eef42967b2..534a7a88127 100644
--- a/chromium/base/environment.cc
+++ b/chromium/base/environment.cc
@@ -8,6 +8,7 @@
#include <vector>
+#include "base/memory/ptr_util.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
@@ -25,7 +26,7 @@ namespace {
class EnvironmentImpl : public Environment {
public:
- bool GetVar(const char* variable_name, std::string* result) override {
+ bool GetVar(StringPiece variable_name, std::string* result) override {
if (GetVarImpl(variable_name, result))
return true;
@@ -44,19 +45,19 @@ class EnvironmentImpl : public Environment {
return GetVarImpl(alternate_case_var.c_str(), result);
}
- bool SetVar(const char* variable_name,
+ bool SetVar(StringPiece variable_name,
const std::string& new_value) override {
return SetVarImpl(variable_name, new_value);
}
- bool UnSetVar(const char* variable_name) override {
+ bool UnSetVar(StringPiece variable_name) override {
return UnSetVarImpl(variable_name);
}
private:
- bool GetVarImpl(const char* variable_name, std::string* result) {
+ bool GetVarImpl(StringPiece variable_name, std::string* result) {
#if defined(OS_POSIX)
- const char* env_value = getenv(variable_name);
+ const char* env_value = getenv(variable_name.data());
if (!env_value)
return false;
// Note that the variable may be defined but empty.
@@ -64,8 +65,8 @@ class EnvironmentImpl : public Environment {
*result = env_value;
return true;
#elif defined(OS_WIN)
- DWORD value_length = ::GetEnvironmentVariable(
- UTF8ToWide(variable_name).c_str(), NULL, 0);
+ DWORD value_length =
+ ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr, 0);
if (value_length == 0)
return false;
if (result) {
@@ -80,10 +81,10 @@ class EnvironmentImpl : public Environment {
#endif
}
- bool SetVarImpl(const char* variable_name, const std::string& new_value) {
+ bool SetVarImpl(StringPiece variable_name, const std::string& new_value) {
#if defined(OS_POSIX)
// On success, zero is returned.
- return !setenv(variable_name, new_value.c_str(), 1);
+ return !setenv(variable_name.data(), new_value.c_str(), 1);
#elif defined(OS_WIN)
// On success, a nonzero value is returned.
return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(),
@@ -91,13 +92,13 @@ class EnvironmentImpl : public Environment {
#endif
}
- bool UnSetVarImpl(const char* variable_name) {
+ bool UnSetVarImpl(StringPiece variable_name) {
#if defined(OS_POSIX)
// On success, zero is returned.
- return !unsetenv(variable_name);
+ return !unsetenv(variable_name.data());
#elif defined(OS_WIN)
// On success, a nonzero value is returned.
- return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), NULL);
+ return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr);
#endif
}
};
@@ -134,12 +135,12 @@ const char kHome[] = "HOME";
Environment::~Environment() {}
// static
-Environment* Environment::Create() {
- return new EnvironmentImpl();
+std::unique_ptr<Environment> Environment::Create() {
+ return MakeUnique<EnvironmentImpl>();
}
-bool Environment::HasVar(const char* variable_name) {
- return GetVar(variable_name, NULL);
+bool Environment::HasVar(StringPiece variable_name) {
+ return GetVar(variable_name, nullptr);
}
#if defined(OS_WIN)
diff --git a/chromium/base/environment.h b/chromium/base/environment.h
index 12eeaf7ebb1..3a4ed04e4bb 100644
--- a/chromium/base/environment.h
+++ b/chromium/base/environment.h
@@ -11,6 +11,7 @@
#include "base/base_export.h"
#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
#include "build/build_config.h"
namespace base {
@@ -27,23 +28,22 @@ class BASE_EXPORT Environment {
public:
virtual ~Environment();
- // Static factory method that returns the implementation that provide the
- // appropriate platform-specific instance.
- static Environment* Create();
+ // Returns the appropriate platform-specific instance.
+ static std::unique_ptr<Environment> Create();
// Gets an environment variable's value and stores it in |result|.
// Returns false if the key is unset.
- virtual bool GetVar(const char* variable_name, std::string* result) = 0;
+ virtual bool GetVar(StringPiece variable_name, std::string* result) = 0;
- // Syntactic sugar for GetVar(variable_name, NULL);
- virtual bool HasVar(const char* variable_name);
+ // Syntactic sugar for GetVar(variable_name, nullptr);
+ virtual bool HasVar(StringPiece variable_name);
// Returns true on success, otherwise returns false.
- virtual bool SetVar(const char* variable_name,
+ virtual bool SetVar(StringPiece variable_name,
const std::string& new_value) = 0;
// Returns true on success, otherwise returns false.
- virtual bool UnSetVar(const char* variable_name) = 0;
+ virtual bool UnSetVar(StringPiece variable_name) = 0;
};
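
A sketch against the updated interface: Create() now returns a std::unique_ptr and the accessors take StringPiece (the helper name is illustrative):

#include <memory>
#include <string>

#include "base/environment.h"

bool GetHomeDirectory(std::string* out) {
  std::unique_ptr<base::Environment> env = base::Environment::Create();
  return env->GetVar("HOME", out);
}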
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index 46732108dd6..435165e10ca 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -23,6 +23,9 @@ namespace {
// have more control over initialization timing. Leaky.
FeatureList* g_instance = nullptr;
+// Tracks whether the FeatureList instance was initialized via an accessor.
+bool g_initialized_from_accessor = false;
+
// Some characters are not allowed to appear in feature names or the associated
// field trial names, as they are used as special characters for command-line
// serialization. This function checks that the strings are ASCII (since they
@@ -35,10 +38,7 @@ bool IsValidFeatureOrFieldTrialName(const std::string& name) {
} // namespace
-FeatureList::FeatureList()
- : initialized_(false),
- initialized_from_command_line_(false) {
-}
+FeatureList::FeatureList() {}
FeatureList::~FeatureList() {}
@@ -133,7 +133,11 @@ void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
// static
bool FeatureList::IsEnabled(const Feature& feature) {
- return GetInstance()->IsFeatureEnabled(feature);
+ if (!g_instance) {
+ g_initialized_from_accessor = true;
+ return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+ }
+ return g_instance->IsFeatureEnabled(feature);
}
// static
@@ -158,6 +162,10 @@ bool FeatureList::InitializeInstance(const std::string& enable_features,
// For example, we initialize an instance in chrome/browser/
// chrome_browser_main.cc and do not override it in content/browser/
// browser_main_loop.cc.
+ // If the singleton was previously initialized from within an accessor, we
+ // want to prevent callers from reinitializing the singleton and masking the
+ // accessor call(s) which likely returned incorrect information.
+ CHECK(!g_initialized_from_accessor);
bool instance_existed_before = false;
if (g_instance) {
if (g_instance->initialized_from_command_line_)
@@ -192,6 +200,7 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
void FeatureList::ClearInstanceForTesting() {
delete g_instance;
g_instance = nullptr;
+ g_initialized_from_accessor = false;
}
void FeatureList::FinalizeInitialization() {
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
index 2a47427fb26..e9ed00a124a 100644
--- a/chromium/base/feature_list.h
+++ b/chromium/base/feature_list.h
@@ -247,10 +247,10 @@ class BASE_EXPORT FeatureList {
// Whether this object has been fully initialized. This gets set to true as a
// result of FinalizeInitialization().
- bool initialized_;
+ bool initialized_ = false;
// Whether this object has been initialized from command line.
- bool initialized_from_command_line_;
+ bool initialized_from_command_line_ = false;
DISALLOW_COPY_AND_ASSIGN(FeatureList);
};
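
A sketch of a feature check against the new accessor behavior: when no FeatureList instance has been registered, IsEnabled() now falls back to the feature's default state instead of requiring a registered instance (|kMyFeature| is illustrative):

#include "base/feature_list.h"

const base::Feature kMyFeature{"MyFeature", base::FEATURE_DISABLED_BY_DEFAULT};

bool IsMyFeatureEnabled() {
  // Returns false here unless a FeatureList instance overrides the default.
  return base::FeatureList::IsEnabled(kMyFeature);
}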
diff --git a/chromium/base/feature_list_unittest.cc b/chromium/base/feature_list_unittest.cc
index a7e7b71183b..9d1dcb72f30 100644
--- a/chromium/base/feature_list_unittest.cc
+++ b/chromium/base/feature_list_unittest.cc
@@ -457,4 +457,15 @@ TEST_F(FeatureListTest, InitializeInstance) {
EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
}
+TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
+ ClearFeatureListInstance();
+  // This test case simulates the calling pattern found in code that does not
+  // explicitly initialize the FeatureList instance.
+ // All IsEnabled() calls should return the default value in this scenario.
+ EXPECT_EQ(nullptr, FeatureList::GetInstance());
+ EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+ EXPECT_EQ(nullptr, FeatureList::GetInstance());
+ EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
} // namespace base
diff --git a/chromium/base/file_version_info_win.cc b/chromium/base/file_version_info_win.cc
index 02a14db0032..00261b76df5 100644
--- a/chromium/base/file_version_info_win.cc
+++ b/chromium/base/file_version_info_win.cc
@@ -6,48 +6,63 @@
#include <windows.h>
#include <stddef.h>
-#include <stdint.h>
-#include "base/file_version_info.h"
#include "base/files/file_path.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/threading/thread_restrictions.h"
+#include "base/win/resource_util.h"
using base::FilePath;
-FileVersionInfoWin::FileVersionInfoWin(void* data,
- WORD language,
- WORD code_page)
- : language_(language), code_page_(code_page) {
- base::ThreadRestrictions::AssertIOAllowed();
- data_.reset((char*) data);
- fixed_file_info_ = NULL;
- UINT size;
- ::VerQueryValue(data_.get(), L"\\", (LPVOID*)&fixed_file_info_, &size);
+namespace {
+
+struct LanguageAndCodePage {
+ WORD language;
+ WORD code_page;
+};
+
+// Returns the \\VarFileInfo\\Translation value extracted from the
+// VS_VERSION_INFO resource in |data|.
+LanguageAndCodePage* GetTranslate(const void* data) {
+ LanguageAndCodePage* translate = nullptr;
+ UINT length;
+ if (::VerQueryValue(data, L"\\VarFileInfo\\Translation",
+ reinterpret_cast<void**>(&translate), &length)) {
+ return translate;
+ }
+ return nullptr;
}
-FileVersionInfoWin::~FileVersionInfoWin() {
- DCHECK(data_.get());
+VS_FIXEDFILEINFO* GetVsFixedFileInfo(const void* data) {
+ VS_FIXEDFILEINFO* fixed_file_info = nullptr;
+ UINT length;
+ if (::VerQueryValue(data, L"\\", reinterpret_cast<void**>(&fixed_file_info),
+ &length)) {
+ return fixed_file_info;
+ }
+ return nullptr;
}
-typedef struct {
- WORD language;
- WORD code_page;
-} LanguageAndCodePage;
+} // namespace
+
+FileVersionInfoWin::~FileVersionInfoWin() = default;
// static
FileVersionInfo* FileVersionInfo::CreateFileVersionInfoForModule(
HMODULE module) {
- // Note that the use of MAX_PATH is basically in line with what we do for
- // all registered paths (PathProviderWin).
- wchar_t system_buffer[MAX_PATH];
- system_buffer[0] = 0;
- if (!GetModuleFileName(module, system_buffer, MAX_PATH))
- return NULL;
+ void* data;
+ size_t version_info_length;
+ const bool has_version_resource = base::win::GetResourceFromModule(
+ module, VS_VERSION_INFO, RT_VERSION, &data, &version_info_length);
+ if (!has_version_resource)
+ return nullptr;
+
+ const LanguageAndCodePage* translate = GetTranslate(data);
+ if (!translate)
+ return nullptr;
- FilePath app_path(system_buffer);
- return CreateFileVersionInfo(app_path);
+ return new FileVersionInfoWin(data, translate->language,
+ translate->code_page);
}
// static
@@ -57,32 +72,21 @@ FileVersionInfo* FileVersionInfo::CreateFileVersionInfo(
DWORD dummy;
const wchar_t* path = file_path.value().c_str();
- DWORD length = ::GetFileVersionInfoSize(path, &dummy);
+ const DWORD length = ::GetFileVersionInfoSize(path, &dummy);
if (length == 0)
- return NULL;
+ return nullptr;
- void* data = calloc(length, 1);
- if (!data)
- return NULL;
+ std::vector<uint8_t> data(length, 0);
- if (!::GetFileVersionInfo(path, dummy, length, data)) {
- free(data);
- return NULL;
- }
-
- LanguageAndCodePage* translate = NULL;
- uint32_t page_count;
- BOOL query_result = VerQueryValue(data, L"\\VarFileInfo\\Translation",
- (void**) &translate, &page_count);
+ if (!::GetFileVersionInfo(path, dummy, length, data.data()))
+ return nullptr;
- if (query_result && translate) {
- return new FileVersionInfoWin(data, translate->language,
- translate->code_page);
+ const LanguageAndCodePage* translate = GetTranslate(data.data());
+ if (!translate)
+ return nullptr;
- } else {
- free(data);
- return NULL;
- }
+ return new FileVersionInfoWin(std::move(data), translate->language,
+ translate->code_page);
}
base::string16 FileVersionInfoWin::company_name() {
@@ -175,7 +179,7 @@ bool FileVersionInfoWin::GetValue(const wchar_t* name,
L"\\StringFileInfo\\%04x%04x\\%ls", language, code_page, name);
LPVOID value = NULL;
uint32_t size;
- BOOL r = ::VerQueryValue(data_.get(), sub_block, &value, &size);
+ BOOL r = ::VerQueryValue(data_, sub_block, &value, &size);
if (r && value) {
value_str->assign(static_cast<wchar_t*>(value));
return true;
@@ -191,3 +195,24 @@ std::wstring FileVersionInfoWin::GetStringValue(const wchar_t* name) {
else
return L"";
}
+
+FileVersionInfoWin::FileVersionInfoWin(std::vector<uint8_t>&& data,
+ WORD language,
+ WORD code_page)
+ : owned_data_(std::move(data)),
+ data_(owned_data_.data()),
+ language_(language),
+ code_page_(code_page),
+ fixed_file_info_(GetVsFixedFileInfo(data_)) {
+ DCHECK(!owned_data_.empty());
+}
+
+FileVersionInfoWin::FileVersionInfoWin(void* data,
+ WORD language,
+ WORD code_page)
+ : data_(data),
+ language_(language),
+ code_page_(code_page),
+ fixed_file_info_(GetVsFixedFileInfo(data)) {
+ DCHECK(data_);
+}
diff --git a/chromium/base/file_version_info_win.h b/chromium/base/file_version_info_win.h
index 1e152a86d65..d91b67f67ba 100644
--- a/chromium/base/file_version_info_win.h
+++ b/chromium/base/file_version_info_win.h
@@ -5,20 +5,23 @@
#ifndef BASE_FILE_VERSION_INFO_WIN_H_
#define BASE_FILE_VERSION_INFO_WIN_H_
+#include <windows.h>
+
+#include <stdint.h>
+
#include <memory>
#include <string>
+#include <vector>
#include "base/base_export.h"
#include "base/file_version_info.h"
#include "base/macros.h"
-#include "base/memory/free_deleter.h"
struct tagVS_FIXEDFILEINFO;
typedef tagVS_FIXEDFILEINFO VS_FIXEDFILEINFO;
class BASE_EXPORT FileVersionInfoWin : public FileVersionInfo {
public:
- FileVersionInfoWin(void* data, WORD language, WORD code_page);
~FileVersionInfoWin() override;
// Accessors to the different version properties.
@@ -48,14 +51,25 @@ class BASE_EXPORT FileVersionInfoWin : public FileVersionInfo {
std::wstring GetStringValue(const wchar_t* name);
  // Returns the fixed file info if it exists, otherwise nullptr.
- VS_FIXEDFILEINFO* fixed_file_info() { return fixed_file_info_; }
+ const VS_FIXEDFILEINFO* fixed_file_info() const { return fixed_file_info_; }
private:
- std::unique_ptr<char, base::FreeDeleter> data_;
- WORD language_;
- WORD code_page_;
- // This is a pointer into the data_ if it exists. Otherwise NULL.
- VS_FIXEDFILEINFO* fixed_file_info_;
+ friend FileVersionInfo;
+
+ // |data| is a VS_VERSION_INFO resource. |language| and |code_page| are
+ // extracted from the \VarFileInfo\Translation value of |data|.
+ FileVersionInfoWin(std::vector<uint8_t>&& data,
+ WORD language,
+ WORD code_page);
+ FileVersionInfoWin(void* data, WORD language, WORD code_page);
+
+ const std::vector<uint8_t> owned_data_;
+ const void* const data_;
+ const WORD language_;
+ const WORD code_page_;
+
+ // This is a pointer into |data_| if it exists. Otherwise nullptr.
+ const VS_FIXEDFILEINFO* const fixed_file_info_;
DISALLOW_COPY_AND_ASSIGN(FileVersionInfoWin);
};
diff --git a/chromium/base/file_version_info_unittest.cc b/chromium/base/file_version_info_win_unittest.cc
index ac5320f398b..b5788fea090 100644
--- a/chromium/base/file_version_info_unittest.cc
+++ b/chromium/base/file_version_info_win_unittest.cc
@@ -2,27 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/file_version_info.h"
+#include "base/file_version_info_win.h"
+
+#include <windows.h>
#include <stddef.h>
#include <memory>
+#include "base/file_version_info.h"
#include "base/files/file_path.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/path_service.h"
-#include "build/build_config.h"
+#include "base/scoped_native_library.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if defined(OS_WIN)
-#include "base/file_version_info_win.h"
-#endif
-
using base::FilePath;
namespace {
-#if defined(OS_WIN)
FilePath GetTestDataPath() {
FilePath path;
PathService::Get(base::DIR_SOURCE_ROOT, &path);
@@ -32,12 +31,54 @@ FilePath GetTestDataPath() {
path = path.AppendASCII("file_version_info_unittest");
return path;
}
-#endif
+
+class FileVersionInfoFactory {
+ public:
+ explicit FileVersionInfoFactory(const FilePath& path) : path_(path) {}
+
+ std::unique_ptr<FileVersionInfo> Create() const {
+ return base::WrapUnique(FileVersionInfo::CreateFileVersionInfo(path_));
+ }
+
+ private:
+ const FilePath path_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileVersionInfoFactory);
+};
+
+class FileVersionInfoForModuleFactory {
+ public:
+ explicit FileVersionInfoForModuleFactory(const FilePath& path)
+ // Load the library with LOAD_LIBRARY_AS_IMAGE_RESOURCE since it shouldn't
+ // be executed.
+ : library_(::LoadLibraryEx(path.value().c_str(),
+ nullptr,
+ LOAD_LIBRARY_AS_IMAGE_RESOURCE)) {
+ EXPECT_TRUE(library_.is_valid());
+ }
+
+ std::unique_ptr<FileVersionInfo> Create() const {
+ return base::WrapUnique(
+ FileVersionInfo::CreateFileVersionInfoForModule(library_.get()));
+ }
+
+ private:
+ const base::ScopedNativeLibrary library_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileVersionInfoForModuleFactory);
+};
+
+template <typename T>
+class FileVersionInfoTest : public testing::Test {};
+
+using FileVersionInfoFactories =
+ ::testing::Types<FileVersionInfoFactory, FileVersionInfoForModuleFactory>;
} // namespace
-#if defined(OS_WIN)
-TEST(FileVersionInfoTest, HardCodedProperties) {
+TYPED_TEST_CASE(FileVersionInfoTest, FileVersionInfoFactories);
+
+TYPED_TEST(FileVersionInfoTest, HardCodedProperties) {
const wchar_t kDLLName[] = {L"FileVersionInfoTest1.dll"};
const wchar_t* const kExpectedValues[15] = {
@@ -62,8 +103,9 @@ TEST(FileVersionInfoTest, HardCodedProperties) {
FilePath dll_path = GetTestDataPath();
dll_path = dll_path.Append(kDLLName);
- std::unique_ptr<FileVersionInfo> version_info(
- FileVersionInfo::CreateFileVersionInfo(dll_path));
+ TypeParam factory(dll_path);
+ std::unique_ptr<FileVersionInfo> version_info(factory.Create());
+ ASSERT_TRUE(version_info);
int j = 0;
EXPECT_EQ(kExpectedValues[j++], version_info->company_name());
@@ -82,62 +124,52 @@ TEST(FileVersionInfoTest, HardCodedProperties) {
EXPECT_EQ(kExpectedValues[j++], version_info->legal_trademarks());
EXPECT_EQ(kExpectedValues[j++], version_info->last_change());
}
-#endif
-#if defined(OS_WIN)
-TEST(FileVersionInfoTest, IsOfficialBuild) {
- const wchar_t* kDLLNames[] = {
- L"FileVersionInfoTest1.dll",
- L"FileVersionInfoTest2.dll"
+TYPED_TEST(FileVersionInfoTest, IsOfficialBuild) {
+ constexpr struct {
+ const wchar_t* const dll_name;
+ const bool is_official_build;
+ } kTestItems[]{
+ {L"FileVersionInfoTest1.dll", true}, {L"FileVersionInfoTest2.dll", false},
};
- const bool kExpected[] = {
- true,
- false,
- };
-
- // Test consistency check.
- ASSERT_EQ(arraysize(kDLLNames), arraysize(kExpected));
-
- for (size_t i = 0; i < arraysize(kDLLNames); ++i) {
- FilePath dll_path = GetTestDataPath();
- dll_path = dll_path.Append(kDLLNames[i]);
+ for (const auto& test_item : kTestItems) {
+ const FilePath dll_path = GetTestDataPath().Append(test_item.dll_name);
- std::unique_ptr<FileVersionInfo> version_info(
- FileVersionInfo::CreateFileVersionInfo(dll_path));
+ TypeParam factory(dll_path);
+ std::unique_ptr<FileVersionInfo> version_info(factory.Create());
+ ASSERT_TRUE(version_info);
- EXPECT_EQ(kExpected[i], version_info->is_official_build());
+ EXPECT_EQ(test_item.is_official_build, version_info->is_official_build());
}
}
-#endif
-#if defined(OS_WIN)
-TEST(FileVersionInfoTest, CustomProperties) {
+TYPED_TEST(FileVersionInfoTest, CustomProperties) {
FilePath dll_path = GetTestDataPath();
dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
- std::unique_ptr<FileVersionInfo> version_info(
- FileVersionInfo::CreateFileVersionInfo(dll_path));
+ TypeParam factory(dll_path);
+ std::unique_ptr<FileVersionInfo> version_info(factory.Create());
+ ASSERT_TRUE(version_info);
  // Test a few existing properties.
std::wstring str;
FileVersionInfoWin* version_info_win =
static_cast<FileVersionInfoWin*>(version_info.get());
- EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 1", &str));
+ EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 1", &str));
EXPECT_EQ(L"Un", str);
EXPECT_EQ(L"Un", version_info_win->GetStringValue(L"Custom prop 1"));
- EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 2", &str));
+ EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 2", &str));
EXPECT_EQ(L"Deux", str);
EXPECT_EQ(L"Deux", version_info_win->GetStringValue(L"Custom prop 2"));
- EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 3", &str));
+ EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 3", &str));
EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043", str);
EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043",
version_info_win->GetStringValue(L"Custom prop 3"));
  // Test a non-existing property.
- EXPECT_FALSE(version_info_win->GetValue(L"Unknown property", &str));
+ EXPECT_FALSE(version_info_win->GetValue(L"Unknown property", &str));
EXPECT_EQ(L"", version_info_win->GetStringValue(L"Unknown property"));
}
-#endif
diff --git a/chromium/base/files/file.h b/chromium/base/files/file.h
index 7ab5ca58594..ae2bd1b61bd 100644
--- a/chromium/base/files/file.h
+++ b/chromium/base/files/file.h
@@ -13,7 +13,7 @@
#include "base/files/file_path.h"
#include "base/files/file_tracing.h"
#include "base/files/scoped_file.h"
-#include "base/move.h"
+#include "base/macros.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -29,10 +29,13 @@
namespace base {
#if defined(OS_WIN)
-typedef HANDLE PlatformFile;
+using PlatformFile = HANDLE;
+
+const PlatformFile kInvalidPlatformFile = INVALID_HANDLE_VALUE;
#elif defined(OS_POSIX)
-typedef int PlatformFile;
+using PlatformFile = int;
+const PlatformFile kInvalidPlatformFile = -1;
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
typedef struct stat stat_wrapper_t;
#else
@@ -51,8 +54,6 @@ typedef struct stat64 stat_wrapper_t;
// to the OS is not considered const, even if there is no apparent change to
// member variables.
class BASE_EXPORT File {
- MOVE_ONLY_TYPE_FOR_CPP_03(File)
-
public:
// FLAG_(OPEN|CREATE).* are mutually exclusive. You should specify exactly one
// of the five (possibly combining with other flags) when opening or creating
@@ -331,6 +332,8 @@ class BASE_EXPORT File {
Error error_details_;
bool created_;
bool async_;
+
+ DISALLOW_COPY_AND_ASSIGN(File);
};
} // namespace base
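Hoisting kInvalidPlatformFile into base/files/file.h (it was previously defined locally in icu_util.cc, as the hunk further down shows) gives callers one portable sentinel for "no handle". A minimal sketch, assuming the caller includes base/files/file.h:

  base::PlatformFile TakeHandle(base::PlatformFile* handle) {
    // kInvalidPlatformFile is INVALID_HANDLE_VALUE on Windows and -1 on POSIX.
    base::PlatformFile result = *handle;
    *handle = base::kInvalidPlatformFile;
    return result;
  }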
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index 2c199e131a5..ca0d1d1bb95 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -693,6 +693,10 @@ bool FilePath::ReadFromPickle(PickleIterator* iter) {
int FilePath::CompareIgnoreCase(StringPieceType string1,
StringPieceType string2) {
+ static decltype(::CharUpperW)* const char_upper_api =
+ reinterpret_cast<decltype(::CharUpperW)*>(
+ ::GetProcAddress(::GetModuleHandle(L"user32.dll"), "CharUpperW"));
+ CHECK(char_upper_api);
// Perform character-wise upper case comparison rather than using the
// fully Unicode-aware CompareString(). For details see:
// http://blogs.msdn.com/michkap/archive/2005/10/17/481600.aspx
@@ -702,9 +706,9 @@ int FilePath::CompareIgnoreCase(StringPieceType string1,
StringPieceType::const_iterator string2end = string2.end();
for ( ; i1 != string1end && i2 != string2end; ++i1, ++i2) {
wchar_t c1 =
- (wchar_t)LOWORD(::CharUpperW((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
+ (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
wchar_t c2 =
- (wchar_t)LOWORD(::CharUpperW((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
+ (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
if (c1 < c2)
return -1;
if (c1 > c2)
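The new code binds CharUpperW at first use rather than taking a load-time import on user32.dll. The same shape works for any export; the sketch below uses MessageBeep purely as an illustration and is not part of the change:

  #include <windows.h>

  void BeepWithoutStaticImport() {
    // Resolve the export once; if the export cannot be found the pointer stays
    // null and the call is skipped instead of failing at module load.
    static decltype(::MessageBeep)* const beep =
        reinterpret_cast<decltype(::MessageBeep)*>(
            ::GetProcAddress(::GetModuleHandle(L"user32.dll"), "MessageBeep"));
    if (beep)
      beep(MB_OK);
  }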
diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc
index c85a50a42c7..6bfa71f0545 100644
--- a/chromium/base/files/file_path_watcher_unittest.cc
+++ b/chromium/base/files/file_path_watcher_unittest.cc
@@ -196,7 +196,7 @@ class FilePathWatcherTest : public testing::Test {
bool WaitForEvents() WARN_UNUSED_RESULT {
collector_->Reset();
- loop_.Run();
+ RunLoop().Run();
return collector_->Success();
}
@@ -215,7 +215,8 @@ bool FilePathWatcherTest::SetupWatch(const FilePath& target,
FilePathWatcher* watcher,
TestDelegateBase* delegate,
bool recursive_watch) {
- base::WaitableEvent completion(false, false);
+ base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
bool result;
file_thread_.task_runner()->PostTask(
FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
@@ -889,9 +890,9 @@ TEST_F(FilePathWatcherTest, DirAttributesChanged) {
// We should not get notified in this case as it hasn't affected our ability
// to access the file.
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, false));
- loop_.PostDelayedTask(FROM_HERE,
- MessageLoop::QuitWhenIdleClosure(),
- TestTimeouts::tiny_timeout());
+ loop_.task_runner()->PostDelayedTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure(),
+ TestTimeouts::tiny_timeout());
ASSERT_FALSE(WaitForEvents());
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, true));
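Two mechanical migrations recur throughout these test diffs: RunLoop().Run() replaces the deprecated MessageLoop::current()->Run()/loop_.Run(), and WaitableEvent's two-bool constructor gives way to explicit enums. A minimal sketch of the new spellings, assuming a MessageLoop already exists on the calling thread:

  #include "base/message_loop/message_loop.h"
  #include "base/run_loop.h"
  #include "base/synchronization/waitable_event.h"

  void PumpUntilIdleQuit() {
    // Old: MessageLoop::current()->Run();
    base::RunLoop().Run();
  }

  // Old: base::WaitableEvent completion(false, false);  // which bool is which?
  base::WaitableEvent completion(
      base::WaitableEvent::ResetPolicy::AUTOMATIC,
      base::WaitableEvent::InitialState::NOT_SIGNALED);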
diff --git a/chromium/base/files/file_proxy_unittest.cc b/chromium/base/files/file_proxy_unittest.cc
index 2562208b270..d6a4a1edbfc 100644
--- a/chromium/base/files/file_proxy_unittest.cc
+++ b/chromium/base/files/file_proxy_unittest.cc
@@ -15,6 +15,7 @@
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
#include "base/threading/thread.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -80,7 +81,7 @@ class FileProxyTest : public testing::Test {
proxy->CreateOrOpen(
test_path(), flags,
Bind(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_TRUE(proxy->IsValid());
}
@@ -108,7 +109,7 @@ TEST_F(FileProxyTest, CreateOrOpen_Create) {
test_path(),
File::FLAG_CREATE | File::FLAG_READ,
Bind(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
EXPECT_TRUE(proxy.IsValid());
@@ -127,7 +128,7 @@ TEST_F(FileProxyTest, CreateOrOpen_Open) {
test_path(),
File::FLAG_OPEN | File::FLAG_READ,
Bind(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
EXPECT_TRUE(proxy.IsValid());
@@ -140,7 +141,7 @@ TEST_F(FileProxyTest, CreateOrOpen_OpenNonExistent) {
test_path(),
File::FLAG_OPEN | File::FLAG_READ,
Bind(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_ERROR_NOT_FOUND, error_);
EXPECT_FALSE(proxy.IsValid());
EXPECT_FALSE(proxy.created());
@@ -156,7 +157,7 @@ TEST_F(FileProxyTest, CreateOrOpen_AbandonedCreate) {
File::FLAG_CREATE | File::FLAG_READ,
Bind(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
}
- MessageLoop::current()->Run();
+ RunLoop().Run();
ThreadRestrictions::SetIOAllowed(prev);
EXPECT_TRUE(PathExists(test_path()));
@@ -173,7 +174,7 @@ TEST_F(FileProxyTest, Close) {
#endif
proxy.Close(Bind(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
EXPECT_FALSE(proxy.IsValid());
@@ -187,7 +188,7 @@ TEST_F(FileProxyTest, CreateTemporary) {
proxy.CreateTemporary(
0 /* additional_file_flags */,
Bind(&FileProxyTest::DidCreateTemporary, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_TRUE(proxy.IsValid());
EXPECT_EQ(File::FILE_OK, error_);
@@ -196,7 +197,7 @@ TEST_F(FileProxyTest, CreateTemporary) {
// The file should be writable.
proxy.Write(0, "test", 4,
Bind(&FileProxyTest::DidWrite, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
EXPECT_EQ(4, bytes_written_);
}
@@ -235,7 +236,7 @@ TEST_F(FileProxyTest, GetInfo) {
CreateProxy(File::FLAG_OPEN | File::FLAG_READ, &proxy);
proxy.GetInfo(
Bind(&FileProxyTest::DidGetFileInfo, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// Verify.
EXPECT_EQ(File::FILE_OK, error_);
@@ -258,7 +259,7 @@ TEST_F(FileProxyTest, Read) {
CreateProxy(File::FLAG_OPEN | File::FLAG_READ, &proxy);
proxy.Read(0, 128, Bind(&FileProxyTest::DidRead, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// Verify.
EXPECT_EQ(File::FILE_OK, error_);
@@ -276,14 +277,14 @@ TEST_F(FileProxyTest, WriteAndFlush) {
int data_bytes = arraysize(data);
proxy.Write(0, data, data_bytes,
Bind(&FileProxyTest::DidWrite, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
EXPECT_EQ(data_bytes, bytes_written_);
// Flush the written data. (So that the following read should always
// succeed. On some platforms it may work with or without this flush.)
proxy.Flush(Bind(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
// Verify the written data.
@@ -311,7 +312,7 @@ TEST_F(FileProxyTest, MAYBE_SetTimes) {
proxy.SetTimes(last_accessed_time, last_modified_time,
Bind(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
File::Info info;
@@ -338,7 +339,7 @@ TEST_F(FileProxyTest, SetLength_Shrink) {
CreateProxy(File::FLAG_OPEN | File::FLAG_WRITE, &proxy);
proxy.SetLength(7,
Bind(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// Verify.
GetFileInfo(test_path(), &info);
@@ -364,7 +365,7 @@ TEST_F(FileProxyTest, SetLength_Expand) {
CreateProxy(File::FLAG_OPEN | File::FLAG_WRITE, &proxy);
proxy.SetLength(53,
Bind(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// Verify.
GetFileInfo(test_path(), &info);
diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h
index 8fd9fffeb36..420dcaee61b 100644
--- a/chromium/base/files/file_util.h
+++ b/chromium/base/files/file_util.h
@@ -37,6 +37,7 @@
namespace base {
+class Environment;
class Time;
//-----------------------------------------------------------------------------
@@ -199,6 +200,11 @@ BASE_EXPORT bool GetPosixFilePermissions(const FilePath& path, int* mode);
// the permission of a file which the symlink points to.
BASE_EXPORT bool SetPosixFilePermissions(const FilePath& path, int mode);
+// Returns true iff |executable| can be found in any directory specified by the
+// PATH environment variable in |env|.
+BASE_EXPORT bool ExecutableExistsInPath(Environment* env,
+ const FilePath::StringType& executable);
+
#endif // OS_POSIX
// Returns true if the given directory is empty
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index ca1c5250b92..42de9316f1c 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -22,6 +22,7 @@
#include <time.h>
#include <unistd.h>
+#include "base/environment.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/scoped_file.h"
@@ -31,6 +32,7 @@
#include "base/path_service.h"
#include "base/posix/eintr_wrapper.h"
#include "base/stl_util.h"
+#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/sys_string_conversions.h"
@@ -455,6 +457,25 @@ bool SetPosixFilePermissions(const FilePath& path,
return true;
}
+bool ExecutableExistsInPath(Environment* env,
+ const FilePath::StringType& executable) {
+ std::string path;
+ if (!env->GetVar("PATH", &path)) {
+ LOG(ERROR) << "No $PATH variable. Assuming no " << executable << ".";
+ return false;
+ }
+
+ for (const StringPiece& cur_path :
+ SplitStringPiece(path, ":", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+ FilePath file(cur_path);
+ int permissions;
+ if (GetPosixFilePermissions(file.Append(executable), &permissions) &&
+ (permissions & FILE_PERMISSION_EXECUTE_BY_USER))
+ return true;
+ }
+ return false;
+}
+
#if !defined(OS_MACOSX)
// This is implemented in file_util_mac.mm for Mac.
bool GetTempDir(FilePath* path) {
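ExecutableExistsInPath() is POSIX-only and checks each PATH entry for user-execute permission, as the implementation above shows. A minimal usage sketch; the executable name "git" is just an illustration:

  #include <memory>
  #include "base/environment.h"
  #include "base/files/file_util.h"

  bool HasGitInPath() {
    std::unique_ptr<base::Environment> env(base::Environment::Create());
    return base::ExecutableExistsInPath(env.get(), "git");
  }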
diff --git a/chromium/base/files/file_util_proxy_unittest.cc b/chromium/base/files/file_util_proxy_unittest.cc
index 74083699f00..a01aa4e4f23 100644
--- a/chromium/base/files/file_util_proxy_unittest.cc
+++ b/chromium/base/files/file_util_proxy_unittest.cc
@@ -8,6 +8,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
#include "base/threading/thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -67,7 +68,7 @@ TEST_F(FileUtilProxyTest, GetFileInfo_File) {
file_task_runner(),
test_path(),
Bind(&FileUtilProxyTest::DidGetFileInfo, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// Verify.
EXPECT_EQ(File::FILE_OK, error_);
@@ -90,7 +91,7 @@ TEST_F(FileUtilProxyTest, GetFileInfo_Directory) {
file_task_runner(),
test_path(),
Bind(&FileUtilProxyTest::DidGetFileInfo, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// Verify.
EXPECT_EQ(File::FILE_OK, error_);
@@ -113,7 +114,7 @@ TEST_F(FileUtilProxyTest, Touch) {
last_accessed_time,
last_modified_time,
Bind(&FileUtilProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(File::FILE_OK, error_);
File::Info info;
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index ac3a654fdd3..153cd9cbd49 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -12,6 +12,7 @@
#include <vector>
#include "base/base_paths.h"
+#include "base/environment.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
@@ -33,7 +34,6 @@
#include <shlobj.h>
#include <tchar.h>
#include <winioctl.h>
-#include "base/environment.h"
#include "base/win/scoped_handle.h"
#include "base/win/windows_version.h"
#endif
@@ -838,6 +838,45 @@ TEST_F(FileUtilTest, ChangeDirectoryPermissionsAndEnumerate) {
EXPECT_FALSE(PathExists(subdir_path));
}
+TEST_F(FileUtilTest, ExecutableExistsInPath) {
+ // Create two directories that we will put in our PATH
+ const char kPath[] = "PATH";
+ const FilePath::CharType kDir1[] = FPL("dir1");
+ const FilePath::CharType kDir2[] = FPL("dir2");
+
+ FilePath dir1 = temp_dir_.path().Append(kDir1);
+ FilePath dir2 = temp_dir_.path().Append(kDir2);
+ ASSERT_TRUE(CreateDirectory(dir1));
+ ASSERT_TRUE(CreateDirectory(dir2));
+
+ std::unique_ptr<Environment> env(base::Environment::Create());
+
+ ASSERT_TRUE(env->SetVar(kPath, dir1.value() + ":" + dir2.value()));
+
+ const FilePath::CharType kRegularFileName[] = FPL("regular_file");
+ const FilePath::CharType kExeFileName[] = FPL("exe");
+ const FilePath::CharType kDneFileName[] = FPL("does_not_exist");
+
+ const FilePath kExePath = dir1.Append(kExeFileName);
+ const FilePath kRegularFilePath = dir2.Append(kRegularFileName);
+
+ // Write file.
+ const std::string kData("hello");
+ ASSERT_EQ(static_cast<int>(kData.length()),
+ WriteFile(kExePath, kData.data(), kData.length()));
+ ASSERT_TRUE(PathExists(kExePath));
+ ASSERT_EQ(static_cast<int>(kData.length()),
+ WriteFile(kRegularFilePath, kData.data(), kData.length()));
+ ASSERT_TRUE(PathExists(kRegularFilePath));
+
+ ASSERT_TRUE(SetPosixFilePermissions(dir1.Append(kExeFileName),
+ FILE_PERMISSION_EXECUTE_BY_USER));
+
+ EXPECT_TRUE(ExecutableExistsInPath(env.get(), kExeFileName));
+ EXPECT_FALSE(ExecutableExistsInPath(env.get(), kRegularFileName));
+ EXPECT_FALSE(ExecutableExistsInPath(env.get(), kDneFileName));
+}
+
#endif // defined(OS_POSIX)
#if defined(OS_WIN)
@@ -1367,7 +1406,7 @@ TEST_F(FileUtilTest, CopyDirectoryWithTrailingSeparators) {
#if defined(OS_WIN)
FilePath from_path =
temp_dir_.path().Append(FILE_PATH_LITERAL("Copy_From_Subdir\\\\\\"));
-#elif defined (OS_POSIX)
+#elif defined(OS_POSIX)
FilePath from_path =
temp_dir_.path().Append(FILE_PATH_LITERAL("Copy_From_Subdir///"));
#endif
@@ -1685,9 +1724,7 @@ TEST_F(FileUtilTest, IsOnNetworkDrive) {
EXPECT_EQ(test_case.expected, observed) << " input: " << input.value();
}
- Environment* env = Environment::Create();
- ASSERT_TRUE(!!env);
-
+ std::unique_ptr<Environment> env(Environment::Create());
// To test IsOnNetworkDrive() for remote cases, set up a file server
// and place a file called file.txt on the server e.g.
// \\DC01\TESTSHARE\file.txt
diff --git a/chromium/base/files/important_file_writer_unittest.cc b/chromium/base/files/important_file_writer_unittest.cc
index ba1d4d3f932..43e051ebcfa 100644
--- a/chromium/base/files/important_file_writer_unittest.cc
+++ b/chromium/base/files/important_file_writer_unittest.cc
@@ -157,7 +157,7 @@ TEST_F(ImportantFileWriterTest, ScheduleWrite) {
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_FALSE(writer.HasPendingWrite());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("foo", GetFileContent(writer.path()));
@@ -173,7 +173,7 @@ TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_FALSE(writer.HasPendingWrite());
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("foo", GetFileContent(writer.path()));
@@ -190,7 +190,7 @@ TEST_F(ImportantFileWriterTest, BatchingWrites) {
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
+ RunLoop().Run();
ASSERT_TRUE(PathExists(writer.path()));
EXPECT_EQ("baz", GetFileContent(writer.path()));
}
diff --git a/chromium/base/files/scoped_file.cc b/chromium/base/files/scoped_file.cc
index 8971280776c..8ce45b8ba39 100644
--- a/chromium/base/files/scoped_file.cc
+++ b/chromium/base/files/scoped_file.cc
@@ -8,8 +8,10 @@
#include "build/build_config.h"
#if defined(OS_POSIX)
+#include <errno.h>
#include <unistd.h>
+#include "base/debug/alias.h"
#include "base/posix/eintr_wrapper.h"
#endif
@@ -27,7 +29,15 @@ void ScopedFDCloseTraits::Free(int fd) {
// Chrome relies on being able to "drop" such access.
// It's especially problematic on Linux with the setuid sandbox, where
// a single open directory would bypass the entire security model.
- PCHECK(0 == IGNORE_EINTR(close(fd)));
+ int ret = IGNORE_EINTR(close(fd));
+
+ // TODO(davidben): Remove this once it's been determined whether
+ // https://crbug.com/603354 is caused by EBADF or a network filesystem
+ // returning some other error.
+ int close_errno = errno;
+ base::debug::Alias(&close_errno);
+
+ PCHECK(0 == ret);
}
#endif // OS_POSIX
diff --git a/chromium/base/i18n/icu_util.cc b/chromium/base/i18n/icu_util.cc
index c85195f06d8..30876f42f42 100644
--- a/chromium/base/i18n/icu_util.cc
+++ b/chromium/base/i18n/icu_util.cc
@@ -78,12 +78,6 @@ const char kAndroidAssetsIcuDataFileName[] = "assets/icudtl.dat";
// File handle intentionally never closed. Not using File here because its
// Windows implementation guards against two instances owning the same
// PlatformFile (which we allow since we know it is never freed).
-const PlatformFile kInvalidPlatformFile =
-#if defined(OS_WIN)
- INVALID_HANDLE_VALUE;
-#else
- -1;
-#endif
PlatformFile g_icudtl_pf = kInvalidPlatformFile;
MemoryMappedFile* g_icudtl_mapped_file = nullptr;
MemoryMappedFile::Region g_icudtl_region;
diff --git a/chromium/base/i18n/number_formatting.cc b/chromium/base/i18n/number_formatting.cc
index 6f454a0848b..b5108334846 100644
--- a/chromium/base/i18n/number_formatting.cc
+++ b/chromium/base/i18n/number_formatting.cc
@@ -9,6 +9,7 @@
#include <memory>
#include "base/format_macros.h"
+#include "base/i18n/message_formatter.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
@@ -54,7 +55,7 @@ string16 FormatNumber(int64_t number) {
if (!number_format) {
// As a fallback, just return the raw number in a string.
- return UTF8ToUTF16(StringPrintf("%" PRId64, number));
+ return ASCIIToUTF16(StringPrintf("%" PRId64, number));
}
icu::UnicodeString ustr;
number_format->format(number, ustr);
@@ -68,7 +69,7 @@ string16 FormatDouble(double number, int fractional_digits) {
if (!number_format) {
// As a fallback, just return the raw number in a string.
- return UTF8ToUTF16(StringPrintf("%f", number));
+ return ASCIIToUTF16(StringPrintf("%f", number));
}
number_format->setMaximumFractionDigits(fractional_digits);
number_format->setMinimumFractionDigits(fractional_digits);
@@ -78,6 +79,11 @@ string16 FormatDouble(double number, int fractional_digits) {
return string16(ustr.getBuffer(), static_cast<size_t>(ustr.length()));
}
+string16 FormatPercent(int number) {
+ return i18n::MessageFormatter::FormatWithNumberedArgs(
+ ASCIIToUTF16("{0,number,percent}"), static_cast<double>(number) / 100.0);
+}
+
namespace testing {
void ResetFormatters() {
diff --git a/chromium/base/i18n/number_formatting.h b/chromium/base/i18n/number_formatting.h
index bdb862f21a6..9636bf4d1ba 100644
--- a/chromium/base/i18n/number_formatting.h
+++ b/chromium/base/i18n/number_formatting.h
@@ -13,8 +13,7 @@
namespace base {
// Return a number formatted with separators in the user's locale.
-// Ex: FormatNumber(1234567)
-// => "1,234,567" in English, "1.234.567" in German
+// Ex: FormatNumber(1234567) => "1,234,567" in English, "1.234.567" in German
BASE_I18N_EXPORT string16 FormatNumber(int64_t number);
// Return a number formatted with separators in the user's locale.
@@ -22,6 +21,10 @@ BASE_I18N_EXPORT string16 FormatNumber(int64_t number);
// => "1,234,567.8" in English, "1.234.567,8" in German
BASE_I18N_EXPORT string16 FormatDouble(double number, int fractional_digits);
+// Return a percentage formatted with space and symbol in the user's locale.
+// Ex: FormatPercent(12) => "12%" in English, "12 %" in Romanian
+BASE_I18N_EXPORT string16 FormatPercent(int number);
+
namespace testing {
// Causes cached formatters to be discarded and recreated. Only useful for
diff --git a/chromium/base/i18n/number_formatting_unittest.cc b/chromium/base/i18n/number_formatting_unittest.cc
index 31341ac25e4..a131bf3e7d7 100644
--- a/chromium/base/i18n/number_formatting_unittest.cc
+++ b/chromium/base/i18n/number_formatting_unittest.cc
@@ -94,5 +94,31 @@ TEST(NumberFormattingTest, FormatDouble) {
}
}
+TEST(NumberFormattingTest, FormatPercent) {
+ static const struct {
+ int64_t number;
+ const char* expected_english;
+ const wchar_t* expected_german; // Note: Space before % isn't \x20.
+ const wchar_t* expected_persian; // Note: Non-Arabic numbers and %.
+ } cases[] = {
+ {0, "0%", L"0\xa0%", L"\x6f0\x200f\x66a"},
+ {42, "42%", L"42\xa0%", L"\x6f4\x6f2\x200f\x66a"},
+ {1024, "1,024%", L"1.024\xa0%", L"\x6f1\x66c\x6f0\x6f2\x6f4\x200f\x66a"},
+ };
+
+ test::ScopedRestoreICUDefaultLocale restore_locale;
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ i18n::SetICUDefaultLocale("en");
+ EXPECT_EQ(ASCIIToUTF16(cases[i].expected_english),
+ FormatPercent(cases[i].number));
+ i18n::SetICUDefaultLocale("de");
+ EXPECT_EQ(WideToUTF16(cases[i].expected_german),
+ FormatPercent(cases[i].number));
+ i18n::SetICUDefaultLocale("fa");
+ EXPECT_EQ(WideToUTF16(cases[i].expected_persian),
+ FormatPercent(cases[i].number));
+ }
+}
+
} // namespace
} // namespace base
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index 51eebc26cf6..51a48513aca 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -52,7 +52,8 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayDefault12h) {
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
- Time time(Time::FromLocalExploded(kTestDateTimeExploded));
+ Time time;
+ EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
string16 clock12h_pm(ASCIIToUTF16("3:42 PM"));
string16 clock12h(ASCIIToUTF16("3:42"));
@@ -94,7 +95,8 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayDefault24h) {
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_GB");
- Time time(Time::FromLocalExploded(kTestDateTimeExploded));
+ Time time;
+ EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
string16 clock12h_pm(ASCIIToUTF16("3:42 pm"));
string16 clock12h(ASCIIToUTF16("3:42"));
@@ -135,7 +137,8 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayJP) {
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("ja_JP");
- Time time(Time::FromLocalExploded(kTestDateTimeExploded));
+ Time time;
+ EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
string16 clock12h_pm(WideToUTF16(L"\x5348\x5f8c" L"3:42"));
string16 clock12h(ASCIIToUTF16("3:42"));
@@ -174,7 +177,8 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatDateUS) {
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
- Time time(Time::FromLocalExploded(kTestDateTimeExploded));
+ Time time;
+ EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
EXPECT_EQ(ASCIIToUTF16("Apr 30, 2011"), TimeFormatShortDate(time));
EXPECT_EQ(ASCIIToUTF16("4/30/11"), TimeFormatShortDateNumeric(time));
@@ -202,7 +206,8 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatDateGB) {
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_GB");
- Time time(Time::FromLocalExploded(kTestDateTimeExploded));
+ Time time;
+ EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
EXPECT_EQ(ASCIIToUTF16("30 Apr 2011"), TimeFormatShortDate(time));
EXPECT_EQ(ASCIIToUTF16("30/04/2011"), TimeFormatShortDateNumeric(time));
diff --git a/chromium/base/ios/ios_util.h b/chromium/base/ios/ios_util.h
index 3b276ff7998..fe9ec202c5d 100644
--- a/chromium/base/ios/ios_util.h
+++ b/chromium/base/ios/ios_util.h
@@ -19,6 +19,9 @@ BASE_EXPORT bool IsRunningOnIOS8OrLater();
// Returns whether the operating system is iOS 9 or later.
BASE_EXPORT bool IsRunningOnIOS9OrLater();
+// Returns whether the operating system is iOS 10 or later.
+BASE_EXPORT bool IsRunningOnIOS10OrLater();
+
// Returns whether the operating system is at the given version or later.
BASE_EXPORT bool IsRunningOnOrLater(int32_t major,
int32_t minor,
diff --git a/chromium/base/ios/ios_util.mm b/chromium/base/ios/ios_util.mm
index bc10d19e8f6..7af85444ada 100644
--- a/chromium/base/ios/ios_util.mm
+++ b/chromium/base/ios/ios_util.mm
@@ -37,6 +37,10 @@ bool IsRunningOnIOS9OrLater() {
return IsRunningOnOrLater(9, 0, 0);
}
+bool IsRunningOnIOS10OrLater() {
+ return IsRunningOnOrLater(10, 0, 0);
+}
+
bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
static const int32_t* current_version = OSVersionAsArray();
int32_t version[] = {major, minor, bug_fix};
diff --git a/chromium/base/json/json_writer.cc b/chromium/base/json/json_writer.cc
index 19bc0da972f..0b658eed59d 100644
--- a/chromium/base/json/json_writer.cc
+++ b/chromium/base/json/json_writer.cc
@@ -127,9 +127,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
bool first_value_has_been_output = false;
bool result = node.GetAsList(&list);
DCHECK(result);
- for (ListValue::const_iterator it = list->begin(); it != list->end();
- ++it) {
- const Value* value = *it;
+ for (const auto& value : *list) {
if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
continue;
diff --git a/chromium/base/json/json_writer_unittest.cc b/chromium/base/json/json_writer_unittest.cc
index 37ad2686843..233ac5e8678 100644
--- a/chromium/base/json/json_writer_unittest.cc
+++ b/chromium/base/json/json_writer_unittest.cc
@@ -129,14 +129,11 @@ TEST(JSONWriterTest, BinaryValues) {
EXPECT_EQ("[5,2]", output_js);
DictionaryValue binary_dict;
- binary_dict.Set(
- "a", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+ binary_dict.Set("a", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
binary_dict.SetInteger("b", 5);
- binary_dict.Set(
- "c", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+ binary_dict.Set("c", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
binary_dict.SetInteger("d", 2);
- binary_dict.Set(
- "e", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+ binary_dict.Set("e", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
EXPECT_TRUE(JSONWriter::WriteWithOptions(
binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index 4390d8cda30..0771b47c182 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -13,8 +13,6 @@
#if defined(OS_WIN)
#include <io.h>
#include <windows.h>
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
typedef HANDLE FileHandle;
typedef HANDLE MutexHandle;
// Windows warns on using write(). It prefers _write().
@@ -289,13 +287,24 @@ bool InitializeLogFileHandle() {
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+ // We are intentionally not using FilePath or FileUtil here to reduce the
+    // dependencies of the logging implementation. For example, FilePath and
+ // FileUtil depend on shell32 and user32.dll. This is not acceptable for
+ // some consumers of base logging like chrome_elf, etc.
+ // Please don't change the code below to use FilePath.
// try the current directory
- base::FilePath file_path;
- if (!base::GetCurrentDirectory(&file_path))
+ wchar_t system_buffer[MAX_PATH];
+ system_buffer[0] = 0;
+ DWORD len = ::GetCurrentDirectory(arraysize(system_buffer),
+ system_buffer);
+ if (len == 0 || len > arraysize(system_buffer))
return false;
- *g_log_file_name = file_path.Append(
- FILE_PATH_LITERAL("debug.log")).value();
+ *g_log_file_name = system_buffer;
+ // Append a trailing backslash if needed.
+ if (g_log_file_name->back() != L'\\')
+ *g_log_file_name += L"\\";
+ *g_log_file_name += L"debug.log";
g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
@@ -454,8 +463,7 @@ template std::string* MakeCheckOpString<unsigned int, unsigned long>(
template std::string* MakeCheckOpString<std::string, std::string>(
const std::string&, const std::string&, const char* name);
-template <>
-void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& p) {
+void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p) {
(*os) << "nullptr";
}
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index 36c9c6f311d..fd148aa2694 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -11,10 +11,13 @@
#include <cstring>
#include <sstream>
#include <string>
+#include <type_traits>
+#include <utility>
#include "base/base_export.h"
#include "base/debug/debugger.h"
#include "base/macros.h"
+#include "base/template_util.h"
#include "build/build_config.h"
//
@@ -516,14 +519,26 @@ class CheckOpResult {
// This formats a value for a failing CHECK_XX statement. Ordinarily,
// it uses the definition for operator<<, with a few special cases below.
template <typename T>
-inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
+inline typename std::enable_if<
+ base::internal::SupportsOstreamOperator<const T&>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
(*os) << v;
}
-// We need an explicit specialization for std::nullptr_t.
-template <>
-BASE_EXPORT void MakeCheckOpValueString(std::ostream* os,
- const std::nullptr_t& p);
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+inline typename std::enable_if<
+ !base::internal::SupportsOstreamOperator<const T&>::value &&
+ std::is_enum<T>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << static_cast<typename base::underlying_type<T>::type>(v);
+}
+
+// We need an explicit overload for std::nullptr_t.
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p);
// Build the error message string. This is separate from the "Impl"
// function template because it is not performance critical and so can
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 7254265b177..8a20c54fb4c 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -251,6 +251,13 @@ TEST_F(LoggingTest, Dcheck) {
DCHECK_NE(p_not_null, nullptr);
DCHECK_NE(nullptr, p_not_null);
EXPECT_EQ(0, log_sink_call_count);
+
+ // Test DCHECK on a scoped enum.
+ enum class Animal { DOG, CAT };
+ DCHECK_EQ(Animal::DOG, Animal::DOG);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(Animal::DOG, Animal::CAT);
+ EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
}
TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/chromium/base/mac/bind_objc_block.h b/chromium/base/mac/bind_objc_block.h
index c31f26e5a31..2434d444f5c 100644
--- a/chromium/base/mac/bind_objc_block.h
+++ b/chromium/base/mac/bind_objc_block.h
@@ -45,8 +45,11 @@ R RunBlock(base::mac::ScopedBlock<R(^)(Args...)> block, Args... args) {
// note above).
template<typename R, typename... Args>
base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
- return base::Bind(&base::internal::RunBlock<R, Args...>,
- base::mac::ScopedBlock<R(^)(Args...)>(Block_copy(block)));
+ return base::Bind(
+ &base::internal::RunBlock<R, Args...>,
+ base::mac::ScopedBlock<R (^)(Args...)>(
+ base::mac::internal::ScopedBlockTraits<R (^)(Args...)>::Retain(
+ block)));
}
} // namespace base
diff --git a/chromium/base/mac/bind_objc_block_unittest.mm b/chromium/base/mac/bind_objc_block_unittest.mm
index c0e690c0083..f3c189cd4b7 100644
--- a/chromium/base/mac/bind_objc_block_unittest.mm
+++ b/chromium/base/mac/bind_objc_block_unittest.mm
@@ -11,8 +11,16 @@
#include "base/callback_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
+// See bind_objc_block_unittest_arc.mm for why this is necessary. Remove once
+// gyp support is dropped.
+void BindObjcBlockUnittestArcLinkerWorkaround();
+
namespace {
+TEST(BindObjcBlockTest, EnableARCTests) {
+ BindObjcBlockUnittestArcLinkerWorkaround();
+}
+
TEST(BindObjcBlockTest, TestScopedClosureRunnerExitScope) {
int run_count = 0;
int* ptr = &run_count;
diff --git a/chromium/base/mac/bind_objc_block_unittest_arc.mm b/chromium/base/mac/bind_objc_block_unittest_arc.mm
new file mode 100644
index 00000000000..ded1c338e47
--- /dev/null
+++ b/chromium/base/mac/bind_objc_block_unittest_arc.mm
@@ -0,0 +1,109 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/bind_objc_block.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// This free function is here to ensure that the object file is not discarded
+// at link time when building with gyp (it is required because the target is
+// built as a static library with gyp rather than a source set, which causes
+// the object file to be discarded if no symbol is used). Remove once gyp
+// support is dropped.
+void BindObjcBlockUnittestArcLinkerWorkaround() {}
+
+namespace {
+
+TEST(BindObjcBlockTestARC, TestScopedClosureRunnerExitScope) {
+ int run_count = 0;
+ int* ptr = &run_count;
+ {
+ base::ScopedClosureRunner runner(base::BindBlock(^{
+ (*ptr)++;
+ }));
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTestARC, TestScopedClosureRunnerRelease) {
+ int run_count = 0;
+ int* ptr = &run_count;
+ base::Closure c;
+ {
+ base::ScopedClosureRunner runner(base::BindBlock(^{
+ (*ptr)++;
+ }));
+ c = runner.Release();
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(0, run_count);
+ c.Run();
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTestARC, TestReturnValue) {
+ const int kReturnValue = 42;
+ base::Callback<int(void)> c = base::BindBlock(^{return kReturnValue;});
+ EXPECT_EQ(kReturnValue, c.Run());
+}
+
+TEST(BindObjcBlockTestARC, TestArgument) {
+ const int kArgument = 42;
+ base::Callback<int(int)> c = base::BindBlock(^(int a){return a + 1;});
+ EXPECT_EQ(kArgument + 1, c.Run(kArgument));
+}
+
+TEST(BindObjcBlockTestARC, TestTwoArguments) {
+ std::string result;
+ std::string* ptr = &result;
+ base::Callback<void(const std::string&, const std::string&)> c =
+ base::BindBlock(^(const std::string& a, const std::string& b) {
+ *ptr = a + b;
+ });
+ c.Run("forty", "two");
+ EXPECT_EQ(result, "fortytwo");
+}
+
+TEST(BindObjcBlockTestARC, TestThreeArguments) {
+ std::string result;
+ std::string* ptr = &result;
+ base::Callback<void(const std::string&,
+ const std::string&,
+ const std::string&)> c =
+ base::BindBlock(^(const std::string& a,
+ const std::string& b,
+ const std::string& c) {
+ *ptr = a + b + c;
+ });
+ c.Run("six", "times", "nine");
+ EXPECT_EQ(result, "sixtimesnine");
+}
+
+TEST(BindObjcBlockTestARC, TestSixArguments) {
+ std::string result1;
+ std::string* ptr = &result1;
+ int result2;
+ int* ptr2 = &result2;
+ base::Callback<void(int, int, const std::string&, const std::string&,
+ int, const std::string&)> c =
+ base::BindBlock(^(int a, int b, const std::string& c,
+ const std::string& d, int e, const std::string& f) {
+ *ptr = c + d + f;
+ *ptr2 = a + b + e;
+ });
+ c.Run(1, 2, "infinite", "improbability", 3, "drive");
+ EXPECT_EQ(result1, "infiniteimprobabilitydrive");
+ EXPECT_EQ(result2, 6);
+}
+
+} // namespace
diff --git a/chromium/base/mac/call_with_eh_frame.cc b/chromium/base/mac/call_with_eh_frame.cc
index 7267676d666..45785414194 100644
--- a/chromium/base/mac/call_with_eh_frame.cc
+++ b/chromium/base/mac/call_with_eh_frame.cc
@@ -12,27 +12,43 @@
namespace base {
namespace mac {
+#if defined(OS_IOS)
+// No iOS assembly implementation exists, so just call the block directly.
+void CallWithEHFrame(void (^block)(void)) {
+ block();
+}
+#else // OS_MACOSX
+extern "C" _Unwind_Reason_Code __gxx_personality_v0(int,
+ _Unwind_Action,
+ uint64_t,
+ struct _Unwind_Exception*,
+ struct _Unwind_Context*);
+
_Unwind_Reason_Code CxxPersonalityRoutine(
int version,
_Unwind_Action actions,
- uint64_t exceptionClass,
- struct _Unwind_Exception* exceptionObject,
+ uint64_t exception_class,
+ struct _Unwind_Exception* exception_object,
struct _Unwind_Context* context) {
- // Tell libunwind that this is the end of the stack. When it encounters the
- // CallWithEHFrame, it will stop searching for an exception handler. The
- // result is that no exception handler has been found higher on the stack,
- // and any that are lower on the stack (e.g. in CFRunLoopRunSpecific), will
- // now be skipped. Since this is reporting the end of the stack, and no
- // exception handler will have been found, std::terminate() will be called.
- return _URC_END_OF_STACK;
-}
+ // Unwinding is a two-phase process: phase one searches for an exception
+ // handler, and phase two performs cleanup. For phase one, this custom
+ // personality will terminate the search. For phase two, this should delegate
+ // back to the standard personality routine.
-#if defined(OS_IOS)
-// No iOS assembly implementation exists, so just call the block directly.
-void CallWithEHFrame(void (^block)(void)) {
- block();
+ if ((actions & _UA_SEARCH_PHASE) != 0) {
+ // Tell libunwind that this is the end of the stack. When it encounters the
+ // CallWithEHFrame, it will stop searching for an exception handler. The
+ // result is that no exception handler has been found higher on the stack,
+ // and any that are lower on the stack (e.g. in CFRunLoopRunSpecific), will
+ // now be skipped. Since this is reporting the end of the stack, and no
+ // exception handler will have been found, std::terminate() will be called.
+ return _URC_END_OF_STACK;
+ }
+
+ return __gxx_personality_v0(version, actions, exception_class,
+ exception_object, context);
}
-#endif
+#endif // defined(OS_IOS)
} // namespace mac
} // namespace base
diff --git a/chromium/base/mac/libdispatch_task_runner.cc b/chromium/base/mac/libdispatch_task_runner.cc
index adf12619d67..dcbeecc08ac 100644
--- a/chromium/base/mac/libdispatch_task_runner.cc
+++ b/chromium/base/mac/libdispatch_task_runner.cc
@@ -13,7 +13,8 @@ namespace mac {
LibDispatchTaskRunner::LibDispatchTaskRunner(const char* name)
: queue_(dispatch_queue_create(name, NULL)),
- queue_finalized_(false, false) {
+ queue_finalized_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {
dispatch_set_context(queue_, this);
dispatch_set_finalizer_f(queue_, &LibDispatchTaskRunner::Finalizer);
}
diff --git a/chromium/base/mac/libdispatch_task_runner_unittest.cc b/chromium/base/mac/libdispatch_task_runner_unittest.cc
index bfe776c6970..a7bc9282084 100644
--- a/chromium/base/mac/libdispatch_task_runner_unittest.cc
+++ b/chromium/base/mac/libdispatch_task_runner_unittest.cc
@@ -10,6 +10,8 @@
#include "base/mac/bind_objc_block.h"
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -24,10 +26,10 @@ class LibDispatchTaskRunnerTest : public testing::Test {
// all non-delayed tasks are run on the LibDispatchTaskRunner.
void DispatchLastTask() {
dispatch_async(task_runner_->GetDispatchQueue(), ^{
- message_loop_.PostTask(FROM_HERE,
- base::MessageLoop::QuitWhenIdleClosure());
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
});
- message_loop_.Run();
+ base::RunLoop().Run();
task_runner_->Shutdown();
}
@@ -160,11 +162,11 @@ TEST_F(LibDispatchTaskRunnerTest, NonNestable) {
TaskOrderMarker marker(this, "First");
task_runner_->PostNonNestableTask(FROM_HERE, base::BindBlock(^{
TaskOrderMarker marker(this, "Second NonNestable");
- message_loop_.PostTask(FROM_HERE,
- base::MessageLoop::QuitWhenIdleClosure());
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
}));
}));
- message_loop_.Run();
+ base::RunLoop().Run();
task_runner_->Shutdown();
const char* const expectations[] = {
@@ -186,11 +188,11 @@ TEST_F(LibDispatchTaskRunnerTest, PostDelayed) {
task_runner_->PostDelayedTask(FROM_HERE, base::BindBlock(^{
TaskOrderMarker marker(this, "Timed");
run_time = base::TimeTicks::Now();
- message_loop_.PostTask(FROM_HERE,
- base::MessageLoop::QuitWhenIdleClosure());
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
}), delta);
task_runner_->PostTask(FROM_HERE, BoundRecordTaskOrder(this, "Second"));
- message_loop_.Run();
+ base::RunLoop().Run();
task_runner_->Shutdown();
const char* const expectations[] = {
diff --git a/chromium/base/mac/mac_logging.mm b/chromium/base/mac/mac_logging.mm
index 381ad30614e..f0d3c07da83 100644
--- a/chromium/base/mac/mac_logging.mm
+++ b/chromium/base/mac/mac_logging.mm
@@ -32,8 +32,8 @@ OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
OSStatusLogMessage::~OSStatusLogMessage() {
#if defined(OS_IOS)
- // TODO(ios): Consider using NSError with NSOSStatusErrorDomain to try to
- // get a description of the failure.
+ // TODO(crbug.com/546375): Consider using NSError with NSOSStatusErrorDomain
+ // to try to get a description of the failure.
stream() << ": " << status_;
#else
stream() << ": "
diff --git a/chromium/base/mac/mac_util.h b/chromium/base/mac/mac_util.h
index c72c5f14336..5d2afc2565e 100644
--- a/chromium/base/mac/mac_util.h
+++ b/chromium/base/mac/mac_util.h
@@ -113,53 +113,39 @@ BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
// "OrLater" variants to those that check for a specific version, unless you
// know for sure that you need to check for a specific version.
-// Mountain Lion is Mac OS X 10.8, Darwin 12.
-BASE_EXPORT bool IsOSMountainLion();
-BASE_EXPORT bool IsOSMountainLionOrEarlier();
-BASE_EXPORT bool IsOSMountainLionOrLater();
-
-// Mavericks is Mac OS X 10.9, Darwin 13.
+// Mavericks is OS X 10.9, Darwin 13.
BASE_EXPORT bool IsOSMavericks();
-BASE_EXPORT bool IsOSMavericksOrEarlier();
BASE_EXPORT bool IsOSMavericksOrLater();
-// Yosemite is Mac OS X 10.10, Darwin 14.
+// Yosemite is OS X 10.10, Darwin 14.
BASE_EXPORT bool IsOSYosemite();
BASE_EXPORT bool IsOSYosemiteOrEarlier();
BASE_EXPORT bool IsOSYosemiteOrLater();
-// El Capitan is Mac OS X 10.11, Darwin 15.
+// El Capitan is OS X 10.11, Darwin 15.
BASE_EXPORT bool IsOSElCapitan();
+BASE_EXPORT bool IsOSElCapitanOrEarlier();
BASE_EXPORT bool IsOSElCapitanOrLater();
+// Sierra is macOS 10.12, Darwin 16.
+BASE_EXPORT bool IsOSSierra();
+BASE_EXPORT bool IsOSSierraOrLater();
+
// This should be infrequently used. It only makes sense to use this to avoid
// codepaths that are very likely to break on future (unreleased, untested,
// unborn) OS releases, or to log when the OS is newer than any known version.
-BASE_EXPORT bool IsOSLaterThanElCapitan_DontCallThis();
+BASE_EXPORT bool IsOSLaterThanSierra_DontCallThis();
// Inline functions that are redundant due to version ranges being mutually-
// exclusive.
-inline bool IsOSMountainLionOrEarlier() { return !IsOSMavericksOrLater(); }
-inline bool IsOSMavericksOrEarlier() { return !IsOSYosemiteOrLater(); }
inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
+inline bool IsOSElCapitanOrEarlier() { return !IsOSSierraOrLater(); }
// When the deployment target is set, the code produced cannot run on earlier
// OS releases. That enables some of the IsOS* family to be implemented as
// constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
// contains the value of the deployment target.
-#if defined(MAC_OS_X_VERSION_10_8) && \
- MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_8
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_8
-inline bool IsOSMountainLionOrLater() { return true; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_8) && \
- MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_8
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_8
-inline bool IsOSMountainLion() { return false; }
-#endif
-
#if defined(MAC_OS_X_VERSION_10_9) && \
MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_9
#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_9
@@ -194,7 +180,19 @@ inline bool IsOSElCapitanOrLater() { return true; }
MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
inline bool IsOSElCapitan() { return false; }
-inline bool IsOSLaterThanElCapitan_DontCallThis() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12
+inline bool IsOSSierraOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12
+inline bool IsOSSierra() { return false; }
+inline bool IsOSLaterThanSierra_DontCallThis() { return true; }
#endif
// Retrieve the system's model identifier string from the IOKit registry:
diff --git a/chromium/base/mac/mac_util.mm b/chromium/base/mac/mac_util.mm
index ec3c65dba3f..c7f7a6d6560 100644
--- a/chromium/base/mac/mac_util.mm
+++ b/chromium/base/mac/mac_util.mm
@@ -456,28 +456,14 @@ int MacOSXMinorVersion() {
}
enum {
- SNOW_LEOPARD_MINOR_VERSION = 6,
- LION_MINOR_VERSION = 7,
- MOUNTAIN_LION_MINOR_VERSION = 8,
MAVERICKS_MINOR_VERSION = 9,
YOSEMITE_MINOR_VERSION = 10,
EL_CAPITAN_MINOR_VERSION = 11,
+ SIERRA_MINOR_VERSION = 12,
};
} // namespace
-#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_8)
-bool IsOSMountainLion() {
- return MacOSXMinorVersion() == MOUNTAIN_LION_MINOR_VERSION;
-}
-#endif
-
-#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GE_10_8)
-bool IsOSMountainLionOrLater() {
- return MacOSXMinorVersion() >= MOUNTAIN_LION_MINOR_VERSION;
-}
-#endif
-
#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_9)
bool IsOSMavericks() {
return MacOSXMinorVersion() == MAVERICKS_MINOR_VERSION;
@@ -514,9 +500,21 @@ bool IsOSElCapitanOrLater() {
}
#endif
-#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11)
-bool IsOSLaterThanElCapitan_DontCallThis() {
- return MacOSXMinorVersion() > EL_CAPITAN_MINOR_VERSION;
+#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12)
+bool IsOSSierra() {
+ return MacOSXMinorVersion() == SIERRA_MINOR_VERSION;
+}
+#endif
+
+#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12)
+bool IsOSSierraOrLater() {
+ return MacOSXMinorVersion() >= SIERRA_MINOR_VERSION;
+}
+#endif
+
+#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12)
+bool IsOSLaterThanSierra_DontCallThis() {
+ return MacOSXMinorVersion() > SIERRA_MINOR_VERSION;
}
#endif
diff --git a/chromium/base/mac/mac_util_unittest.mm b/chromium/base/mac/mac_util_unittest.mm
index bed39f4a27f..15f603d5640 100644
--- a/chromium/base/mac/mac_util_unittest.mm
+++ b/chromium/base/mac/mac_util_unittest.mm
@@ -145,60 +145,56 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
base::SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix);
if (major == 10) {
- if (minor == 8) {
- EXPECT_TRUE(IsOSMountainLion());
- EXPECT_TRUE(IsOSMountainLionOrEarlier());
- EXPECT_TRUE(IsOSMountainLionOrLater());
- EXPECT_FALSE(IsOSMavericks());
- EXPECT_TRUE(IsOSMavericksOrEarlier());
- EXPECT_FALSE(IsOSMavericksOrLater());
- EXPECT_FALSE(IsOSYosemite());
- EXPECT_TRUE(IsOSYosemiteOrEarlier());
- EXPECT_FALSE(IsOSYosemiteOrLater());
- EXPECT_FALSE(IsOSElCapitan());
- EXPECT_FALSE(IsOSElCapitanOrLater());
- EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
- } else if (minor == 9) {
- EXPECT_FALSE(IsOSMountainLion());
- EXPECT_FALSE(IsOSMountainLionOrEarlier());
- EXPECT_TRUE(IsOSMountainLionOrLater());
+ if (minor == 9) {
EXPECT_TRUE(IsOSMavericks());
- EXPECT_TRUE(IsOSMavericksOrEarlier());
EXPECT_TRUE(IsOSMavericksOrLater());
EXPECT_FALSE(IsOSYosemite());
EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_FALSE(IsOSYosemiteOrLater());
EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_TRUE(IsOSElCapitanOrEarlier());
EXPECT_FALSE(IsOSElCapitanOrLater());
- EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
+ EXPECT_FALSE(IsOSSierra());
+ EXPECT_FALSE(IsOSSierraOrLater());
+ EXPECT_FALSE(IsOSLaterThanSierra_DontCallThis());
} else if (minor == 10) {
- EXPECT_FALSE(IsOSMountainLion());
- EXPECT_FALSE(IsOSMountainLionOrEarlier());
- EXPECT_TRUE(IsOSMountainLionOrLater());
EXPECT_FALSE(IsOSMavericks());
- EXPECT_FALSE(IsOSMavericksOrEarlier());
EXPECT_TRUE(IsOSMavericksOrLater());
EXPECT_TRUE(IsOSYosemite());
EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_TRUE(IsOSYosemiteOrLater());
EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_TRUE(IsOSElCapitanOrEarlier());
EXPECT_FALSE(IsOSElCapitanOrLater());
- EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
+ EXPECT_FALSE(IsOSSierra());
+ EXPECT_FALSE(IsOSSierraOrLater());
+ EXPECT_FALSE(IsOSLaterThanSierra_DontCallThis());
} else if (minor == 11) {
- EXPECT_FALSE(IsOSMountainLion());
- EXPECT_FALSE(IsOSMountainLionOrEarlier());
- EXPECT_TRUE(IsOSMountainLionOrLater());
EXPECT_FALSE(IsOSMavericks());
- EXPECT_FALSE(IsOSMavericksOrEarlier());
EXPECT_TRUE(IsOSMavericksOrLater());
EXPECT_FALSE(IsOSYosemite());
EXPECT_FALSE(IsOSYosemiteOrEarlier());
EXPECT_TRUE(IsOSYosemiteOrLater());
EXPECT_TRUE(IsOSElCapitan());
+ EXPECT_TRUE(IsOSElCapitanOrEarlier());
+ EXPECT_TRUE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSSierra());
+ EXPECT_FALSE(IsOSSierraOrLater());
+ EXPECT_FALSE(IsOSLaterThanSierra_DontCallThis());
+ } else if (minor == 12) {
+ EXPECT_FALSE(IsOSMavericks());
+ EXPECT_TRUE(IsOSMavericksOrLater());
+ EXPECT_FALSE(IsOSYosemite());
+ EXPECT_FALSE(IsOSYosemiteOrEarlier());
+ EXPECT_TRUE(IsOSYosemiteOrLater());
+ EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_FALSE(IsOSElCapitanOrEarlier());
EXPECT_TRUE(IsOSElCapitanOrLater());
- EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
+ EXPECT_TRUE(IsOSSierra());
+ EXPECT_TRUE(IsOSSierraOrLater());
+ EXPECT_FALSE(IsOSLaterThanSierra_DontCallThis());
} else {
- // Not six, seven, eight, nine, ten, or eleven. Ah, ah, ah.
+ // Not nine, ten, eleven, or twelve. Ah, ah, ah.
EXPECT_TRUE(false);
}
} else {
diff --git a/chromium/base/mac/mach_port_broker_unittest.cc b/chromium/base/mac/mach_port_broker_unittest.cc
index c15afb68fd1..bff8eb6a9bc 100644
--- a/chromium/base/mac/mach_port_broker_unittest.cc
+++ b/chromium/base/mac/mach_port_broker_unittest.cc
@@ -23,7 +23,8 @@ class MachPortBrokerTest : public testing::Test,
public:
MachPortBrokerTest()
: broker_(kBootstrapPortName),
- event_(true, false),
+ event_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
received_process_(kNullProcessHandle) {
broker_.AddObserver(this);
}
diff --git a/chromium/base/mac/objc_property_releaser.h b/chromium/base/mac/objc_property_releaser.h
index 973d793218b..8160674b99b 100644
--- a/chromium/base/mac/objc_property_releaser.h
+++ b/chromium/base/mac/objc_property_releaser.h
@@ -9,6 +9,10 @@
#include "base/base_export.h"
+#if defined(__has_feature) && __has_feature(objc_arc)
+#error "In ARC code properties are release automatically. Don't use this class."
+#endif
+
namespace base {
namespace mac {
diff --git a/chromium/base/mac/scoped_block.h b/chromium/base/mac/scoped_block.h
index bc2688f13ac..8199677f150 100644
--- a/chromium/base/mac/scoped_block.h
+++ b/chromium/base/mac/scoped_block.h
@@ -9,6 +9,12 @@
#include "base/mac/scoped_typeref.h"
+#if defined(__has_feature) && __has_feature(objc_arc)
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) (__bridge TYPE)(VALUE)
+#else
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) VALUE
+#endif
+
namespace base {
namespace mac {
@@ -17,8 +23,13 @@ namespace internal {
template <typename B>
struct ScopedBlockTraits {
static B InvalidValue() { return nullptr; }
- static B Retain(B block) { return Block_copy(block); }
- static void Release(B block) { Block_release(block); }
+ static B Retain(B block) {
+ return BASE_MAC_BRIDGE_CAST(
+ B, Block_copy(BASE_MAC_BRIDGE_CAST(const void*, block)));
+ }
+ static void Release(B block) {
+ Block_release(BASE_MAC_BRIDGE_CAST(const void*, block));
+ }
};
} // namespace internal
@@ -32,4 +43,6 @@ using ScopedBlock = ScopedTypeRef<B, internal::ScopedBlockTraits<B>>;
} // namespace mac
} // namespace base
+#undef BASE_MAC_BRIDGE_CAST
+
#endif // BASE_MAC_SCOPED_BLOCK_H_
diff --git a/chromium/base/mac/scoped_nsobject.h b/chromium/base/mac/scoped_nsobject.h
index 4b26acf7585..cc54aa0ca8c 100644
--- a/chromium/base/mac/scoped_nsobject.h
+++ b/chromium/base/mac/scoped_nsobject.h
@@ -12,10 +12,13 @@
// singled out because it is most typically included from other header files.
#import <Foundation/NSObject.h>
+#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/mac/scoped_typeref.h"
+#if !defined(__has_feature) || !__has_feature(objc_arc)
@class NSAutoreleasePool;
+#endif
namespace base {
@@ -38,14 +41,39 @@ namespace base {
// scoped_nsautorelease_pool.h instead.
// We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile
// time with a template specialization (see below).
+//
+// If Automatic Reference Counting (aka ARC) is enabled, then the ownership
+// policy is not controllable by the user, as ARC makes it really difficult to
+// transfer ownership (the reference passed to the scoped_nsobject constructor
+// is sunk by ARC, and __attribute((ns_consumed)) appears not to work correctly
+// with Objective-C++; see https://llvm.org/bugs/show_bug.cgi?id=27887). Because
+// of that, the policy is always |RETAIN| when using ARC.
namespace internal {
+BASE_EXPORT id ScopedNSProtocolTraitsRetain(__unsafe_unretained id obj)
+ __attribute((ns_returns_not_retained));
+BASE_EXPORT id ScopedNSProtocolTraitsAutoRelease(__unsafe_unretained id obj)
+ __attribute((ns_returns_not_retained));
+BASE_EXPORT void ScopedNSProtocolTraitsRelease(__unsafe_unretained id obj);
+
+// Traits for ScopedTypeRef<>. As this class may be compiled from files with
+// Automatic Reference Counting enabled or not, all methods are annotated to
+// enforce the same code generation in both cases (in particular, the Retain
+// method uses ns_returns_not_retained to prevent ARC from inserting a -release
+// call on the returned value and thus defeating the -retain).
template <typename NST>
struct ScopedNSProtocolTraits {
- static NST InvalidValue() { return nil; }
- static NST Retain(NST nst) { return [nst retain]; }
- static void Release(NST nst) { [nst release]; }
+ static NST InvalidValue() __attribute((ns_returns_not_retained)) {
+ return nil;
+ }
+ static NST Retain(__unsafe_unretained NST nst)
+ __attribute((ns_returns_not_retained)) {
+ return ScopedNSProtocolTraitsRetain(nst);
+ }
+ static void Release(__unsafe_unretained NST nst) {
+ ScopedNSProtocolTraitsRelease(nst);
+ }
};
} // namespace internal
@@ -54,11 +82,49 @@ template <typename NST>
class scoped_nsprotocol
: public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
public:
- using ScopedTypeRef<NST,
- internal::ScopedNSProtocolTraits<NST>>::ScopedTypeRef;
+ using Traits = internal::ScopedNSProtocolTraits<NST>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsprotocol(
+ NST object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : ScopedTypeRef<NST, Traits>(object, policy) {}
+#else
+ explicit scoped_nsprotocol(NST object = Traits::InvalidValue())
+ : ScopedTypeRef<NST, Traits>(object, base::scoped_policy::RETAIN) {}
+#endif
+
+ scoped_nsprotocol(const scoped_nsprotocol<NST>& that)
+ : ScopedTypeRef<NST, Traits>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsprotocol(const scoped_nsprotocol<NSR>& that_as_subclass)
+ : ScopedTypeRef<NST, Traits>(that_as_subclass) {}
+
+ scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
+ : ScopedTypeRef<NST, Traits>(that) {}
+
+ scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
+ ScopedTypeRef<NST, Traits>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(NST object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ ScopedTypeRef<NST, Traits>::reset(object, policy);
+ }
+#else
+ void reset(NST object = Traits::InvalidValue()) {
+ ScopedTypeRef<NST, Traits>::reset(object, base::scoped_policy::RETAIN);
+ }
+#endif
// Shift reference to the autorelease pool to be released later.
- NST autorelease() { return [this->release() autorelease]; }
+ NST autorelease() __attribute((ns_returns_not_retained)) {
+ return internal::ScopedNSProtocolTraitsAutoRelease(this->release());
+ }
};
// Free functions
@@ -80,17 +146,92 @@ bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
template <typename NST>
class scoped_nsobject : public scoped_nsprotocol<NST*> {
public:
- using scoped_nsprotocol<NST*>::scoped_nsprotocol;
-
+ using Traits = typename scoped_nsprotocol<NST*>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsobject(
+ NST* object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : scoped_nsprotocol<NST*>(object, policy) {}
+#else
+ explicit scoped_nsobject(NST* object = Traits::InvalidValue())
+ : scoped_nsprotocol<NST*>(object) {}
+#endif
+
+ scoped_nsobject(const scoped_nsobject<NST>& that)
+ : scoped_nsprotocol<NST*>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+ : scoped_nsprotocol<NST*>(that_as_subclass) {}
+
+ scoped_nsobject(scoped_nsobject<NST>&& that)
+ : scoped_nsprotocol<NST*>(that) {}
+
+ scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
+ scoped_nsprotocol<NST*>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(NST* object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ scoped_nsprotocol<NST*>::reset(object, policy);
+ }
+#else
+ void reset(NST* object = Traits::InvalidValue()) {
+ scoped_nsprotocol<NST*>::reset(object);
+ }
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
"Use ScopedNSAutoreleasePool instead");
+#endif
};
// Specialization to make scoped_nsobject<id> work.
template<>
class scoped_nsobject<id> : public scoped_nsprotocol<id> {
public:
- using scoped_nsprotocol<id>::scoped_nsprotocol;
+ using Traits = typename scoped_nsprotocol<id>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsobject(
+ id object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : scoped_nsprotocol<id>(object, policy) {}
+#else
+ explicit scoped_nsobject(id object = Traits::InvalidValue())
+ : scoped_nsprotocol<id>(object) {}
+#endif
+
+ scoped_nsobject(const scoped_nsobject<id>& that)
+ : scoped_nsprotocol<id>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+ : scoped_nsprotocol<id>(that_as_subclass) {}
+
+ scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
+
+ scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
+ scoped_nsprotocol<id>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(id object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ scoped_nsprotocol<id>::reset(object, policy);
+ }
+#else
+ void reset(id object = Traits::InvalidValue()) {
+ scoped_nsprotocol<id>::reset(object);
+ }
+#endif
};
} // namespace base
diff --git a/chromium/base/mac/scoped_nsobject.mm b/chromium/base/mac/scoped_nsobject.mm
new file mode 100644
index 00000000000..65b40317dc7
--- /dev/null
+++ b/chromium/base/mac/scoped_nsobject.mm
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/scoped_nsobject.h"
+
+namespace base {
+namespace internal {
+
+id ScopedNSProtocolTraitsRetain(id obj) {
+ return [obj retain];
+}
+
+id ScopedNSProtocolTraitsAutoRelease(id obj) {
+ return [obj autorelease];
+}
+
+void ScopedNSProtocolTraitsRelease(id obj) {
+ return [obj release];
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/mac/scoped_nsobject_unittest.mm b/chromium/base/mac/scoped_nsobject_unittest.mm
index 8b0b97df3a7..cefb5fe19fb 100644
--- a/chromium/base/mac/scoped_nsobject_unittest.mm
+++ b/chromium/base/mac/scoped_nsobject_unittest.mm
@@ -8,8 +8,16 @@
#include "base/mac/scoped_nsobject.h"
#include "testing/gtest/include/gtest/gtest.h"
+// See scoped_nsobject_unittest_arc.mm for why this is necessary. Remove once
+// gyp support is dropped.
+void ScopedNSObjectUnittestArcLinkerWorkaround();
+
namespace {
+TEST(ScopedNSObjectTest, EnableARCTests) {
+ ScopedNSObjectUnittestArcLinkerWorkaround();
+}
+
TEST(ScopedNSObjectTest, ScopedNSObject) {
base::scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
ASSERT_TRUE(p1.get());
diff --git a/chromium/base/mac/scoped_nsobject_unittest_arc.mm b/chromium/base/mac/scoped_nsobject_unittest_arc.mm
new file mode 100644
index 00000000000..e69348801f5
--- /dev/null
+++ b/chromium/base/mac/scoped_nsobject_unittest_arc.mm
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#import <CoreFoundation/CoreFoundation.h>
+
+#include "base/logging.h"
+#import "base/mac/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+// This free function is there to ensure that the object file is not discarded
+// at link time when building with gyp (it is required because the target is
+// built as a static library with gyp, not a source set, which causes the object
+// file to be discarded if no symbol is used). Remove once gyp support is dropped.
+void ScopedNSObjectUnittestArcLinkerWorkaround() {}
+
+namespace {
+
+template <typename NST>
+CFIndex GetRetainCount(const base::scoped_nsobject<NST>& nst) {
+ @autoreleasepool {
+ return CFGetRetainCount((__bridge CFTypeRef)nst.get()) - 1;
+ }
+}
+
+#if __has_feature(objc_arc_weak)
+TEST(ScopedNSObjectTestARC, DefaultPolicyIsRetain) {
+ __weak id o;
+ @autoreleasepool {
+ base::scoped_nsprotocol<id> p([[NSObject alloc] init]);
+ o = p.get();
+ DCHECK_EQ(o, p.get());
+ }
+ DCHECK_EQ(o, nil);
+}
+#endif
+
+TEST(ScopedNSObjectTestARC, ScopedNSObject) {
+ base::scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+ @autoreleasepool {
+ EXPECT_TRUE(p1.get());
+ EXPECT_TRUE(p1.get());
+ }
+ EXPECT_EQ(1, GetRetainCount(p1));
+ EXPECT_EQ(1, GetRetainCount(p1));
+ base::scoped_nsobject<NSObject> p2(p1);
+ @autoreleasepool {
+ EXPECT_EQ(p1.get(), p2.get());
+ }
+ EXPECT_EQ(2, GetRetainCount(p1));
+ p2.reset();
+ EXPECT_EQ(nil, p2.get());
+ EXPECT_EQ(1, GetRetainCount(p1));
+ {
+ base::scoped_nsobject<NSObject> p3 = p1;
+ @autoreleasepool {
+ EXPECT_EQ(p1.get(), p3.get());
+ }
+ EXPECT_EQ(2, GetRetainCount(p1));
+ p3 = p1;
+ @autoreleasepool {
+ EXPECT_EQ(p1.get(), p3.get());
+ }
+ EXPECT_EQ(2, GetRetainCount(p1));
+ }
+ EXPECT_EQ(1, GetRetainCount(p1));
+ base::scoped_nsobject<NSObject> p4;
+ @autoreleasepool {
+ p4 = base::scoped_nsobject<NSObject>(p1.get());
+ }
+ EXPECT_EQ(2, GetRetainCount(p1));
+ @autoreleasepool {
+ EXPECT_TRUE(p1 == p1.get());
+ EXPECT_TRUE(p1 == p1);
+ EXPECT_FALSE(p1 != p1);
+ EXPECT_FALSE(p1 != p1.get());
+ }
+ base::scoped_nsobject<NSObject> p5([[NSObject alloc] init]);
+ @autoreleasepool {
+ EXPECT_TRUE(p1 != p5);
+ EXPECT_TRUE(p1 != p5.get());
+ EXPECT_FALSE(p1 == p5);
+ EXPECT_FALSE(p1 == p5.get());
+ }
+
+ base::scoped_nsobject<NSObject> p6 = p1;
+ EXPECT_EQ(3, GetRetainCount(p6));
+ @autoreleasepool {
+ p6.autorelease();
+ EXPECT_EQ(nil, p6.get());
+ }
+ EXPECT_EQ(2, GetRetainCount(p1));
+}
+
+TEST(ScopedNSObjectTestARC, ScopedNSObjectInContainer) {
+ base::scoped_nsobject<id> p([[NSObject alloc] init]);
+ @autoreleasepool {
+ EXPECT_TRUE(p.get());
+ }
+ EXPECT_EQ(1, GetRetainCount(p));
+ @autoreleasepool {
+ std::vector<base::scoped_nsobject<id>> objects;
+ objects.push_back(p);
+ EXPECT_EQ(2, GetRetainCount(p));
+ @autoreleasepool {
+ EXPECT_EQ(p.get(), objects[0].get());
+ }
+ objects.push_back(base::scoped_nsobject<id>([[NSObject alloc] init]));
+ @autoreleasepool {
+ EXPECT_TRUE(objects[1].get());
+ }
+ EXPECT_EQ(1, GetRetainCount(objects[1]));
+ }
+ EXPECT_EQ(1, GetRetainCount(p));
+}
+
+TEST(ScopedNSObjectTestARC, ScopedNSObjectFreeFunctions) {
+ base::scoped_nsobject<id> p1([[NSObject alloc] init]);
+ id o1 = p1.get();
+ EXPECT_TRUE(o1 == p1);
+ EXPECT_FALSE(o1 != p1);
+ base::scoped_nsobject<id> p2([[NSObject alloc] init]);
+ EXPECT_TRUE(o1 != p2);
+ EXPECT_FALSE(o1 == p2);
+ id o2 = p2.get();
+ swap(p1, p2);
+ EXPECT_EQ(o2, p1.get());
+ EXPECT_EQ(o1, p2.get());
+}
+
+} // namespace
diff --git a/chromium/base/mac/scoped_typeref.h b/chromium/base/mac/scoped_typeref.h
index eed5afb539e..b8d8a142625 100644
--- a/chromium/base/mac/scoped_typeref.h
+++ b/chromium/base/mac/scoped_typeref.h
@@ -53,8 +53,8 @@ class ScopedTypeRef {
public:
typedef T element_type;
- ScopedTypeRef(
- T object = Traits::InvalidValue(),
+ explicit ScopedTypeRef(
+ __unsafe_unretained T object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: object_(object) {
if (object_ && policy == base::scoped_policy::RETAIN)
@@ -67,12 +67,10 @@ class ScopedTypeRef {
object_ = Traits::Retain(object_);
}
- // Without this, passing a ScopedTypeRef<A,TraitsX> to construct a
- // ScopedTypeRef<A,TraitsY> would automatically cast down to an A, and then
- // ASSUME ownership of A, when a retain is what was needed.
- template<typename OtherTraits>
- ScopedTypeRef(const ScopedTypeRef<T, OtherTraits>& that_with_other_traits)
- : object_(that_with_other_traits.get()) {
+ // This allows passing an object to a function that takes its superclass.
+ template <typename R, typename RTraits>
+ explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
+ : object_(that_as_subclass.get()) {
if (object_)
object_ = Traits::Retain(object_);
}
@@ -99,9 +97,9 @@ class ScopedTypeRef {
return &object_;
}
- void reset(T object = Traits::InvalidValue(),
+ void reset(__unsafe_unretained T object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy =
- base::scoped_policy::ASSUME) {
+ base::scoped_policy::ASSUME) {
if (object && policy == base::scoped_policy::RETAIN)
object = Traits::Retain(object);
if (object_)
@@ -109,24 +107,16 @@ class ScopedTypeRef {
object_ = object;
}
- bool operator==(T that) const {
- return object_ == that;
- }
+ bool operator==(__unsafe_unretained T that) const { return object_ == that; }
- bool operator!=(T that) const {
- return object_ != that;
- }
+ bool operator!=(__unsafe_unretained T that) const { return object_ != that; }
- operator T() const {
- return object_;
- }
+ operator T() const __attribute((ns_returns_not_retained)) { return object_; }
- T get() const {
- return object_;
- }
+ T get() const __attribute((ns_returns_not_retained)) { return object_; }
void swap(ScopedTypeRef& that) {
- T temp = that.object_;
+ __unsafe_unretained T temp = that.object_;
that.object_ = object_;
object_ = temp;
}
@@ -134,14 +124,14 @@ class ScopedTypeRef {
// ScopedTypeRef<>::release() is like std::unique_ptr<>::release. It is NOT
// a wrapper for Release(). To force a ScopedTypeRef<> object to call
// Release(), use ScopedTypeRef<>::reset().
- T release() WARN_UNUSED_RESULT {
- T temp = object_;
+ T release() __attribute((ns_returns_not_retained)) WARN_UNUSED_RESULT {
+ __unsafe_unretained T temp = object_;
object_ = Traits::InvalidValue();
return temp;
}
private:
- T object_;
+ __unsafe_unretained T object_;
};
} // namespace base
diff --git a/chromium/base/mac/sdk_forward_declarations.h b/chromium/base/mac/sdk_forward_declarations.h
index 9cb52b9a278..ab23b588714 100644
--- a/chromium/base/mac/sdk_forward_declarations.h
+++ b/chromium/base/mac/sdk_forward_declarations.h
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// This file contains forward declarations for items in later SDKs than the
-// default one with which Chromium is built (currently 10.6).
+// default one with which Chromium is built (currently 10.10).
// If you call any function from this header, be sure to check at runtime for
// respondsToSelector: before calling these functions (else your code will crash
// on older OS X versions that chrome still supports).
@@ -12,6 +12,7 @@
#define BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
#import <AppKit/AppKit.h>
+#import <CoreBluetooth/CoreBluetooth.h>
#import <CoreWLAN/CoreWLAN.h>
#import <ImageCaptureCore/ImageCaptureCore.h>
#import <IOBluetooth/IOBluetooth.h>
@@ -20,211 +21,10 @@
#include "base/base_export.h"
// ----------------------------------------------------------------------------
-// Either define or forward declare classes only available in OSX 10.7+.
-// ----------------------------------------------------------------------------
-
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
-
-@interface CWChannel : NSObject
-@end
-
-@interface CBPeripheral : NSObject
-@end
-
-@interface CBCentralManager : NSObject
-@end
-
-@interface CBUUID : NSObject
-@end
-
-#else
-
-@class CWChannel;
-@class CBPeripheral;
-@class CBCentralManager;
-@class CBUUID;
-
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_8) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
-
-@interface NSUUID : NSObject
-@end
-
-#else
-
-@class NSUUID;
-
-#endif // MAC_OS_X_VERSION_10_8
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
-
-// NSProgress is public API in 10.9, but a version of it exists and is usable
-// in 10.8.
-@interface NSProgress : NSObject
-@end
-
-@interface NSAppearance : NSObject
-@end
-
-#else
-
-@class NSProgress;
-@class NSAppearance;
-
-#endif // MAC_OS_X_VERSION_10_9
-
-#if !defined(MAC_OS_X_VERSION_10_10) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_10
-
-@interface NSUserActivity : NSObject
-@end
-
-#else
-
-@class NSUserActivity;
-
-#endif // MAC_OS_X_VERSION_10_10
-
-// ----------------------------------------------------------------------------
// Define typedefs, enums, and protocols not available in the version of the
// OSX SDK being compiled against.
// ----------------------------------------------------------------------------
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
-
-enum {
- NSEventPhaseNone = 0, // event not associated with a phase.
- NSEventPhaseBegan = 0x1 << 0,
- NSEventPhaseStationary = 0x1 << 1,
- NSEventPhaseChanged = 0x1 << 2,
- NSEventPhaseEnded = 0x1 << 3,
- NSEventPhaseCancelled = 0x1 << 4
-};
-typedef NSUInteger NSEventPhase;
-
-enum {
- NSFullScreenWindowMask = 1 << 14,
-};
-
-enum {
- NSApplicationPresentationFullScreen = 1 << 10,
-};
-
-enum {
- NSWindowCollectionBehaviorFullScreenPrimary = 1 << 7,
- NSWindowCollectionBehaviorFullScreenAuxiliary = 1 << 8,
-};
-
-enum {
- NSEventSwipeTrackingLockDirection = 0x1 << 0,
- NSEventSwipeTrackingClampGestureAmount = 0x1 << 1,
-};
-typedef NSUInteger NSEventSwipeTrackingOptions;
-
-enum {
- NSWindowAnimationBehaviorDefault = 0,
- NSWindowAnimationBehaviorNone = 2,
- NSWindowAnimationBehaviorDocumentWindow = 3,
- NSWindowAnimationBehaviorUtilityWindow = 4,
- NSWindowAnimationBehaviorAlertPanel = 5
-};
-typedef NSInteger NSWindowAnimationBehavior;
-
-enum {
- NSWindowDocumentVersionsButton = 6,
- NSWindowFullScreenButton,
-};
-typedef NSUInteger NSWindowButton;
-
-enum CWChannelBand {
- kCWChannelBandUnknown = 0,
- kCWChannelBand2GHz = 1,
- kCWChannelBand5GHz = 2,
-};
-
-enum {
- kCWSecurityNone = 0,
- kCWSecurityWEP = 1,
- kCWSecurityWPAPersonal = 2,
- kCWSecurityWPAPersonalMixed = 3,
- kCWSecurityWPA2Personal = 4,
- kCWSecurityPersonal = 5,
- kCWSecurityDynamicWEP = 6,
- kCWSecurityWPAEnterprise = 7,
- kCWSecurityWPAEnterpriseMixed = 8,
- kCWSecurityWPA2Enterprise = 9,
- kCWSecurityEnterprise = 10,
- kCWSecurityUnknown = NSIntegerMax,
-};
-
-typedef NSInteger CWSecurity;
-
-enum {
- kBluetoothFeatureLESupportedController = (1 << 6L),
-};
-
-@protocol IOBluetoothDeviceInquiryDelegate
-- (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
-- (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
- device:(IOBluetoothDevice*)device;
-- (void)deviceInquiryComplete:(IOBluetoothDeviceInquiry*)sender
- error:(IOReturn)error
- aborted:(BOOL)aborted;
-@end
-
-enum {
- CBPeripheralStateDisconnected = 0,
- CBPeripheralStateConnecting,
- CBPeripheralStateConnected,
-};
-typedef NSInteger CBPeripheralState;
-
-enum {
- CBCentralManagerStateUnknown = 0,
- CBCentralManagerStateResetting,
- CBCentralManagerStateUnsupported,
- CBCentralManagerStateUnauthorized,
- CBCentralManagerStatePoweredOff,
- CBCentralManagerStatePoweredOn,
-};
-typedef NSInteger CBCentralManagerState;
-
-@protocol CBCentralManagerDelegate;
-
-@protocol CBCentralManagerDelegate<NSObject>
-- (void)centralManagerDidUpdateState:(CBCentralManager*)central;
-- (void)centralManager:(CBCentralManager*)central
- didDiscoverPeripheral:(CBPeripheral*)peripheral
- advertisementData:(NSDictionary*)advertisementData
- RSSI:(NSNumber*)RSSI;
-@end
-
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_8) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
-
-enum { NSEventPhaseMayBegin = 0x1 << 5 };
-
-#endif // MAC_OS_X_VERSION_10_8
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
-
-enum {
- NSWindowOcclusionStateVisible = 1UL << 1,
-};
-typedef NSUInteger NSWindowOcclusionState;
-
-enum { NSWorkspaceLaunchWithErrorPresentation = 0x00000040 };
-
-#endif // MAC_OS_X_VERSION_10_9
-
#if !defined(MAC_OS_X_VERSION_10_11) || \
MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
@@ -243,6 +43,13 @@ typedef NSInteger NSPressureBehavior;
- (instancetype)initWithPressureBehavior:(NSPressureBehavior)pressureBehavior;
@end
+enum {
+ NSSpringLoadingHighlightNone = 0,
+ NSSpringLoadingHighlightStandard,
+ NSSpringLoadingHighlightEmphasized
+};
+typedef NSUInteger NSSpringLoadingHighlight;
+
#endif // MAC_OS_X_VERSION_10_11
// ----------------------------------------------------------------------------
@@ -251,18 +58,6 @@ typedef NSInteger NSPressureBehavior;
// ----------------------------------------------------------------------------
extern "C" {
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-BASE_EXPORT extern NSString* const NSWindowWillEnterFullScreenNotification;
-BASE_EXPORT extern NSString* const NSWindowWillExitFullScreenNotification;
-BASE_EXPORT extern NSString* const NSWindowDidEnterFullScreenNotification;
-BASE_EXPORT extern NSString* const NSWindowDidExitFullScreenNotification;
-BASE_EXPORT extern NSString* const
- NSWindowDidChangeBackingPropertiesNotification;
-BASE_EXPORT extern NSString* const CBAdvertisementDataServiceDataKey;
-BASE_EXPORT extern NSString* const CBAdvertisementDataServiceUUIDsKey;
-#endif // MAC_OS_X_VERSION_10_7
-
#if !defined(MAC_OS_X_VERSION_10_9) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
@@ -279,147 +74,12 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
} // extern "C"
// ----------------------------------------------------------------------------
-// If compiling against an older version of the OSX SDK, declare functions that
-// are available in newer versions of the OSX SDK. If compiling against a newer
-// version of the OSX SDK, redeclare those same functions to suppress
-// -Wpartial-availability warnings.
+// If compiling against an older version of the OSX SDK, declare classes and
+// functions that are available in newer versions of the OSX SDK. If compiling
+// against a newer version of the OSX SDK, redeclare those same classes and
+// functions to suppress -Wpartial-availability warnings.
// ----------------------------------------------------------------------------
-// Once Chrome no longer supports OSX 10.6, everything within this preprocessor
-// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-
-@interface NSEvent (LionSDK)
-+ (BOOL)isSwipeTrackingFromScrollEventsEnabled;
-- (NSEventPhase)momentumPhase;
-- (NSEventPhase)phase;
-- (BOOL)hasPreciseScrollingDeltas;
-- (CGFloat)scrollingDeltaX;
-- (CGFloat)scrollingDeltaY;
-- (void)trackSwipeEventWithOptions:(NSEventSwipeTrackingOptions)options
- dampenAmountThresholdMin:(CGFloat)minDampenThreshold
- max:(CGFloat)maxDampenThreshold
- usingHandler:(void (^)(CGFloat gestureAmount,
- NSEventPhase phase,
- BOOL isComplete,
- BOOL* stop))trackingHandler;
-- (BOOL)isDirectionInvertedFromDevice;
-@end
-
-@interface NSApplication (LionSDK)
-- (void)disableRelaunchOnLogin;
-@end
-
-@interface CALayer (LionSDK)
-- (CGFloat)contentsScale;
-- (void)setContentsScale:(CGFloat)contentsScale;
-@end
-
-@interface NSScreen (LionSDK)
-- (CGFloat)backingScaleFactor;
-- (NSRect)convertRectToBacking:(NSRect)aRect;
-@end
-
-@interface NSWindow (LionSDK)
-- (CGFloat)backingScaleFactor;
-- (NSWindowAnimationBehavior)animationBehavior;
-- (void)setAnimationBehavior:(NSWindowAnimationBehavior)newAnimationBehavior;
-- (void)toggleFullScreen:(id)sender;
-- (void)setRestorable:(BOOL)flag;
-- (NSRect)convertRectFromScreen:(NSRect)aRect;
-- (NSRect)convertRectToScreen:(NSRect)aRect;
-@end
-
-@interface NSCursor (LionSDKDeclarations)
-+ (NSCursor*)IBeamCursorForVerticalLayout;
-@end
-
-@interface NSAnimationContext (LionSDK)
-+ (void)runAnimationGroup:(void (^)(NSAnimationContext* context))changes
- completionHandler:(void (^)(void))completionHandler;
-@property(copy) void (^completionHandler)(void);
-@end
-
-@interface NSView (LionSDK)
-- (NSSize)convertSizeFromBacking:(NSSize)size;
-- (void)setWantsBestResolutionOpenGLSurface:(BOOL)flag;
-- (NSDraggingSession*)beginDraggingSessionWithItems:(NSArray*)items
- event:(NSEvent*)event
- source:
- (id<NSDraggingSource>)source;
-@end
-
-@interface NSObject (ICCameraDeviceDelegateLionSDK)
-- (void)deviceDidBecomeReadyWithCompleteContentCatalog:(ICDevice*)device;
-- (void)didDownloadFile:(ICCameraFile*)file
- error:(NSError*)error
- options:(NSDictionary*)options
- contextInfo:(void*)contextInfo;
-@end
-
-@interface CWInterface (LionSDK)
-- (BOOL)associateToNetwork:(CWNetwork*)network
- password:(NSString*)password
- error:(NSError**)error;
-- (NSSet*)scanForNetworksWithName:(NSString*)networkName error:(NSError**)error;
-@end
-
-@interface CWChannel (LionSDK)
-@property(readonly) CWChannelBand channelBand;
-@end
-
-@interface CWNetwork (LionSDK)
-@property(readonly) CWChannel* wlanChannel;
-@property(readonly) NSInteger rssiValue;
-- (BOOL)supportsSecurity:(CWSecurity)security;
-@end
-
-@interface IOBluetoothHostController (LionSDK)
-- (NSString*)nameAsString;
-- (BluetoothHCIPowerState)powerState;
-@end
-
-@interface IOBluetoothL2CAPChannel (LionSDK)
-@property(readonly) BluetoothL2CAPMTU outgoingMTU;
-@end
-
-@interface IOBluetoothDevice (LionSDK)
-- (NSString*)addressString;
-- (unsigned int)classOfDevice;
-- (BluetoothConnectionHandle)connectionHandle;
-- (BluetoothHCIRSSIValue)rawRSSI;
-- (NSArray*)services;
-- (IOReturn)performSDPQuery:(id)target uuids:(NSArray*)uuids;
-@end
-
-@interface CBPeripheral (LionSDK)
-@property(readonly, nonatomic) CFUUIDRef UUID;
-@property(retain, readonly) NSString* name;
-@property(readonly) BOOL isConnected;
-@end
-
-@interface CBCentralManager (LionSDK)
-@property(readonly) CBCentralManagerState state;
-- (id)initWithDelegate:(id<CBCentralManagerDelegate>)delegate
- queue:(dispatch_queue_t)queue;
-- (void)scanForPeripheralsWithServices:(NSArray*)serviceUUIDs
- options:(NSDictionary*)options;
-- (void)stopScan;
-@end
-
-@interface CBUUID (LionSDK)
-@property(nonatomic, readonly) NSData* data;
-+ (CBUUID*)UUIDWithString:(NSString*)theString;
-@end
-
-BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
- id object,
- NSString* notification,
- NSDictionary* user_info);
-
-#endif // MAC_OS_X_VERSION_10_7
-
// Once Chrome no longer supports OSX 10.7, everything within this preprocessor
// block can be removed.
#if !defined(MAC_OS_X_VERSION_10_8) || \
@@ -444,6 +104,11 @@ BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
#if !defined(MAC_OS_X_VERSION_10_9) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+// NSProgress is public API in 10.9, but a version of it exists and is usable
+// in 10.8.
+@class NSProgress;
+@class NSAppearance;
+
@interface NSProgress (MavericksSDK)
- (instancetype)initWithParent:(NSProgress*)parentProgressOrNil
@@ -501,6 +166,8 @@ BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
@class NSVisualEffectView;
+@class NSUserActivity;
+
#endif // MAC_OS_X_VERSION_10_9
// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
@@ -559,8 +226,6 @@ BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
// declared in the OSX 10.9+ SDK, so when compiling against an OSX 10.9+ SDK,
// declare the symbol.
// ----------------------------------------------------------------------------
-#if defined(MAC_OS_X_VERSION_10_9) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9
BASE_EXPORT extern "C" NSString* const kCWSSIDDidChangeNotification;
-#endif
+
#endif // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
diff --git a/chromium/base/macros.h b/chromium/base/macros.h
index 554ea439ed4..60c5cbd8cb7 100644
--- a/chromium/base/macros.h
+++ b/chromium/base/macros.h
@@ -20,11 +20,11 @@
#define DISALLOW_ASSIGN(TypeName) \
void operator=(const TypeName&) = delete
-// A macro to disallow the copy constructor and operator= functions
-// This should be used in the private: declarations for a class
+// A macro to disallow the copy constructor and operator= functions.
+// This should be used in the private: declarations for a class.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
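
With DISALLOW_COPY_AND_ASSIGN now expanding to deleted functions, accidental copies fail at the point of use instead of surfacing as access or link errors. A small sketch; the Widget class is illustrative:

    class Widget {
     public:
      Widget() = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(Widget);  // copy constructor and operator= are deleted
    };

    // Widget a;
    // Widget b = a;  // error: call to a deleted constructor
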
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.cc b/chromium/base/memory/memory_pressure_monitor_mac.cc
index a80631eb7f8..dfbaff8a75b 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac.cc
@@ -65,9 +65,6 @@ MemoryPressureMonitor::~MemoryPressureMonitor() {
MemoryPressureListener::MemoryPressureLevel
MemoryPressureMonitor::GetCurrentPressureLevel() const {
- if (base::mac::IsOSMountainLionOrEarlier()) {
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
- }
int mac_memory_pressure;
size_t length = sizeof(int);
sysctlbyname("kern.memorystatus_vm_pressure_level", &mac_memory_pressure,
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index 5b866d14c81..05fc4c4d3ff 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -111,7 +111,7 @@ class BASE_EXPORT RefCountedThreadSafeBase {
//
// A base class for reference counted classes. Otherwise, known as a cheap
-// knock-off of WebKit's RefCounted<T> class. To use this guy just extend your
+// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
// class from it like so:
//
// class MyFoo : public base::RefCounted<MyFoo> {
diff --git a/chromium/base/memory/scoped_vector.h b/chromium/base/memory/scoped_vector.h
index adbab8cf490..f3581eaa9bd 100644
--- a/chromium/base/memory/scoped_vector.h
+++ b/chromium/base/memory/scoped_vector.h
@@ -11,7 +11,7 @@
#include <vector>
#include "base/logging.h"
-#include "base/move.h"
+#include "base/macros.h"
#include "base/stl_util.h"
// ScopedVector wraps a vector deleting the elements from its
@@ -21,8 +21,6 @@
// we have support for moveable types inside containers).
template <class T>
class ScopedVector {
- MOVE_ONLY_TYPE_FOR_CPP_03(ScopedVector)
-
public:
typedef typename std::vector<T*>::allocator_type allocator_type;
typedef typename std::vector<T*>::size_type size_type;
@@ -142,6 +140,8 @@ class ScopedVector {
private:
std::vector<T*> v_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedVector);
};
#endif // BASE_MEMORY_SCOPED_VECTOR_H_
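
ScopedVector keeps its owning-pointer semantics; only the copy-prevention mechanism changes from the removed MOVE_ONLY_TYPE_FOR_CPP_03 macro to DISALLOW_COPY_AND_ASSIGN. A rough sketch of typical use, assuming the usual vector-like push_back interface; Foo is illustrative:

    #include "base/memory/scoped_vector.h"

    struct Foo { int value = 0; };

    void Fill() {
      ScopedVector<Foo> foos;
      foos.push_back(new Foo);           // the vector owns and deletes its elements
      // ScopedVector<Foo> copy = foos;  // ill-formed: copying is deleted
    }
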
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index b7e5a050603..e1c9fa70bd7 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -24,6 +24,10 @@
#include "base/files/scoped_file.h"
#endif
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
namespace base {
class FilePath;
@@ -263,12 +267,10 @@ class BASE_EXPORT SharedMemory {
// before being mapped.
bool external_section_;
std::wstring name_;
- HANDLE mapped_file_;
+ win::ScopedHandle mapped_file_;
#elif defined(OS_MACOSX) && !defined(OS_IOS)
// The OS primitive that backs the shared memory region.
SharedMemoryHandle shm_;
-
- int readonly_mapped_file_;
#elif defined(OS_POSIX)
int mapped_file_;
int readonly_mapped_file_;
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index c9882fd0153..97ce94ea1b2 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -75,15 +75,10 @@ SharedMemoryCreateOptions::SharedMemoryCreateOptions()
share_read_only(false) {}
SharedMemory::SharedMemory()
- : readonly_mapped_file_(-1),
- mapped_size_(0),
- memory_(NULL),
- read_only_(false),
- requested_size_(0) {}
+ : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
: shm_(handle),
- readonly_mapped_file_(-1),
mapped_size_(0),
memory_(NULL),
read_only_(read_only),
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index 8251f608402..f29865c21ae 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -592,11 +592,6 @@ TEST(SharedMemoryTest, UnsafeImageSection) {
EXPECT_FALSE(shared_memory_open.Map(1));
EXPECT_EQ(nullptr, shared_memory_open.memory());
- SharedMemory shared_memory_handle_dup(
- SharedMemoryHandle(section_handle.Get(), ::GetCurrentProcessId()), true);
- EXPECT_FALSE(shared_memory_handle_dup.Map(1));
- EXPECT_EQ(nullptr, shared_memory_handle_dup.memory());
-
SharedMemory shared_memory_handle_local(
SharedMemoryHandle(section_handle.Take(), ::GetCurrentProcessId()), true);
EXPECT_FALSE(shared_memory_handle_local.Map(1));
diff --git a/chromium/base/memory/shared_memory_win.cc b/chromium/base/memory/shared_memory_win.cc
index 421fde87e73..4e1b63c3cf7 100644
--- a/chromium/base/memory/shared_memory_win.cc
+++ b/chromium/base/memory/shared_memory_win.cc
@@ -9,12 +9,35 @@
#include <stdint.h>
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
namespace {
+// Errors that can occur during Shared Memory construction.
+// These match tools/metrics/histograms/histograms.xml.
+// This enum is append-only.
+enum CreateError {
+ SUCCESS = 0,
+ SIZE_ZERO = 1,
+ SIZE_TOO_LARGE = 2,
+ INITIALIZE_ACL_FAILURE = 3,
+ INITIALIZE_SECURITY_DESC_FAILURE = 4,
+ SET_SECURITY_DESC_FAILURE = 5,
+ CREATE_FILE_MAPPING_FAILURE = 6,
+ REDUCE_PERMISSIONS_FAILURE = 7,
+ ALREADY_EXISTS = 8,
+ CREATE_ERROR_LAST = ALREADY_EXISTS
+};
+
+// Emits an UMA metric.
+void LogError(CreateError error) {
+ UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
+ CREATE_ERROR_LAST + 1);
+}
+
typedef enum _SECTION_INFORMATION_CLASS {
SectionBasicInformation,
} SECTION_INFORMATION_CLASS;
@@ -84,8 +107,10 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
LPCWSTR name) {
HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
static_cast<DWORD>(rounded_size), name);
- if (!h)
+ if (!h) {
+ LogError(CREATE_FILE_MAPPING_FAILURE);
return nullptr;
+ }
HANDLE h2;
BOOL success = ::DuplicateHandle(
@@ -93,7 +118,13 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY, FALSE, 0);
BOOL rv = ::CloseHandle(h);
DCHECK(rv);
- return success ? h2 : nullptr;
+
+ if (!success) {
+ LogError(REDUCE_PERMISSIONS_FAILURE);
+ return nullptr;
+ }
+
+ return h2;
}
} // namespace.
@@ -109,7 +140,6 @@ SharedMemoryCreateOptions::SharedMemoryCreateOptions()
SharedMemory::SharedMemory()
: external_section_(false),
- mapped_file_(NULL),
mapped_size_(0),
memory_(NULL),
read_only_(false),
@@ -118,7 +148,6 @@ SharedMemory::SharedMemory()
SharedMemory::SharedMemory(const std::wstring& name)
: external_section_(false),
name_(name),
- mapped_file_(NULL),
mapped_size_(0),
memory_(NULL),
read_only_(false),
@@ -126,12 +155,12 @@ SharedMemory::SharedMemory(const std::wstring& name)
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
: external_section_(true),
- mapped_file_(handle.GetHandle()),
mapped_size_(0),
memory_(NULL),
read_only_(read_only),
requested_size_(0) {
DCHECK(!handle.IsValid() || handle.BelongsToCurrentProcess());
+ mapped_file_.Set(handle.GetHandle());
}
SharedMemory::~SharedMemory() {
@@ -187,14 +216,18 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// wasting 32k per mapping on average.
static const size_t kSectionMask = 65536 - 1;
DCHECK(!options.executable);
- DCHECK(!mapped_file_);
- if (options.size == 0)
+ DCHECK(!mapped_file_.Get());
+ if (options.size == 0) {
+ LogError(SIZE_ZERO);
return false;
+ }
// Check maximum accounting for overflow.
if (options.size >
- static_cast<size_t>(std::numeric_limits<int>::max()) - kSectionMask)
+ static_cast<size_t>(std::numeric_limits<int>::max()) - kSectionMask) {
+ LogError(SIZE_TOO_LARGE);
return false;
+ }
size_t rounded_size = (options.size + kSectionMask) & ~kSectionMask;
name_ = options.name_deprecated ?
@@ -206,12 +239,18 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
if (name_.empty()) {
// Add an empty DACL to enforce anonymous read-only sections.
sa.lpSecurityDescriptor = &sd;
- if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION))
+ if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+ LogError(INITIALIZE_ACL_FAILURE);
return false;
- if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION))
+ }
+ if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+ LogError(INITIALIZE_SECURITY_DESC_FAILURE);
return false;
- if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE))
+ }
+ if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+ LogError(SET_SECURITY_DESC_FAILURE);
return false;
+ }
// Windows ignores DACLs on certain unnamed objects (like shared sections).
// So, we generate a random name when we need to enforce read-only.
@@ -221,10 +260,12 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
rand_values[0], rand_values[1],
rand_values[2], rand_values[3]);
}
- mapped_file_ = CreateFileMappingWithReducedPermissions(
- &sa, rounded_size, name_.empty() ? nullptr : name_.c_str());
- if (!mapped_file_)
+ mapped_file_.Set(CreateFileMappingWithReducedPermissions(
+ &sa, rounded_size, name_.empty() ? nullptr : name_.c_str()));
+ if (!mapped_file_.IsValid()) {
+ // The error is logged within CreateFileMappingWithReducedPermissions().
return false;
+ }
requested_size_ = options.size;
@@ -236,10 +277,12 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
external_section_ = true;
if (!options.open_existing_deprecated) {
Close();
+ LogError(ALREADY_EXISTS);
return false;
}
}
+ LogError(SUCCESS);
return true;
}
@@ -249,15 +292,15 @@ bool SharedMemory::Delete(const std::string& name) {
}
bool SharedMemory::Open(const std::string& name, bool read_only) {
- DCHECK(!mapped_file_);
+ DCHECK(!mapped_file_.Get());
DWORD access = FILE_MAP_READ | SECTION_QUERY;
if (!read_only)
access |= FILE_MAP_WRITE;
name_ = ASCIIToUTF16(name);
read_only_ = read_only;
- mapped_file_ =
- OpenFileMapping(access, false, name_.empty() ? nullptr : name_.c_str());
- if (!mapped_file_)
+ mapped_file_.Set(
+ OpenFileMapping(access, false, name_.empty() ? nullptr : name_.c_str()));
+ if (!mapped_file_.IsValid())
return false;
// If a name specified assume it's an external section.
if (!name_.empty())
@@ -267,7 +310,7 @@ bool SharedMemory::Open(const std::string& name, bool read_only) {
}
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (mapped_file_ == NULL)
+ if (!mapped_file_.Get())
return false;
if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
@@ -276,11 +319,12 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
if (memory_)
return false;
- if (external_section_ && !IsSectionSafeToMap(mapped_file_))
+ if (external_section_ && !IsSectionSafeToMap(mapped_file_.Get()))
return false;
memory_ = MapViewOfFile(
- mapped_file_, read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
+ mapped_file_.Get(),
+ read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
if (memory_ != NULL) {
DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
@@ -307,14 +351,15 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
*new_handle = SharedMemoryHandle();
DWORD access = FILE_MAP_READ | SECTION_QUERY;
DWORD options = 0;
- HANDLE mapped_file = mapped_file_;
+ HANDLE mapped_file = mapped_file_.Get();
HANDLE result;
if (share_mode == SHARE_CURRENT_MODE && !read_only_)
access |= FILE_MAP_WRITE;
if (close_self) {
// DUPLICATE_CLOSE_SOURCE causes DuplicateHandle to close mapped_file.
options = DUPLICATE_CLOSE_SOURCE;
- mapped_file_ = NULL;
+ HANDLE detached_handle = mapped_file_.Take();
+ DCHECK_EQ(detached_handle, mapped_file);
Unmap();
}
@@ -334,14 +379,11 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
void SharedMemory::Close() {
- if (mapped_file_ != NULL) {
- ::CloseHandle(mapped_file_);
- mapped_file_ = NULL;
- }
+ mapped_file_.Close();
}
SharedMemoryHandle SharedMemory::handle() const {
- return SharedMemoryHandle(mapped_file_, base::GetCurrentProcId());
+ return SharedMemoryHandle(mapped_file_.Get(), base::GetCurrentProcId());
}
} // namespace base
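
The raw HANDLE member becomes a base::win::ScopedHandle, so the section handle is closed by RAII rather than by hand in Close(). A rough sketch of the same idiom in isolation, using a small anonymous 4 KB section purely for illustration:

    #include <windows.h>

    #include "base/win/scoped_handle.h"

    bool CreateDemoSection() {
      base::win::ScopedHandle section;
      section.Set(::CreateFileMapping(INVALID_HANDLE_VALUE, nullptr,
                                      PAGE_READWRITE, 0, 4096, nullptr));
      if (!section.IsValid())
        return false;                // nothing to clean up on failure
      HANDLE raw = section.Get();    // borrow without transferring ownership
      (void)raw;
      return true;                   // destructor closes the handle
    }
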
diff --git a/chromium/base/memory/weak_ptr.cc b/chromium/base/memory/weak_ptr.cc
index 16d3dff10aa..4e77b04973c 100644
--- a/chromium/base/memory/weak_ptr.cc
+++ b/chromium/base/memory/weak_ptr.cc
@@ -34,14 +34,16 @@ WeakReference::Flag::~Flag() {
WeakReference::WeakReference() {
}
-WeakReference::WeakReference(const WeakReference& other) = default;
-
WeakReference::WeakReference(const Flag* flag) : flag_(flag) {
}
WeakReference::~WeakReference() {
}
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
bool WeakReference::is_valid() const { return flag_.get() && flag_->IsValid(); }
WeakReferenceOwner::WeakReferenceOwner() {
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index 3b8bcb1b077..3544439dd3c 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -109,10 +109,14 @@ class BASE_EXPORT WeakReference {
};
WeakReference();
- WeakReference(const WeakReference& other);
explicit WeakReference(const Flag* flag);
~WeakReference();
+ WeakReference(WeakReference&& other);
+ WeakReference(const WeakReference& other);
+ WeakReference& operator=(WeakReference&& other) = default;
+ WeakReference& operator=(const WeakReference& other) = default;
+
bool is_valid() const;
private:
@@ -145,6 +149,11 @@ class BASE_EXPORT WeakPtrBase {
WeakPtrBase();
~WeakPtrBase();
+ WeakPtrBase(const WeakPtrBase& other) = default;
+ WeakPtrBase(WeakPtrBase&& other) = default;
+ WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+ WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
protected:
explicit WeakPtrBase(const WeakReference& ref);
@@ -205,10 +214,13 @@ class WeakPtr : public internal::WeakPtrBase {
WeakPtr(std::nullptr_t) : ptr_(nullptr) {}
// Allow conversion from U to T provided U "is a" T. Note that this
- // is separate from the (implicit) copy constructor.
+ // is separate from the (implicit) copy and move constructors.
template <typename U>
WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.ptr_) {
}
+ template <typename U>
+ WeakPtr(WeakPtr<U>&& other)
+ : WeakPtrBase(std::move(other)), ptr_(other.ptr_) {}
T* get() const { return ref_.is_valid() ? ptr_ : nullptr; }
@@ -226,36 +238,10 @@ class WeakPtr : public internal::WeakPtrBase {
ptr_ = nullptr;
}
- // Implement "Safe Bool Idiom"
- // https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Safe_bool
- //
- // Allow WeakPtr<element_type> to be used in boolean expressions such as
- // if (weak_ptr_instance)
- // But do not become convertible to a real bool (which is dangerous).
- // Implementation requires:
- // typedef Testable
- // operator Testable() const
- // operator==
- // operator!=
- //
- // == and != operators must be declared explicitly or dissallowed, as
- // otherwise "ptr1 == ptr2" will compile but do the wrong thing (i.e., convert
- // to Testable and then do the comparison).
- //
- // C++11 provides for "explicit operator bool()", however it is currently
- // banned due to MSVS2013. https://chromium-cpp.appspot.com/#core-blacklist
- private:
- typedef T* WeakPtr::*Testable;
-
- public:
- operator Testable() const { return get() ? &WeakPtr::ptr_ : nullptr; }
+ // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+ explicit operator bool() const { return get() != nullptr; }
private:
- // Explicitly declare comparison operators as required by the "Safe Bool
- // Idiom", but keep them private.
- template <class U> bool operator==(WeakPtr<U> const&) const;
- template <class U> bool operator!=(WeakPtr<U> const&) const;
-
friend class internal::SupportsWeakPtrBase;
template <typename U> friend class WeakPtr;
friend class SupportsWeakPtr<T>;
@@ -271,6 +257,24 @@ class WeakPtr : public internal::WeakPtrBase {
T* ptr_;
};
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr == nullptr;
+}
+
// A class may be composed of a WeakPtrFactory and thereby
// control how it exposes weak pointers to itself. This is helpful if you only
// need weak pointers within the implementation of a class. This class is also
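
The safe-bool idiom gives way to an explicit operator bool plus nullptr comparisons, and WeakPtr picks up move support; the WeakPtrFactoryTest.Move and ComparisonToNull tests below exercise the same surface. A small sketch, with the int/factory names chosen only for illustration:

    #include <utility>

    #include "base/memory/weak_ptr.h"

    void Demo() {
      int data = 0;
      base::WeakPtrFactory<int> factory(&data);
      base::WeakPtr<int> ptr = factory.GetWeakPtr();

      if (ptr)                      // explicit operator bool in a conditional
        *ptr = 1;
      if (ptr != nullptr)           // nullptr comparisons now compile
        *ptr = 2;

      base::WeakPtr<int> moved = std::move(ptr);
      // After the move, |ptr| no longer dereferences to |data|; |moved| does.
    }
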
diff --git a/chromium/base/memory/weak_ptr_unittest.cc b/chromium/base/memory/weak_ptr_unittest.cc
index df6c24f8a8a..ebcf33c57ec 100644
--- a/chromium/base/memory/weak_ptr_unittest.cc
+++ b/chromium/base/memory/weak_ptr_unittest.cc
@@ -69,7 +69,8 @@ class BackgroundThread : public Thread {
~BackgroundThread() override { Stop(); }
void CreateArrowFromTarget(Arrow** arrow, Target* target) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromTarget, arrow,
target, &completion));
@@ -77,7 +78,8 @@ class BackgroundThread : public Thread {
}
void CreateArrowFromArrow(Arrow** arrow, const Arrow* other) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromArrow, arrow,
other, &completion));
@@ -85,7 +87,8 @@ class BackgroundThread : public Thread {
}
void DeleteTarget(Target* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE,
base::Bind(&BackgroundThread::DoDeleteTarget, object, &completion));
@@ -93,7 +96,8 @@ class BackgroundThread : public Thread {
}
void CopyAndAssignArrow(Arrow* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrow, object,
&completion));
@@ -101,7 +105,8 @@ class BackgroundThread : public Thread {
}
void CopyAndAssignArrowBase(Arrow* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrowBase,
object, &completion));
@@ -109,7 +114,8 @@ class BackgroundThread : public Thread {
}
void DeleteArrow(Arrow* object) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE,
base::Bind(&BackgroundThread::DoDeleteArrow, object, &completion));
@@ -117,7 +123,8 @@ class BackgroundThread : public Thread {
}
Target* DeRef(const Arrow* arrow) {
- WaitableEvent completion(true, false);
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Target* result = nullptr;
task_runner()->PostTask(FROM_HERE, base::Bind(&BackgroundThread::DoDeRef,
arrow, &result, &completion));
@@ -196,6 +203,16 @@ TEST(WeakPtrFactoryTest, Comparison) {
EXPECT_EQ(ptr.get(), ptr2.get());
}
+TEST(WeakPtrFactoryTest, Move) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ WeakPtr<int> ptr2 = factory.GetWeakPtr();
+ WeakPtr<int> ptr3 = std::move(ptr2);
+ EXPECT_NE(ptr.get(), ptr2.get());
+ EXPECT_EQ(ptr.get(), ptr3.get());
+}
+
TEST(WeakPtrFactoryTest, OutOfScope) {
WeakPtr<int> ptr;
EXPECT_EQ(nullptr, ptr.get());
@@ -301,6 +318,19 @@ TEST(WeakPtrFactoryTest, BooleanTesting) {
}
}
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+
+ WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+ EXPECT_NE(nullptr, ptr_to_an_instance);
+ EXPECT_NE(ptr_to_an_instance, nullptr);
+
+ WeakPtr<int> null_ptr;
+ EXPECT_EQ(null_ptr, nullptr);
+ EXPECT_EQ(nullptr, null_ptr);
+}
+
TEST(WeakPtrTest, InvalidateWeakPtrs) {
int data;
WeakPtrFactory<int> factory(&data);
diff --git a/chromium/base/memory/weak_ptr_unittest.nc b/chromium/base/memory/weak_ptr_unittest.nc
index 32deca9f17b..9b1226b7941 100644
--- a/chromium/base/memory/weak_ptr_unittest.nc
+++ b/chromium/base/memory/weak_ptr_unittest.nc
@@ -59,7 +59,7 @@ void WontCompile() {
SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
void WontCompile() {
Producer f;
@@ -73,14 +73,14 @@ void WontCompile() {
WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
void WontCompile() {
Producer f;
WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
}
-#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
void WontCompile() {
DerivedProducer f;
@@ -94,14 +94,14 @@ void WontCompile() {
WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
void WontCompile() {
DerivedProducer f;
WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNRELATED_HELPER) // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNRELATED_HELPER) // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*'"]
void WontCompile() {
DerivedProducer f;
@@ -129,7 +129,7 @@ void WontCompile() {
WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
}
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: member 'AsWeakPtr' found in multiple base classes of different types"]
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
void WontCompile() {
MultiplyDerivedProducer f;
diff --git a/chromium/base/message_loop/incoming_task_queue.cc b/chromium/base/message_loop/incoming_task_queue.cc
index ebecc1ba54c..6deaba5656f 100644
--- a/chromium/base/message_loop/incoming_task_queue.cc
+++ b/chromium/base/message_loop/incoming_task_queue.cc
@@ -37,6 +37,15 @@ bool AlwaysNotifyPump(MessageLoop::Type type) {
#endif
}
+TimeTicks CalculateDelayedRuntime(TimeDelta delay) {
+ TimeTicks delayed_run_time;
+ if (delay > TimeDelta())
+ delayed_run_time = TimeTicks::Now() + delay;
+ else
+ DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
+ return delayed_run_time;
+}
+
} // namespace
IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
@@ -58,7 +67,6 @@ bool IncomingTaskQueue::AddToIncomingQueue(
<< "Requesting super-long task delay period of " << delay.InSeconds()
<< " seconds from here: " << from_here.ToString();
- AutoLock locked(incoming_queue_lock_);
PendingTask pending_task(
from_here, task, CalculateDelayedRuntime(delay), nestable);
#if defined(OS_WIN)
@@ -68,7 +76,6 @@ bool IncomingTaskQueue::AddToIncomingQueue(
// resolution on Windows is between 10 and 15ms.
if (delay > TimeDelta() &&
delay.InMilliseconds() < (2 * Time::kMinLowResolutionThresholdMs)) {
- ++high_res_task_count_;
pending_task.is_high_res = true;
}
#endif
@@ -97,7 +104,7 @@ int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
// incoming queue becomes nonempty we need to schedule it again.
message_loop_scheduled_ = false;
} else {
- incoming_queue_.Swap(work_queue);
+ incoming_queue_.swap(*work_queue);
}
// Reset the count of high resolution tasks since our queue is now empty.
int high_res_tasks = high_res_task_count_;
@@ -106,17 +113,25 @@ int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
}
void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
- AutoLock lock(incoming_queue_lock_);
+ base::subtle::AutoWriteLock lock(message_loop_lock_);
message_loop_ = NULL;
}
void IncomingTaskQueue::StartScheduling() {
- AutoLock lock(incoming_queue_lock_);
- DCHECK(!is_ready_for_scheduling_);
- DCHECK(!message_loop_scheduled_);
- is_ready_for_scheduling_ = true;
- if (!incoming_queue_.empty())
- ScheduleWork();
+ bool schedule_work;
+ {
+ AutoLock lock(incoming_queue_lock_);
+ DCHECK(!is_ready_for_scheduling_);
+ DCHECK(!message_loop_scheduled_);
+ is_ready_for_scheduling_ = true;
+ schedule_work = !incoming_queue_.empty();
+ }
+ if (schedule_work) {
+ DCHECK(message_loop_);
+ // Don't need to lock |message_loop_lock_| here because this function is
+ // called by MessageLoop on its thread.
+ message_loop_->ScheduleWork();
+ }
}
IncomingTaskQueue::~IncomingTaskQueue() {
@@ -124,58 +139,60 @@ IncomingTaskQueue::~IncomingTaskQueue() {
DCHECK(!message_loop_);
}
-TimeTicks IncomingTaskQueue::CalculateDelayedRuntime(TimeDelta delay) {
- TimeTicks delayed_run_time;
- if (delay > TimeDelta())
- delayed_run_time = TimeTicks::Now() + delay;
- else
- DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
- return delayed_run_time;
-}
-
bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
// Warning: Don't try to short-circuit, and handle this thread's tasks more
// directly, as it could starve handling of foreign threads. Put every task
// into this queue.
- // This should only be called while the lock is taken.
- incoming_queue_lock_.AssertAcquired();
+ // Ensures |message_loop_| isn't destroyed while running.
+ base::subtle::AutoReadLock hold_message_loop(message_loop_lock_);
if (!message_loop_) {
pending_task->task.Reset();
return false;
}
- // Initialize the sequence number. The sequence number is used for delayed
- // tasks (to facilitate FIFO sorting when two tasks have the same
- // delayed_run_time value) and for identifying the task in about:tracing.
- pending_task->sequence_num = next_sequence_num_++;
+ bool schedule_work = false;
+ {
+ AutoLock hold(incoming_queue_lock_);
- message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
- *pending_task);
-
- bool was_empty = incoming_queue_.empty();
- incoming_queue_.push(*pending_task);
- pending_task->task.Reset();
+#if defined(OS_WIN)
+ if (pending_task->is_high_res)
+ ++high_res_task_count_;
+#endif
- if (is_ready_for_scheduling_ &&
- (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
- ScheduleWork();
+ // Initialize the sequence number. The sequence number is used for delayed
+ // tasks (to facilitate FIFO sorting when two tasks have the same
+ // delayed_run_time value) and for identifying the task in about:tracing.
+ pending_task->sequence_num = next_sequence_num_++;
+
+ message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
+ *pending_task);
+
+ bool was_empty = incoming_queue_.empty();
+ incoming_queue_.push(std::move(*pending_task));
+
+ if (is_ready_for_scheduling_ &&
+ (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
+ schedule_work = true;
+ // After we've scheduled the message loop, we do not need to do so again
+ // until we know it has processed all of the work in our queue and is
+ // waiting for more work again. The message loop will always attempt to
+ // reload from the incoming queue before waiting again so we clear this
+ // flag in ReloadWorkQueue().
+ message_loop_scheduled_ = true;
+ }
}
- return true;
-}
+ // Wake up the message loop and schedule work. This is done outside
+ // |incoming_queue_lock_| because signaling the message loop may cause this
+ // thread to be switched. If |incoming_queue_lock_| is held, any other thread
+ // that wants to post a task will be blocked until this thread switches back
+ // in and releases |incoming_queue_lock_|.
+ if (schedule_work)
+ message_loop_->ScheduleWork();
-void IncomingTaskQueue::ScheduleWork() {
- DCHECK(is_ready_for_scheduling_);
- // Wake up the message loop.
- message_loop_->ScheduleWork();
- // After we've scheduled the message loop, we do not need to do so again
- // until we know it has processed all of the work in our queue and is
- // waiting for more work again. The message loop will always attempt to
- // reload from the incoming queue before waiting again so we clear this flag
- // in ReloadWorkQueue().
- message_loop_scheduled_ = true;
+ return true;
}
} // namespace internal
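
The comment in PostPendingTask above explains why the wake-up now happens outside |incoming_queue_lock_|: signaling may deschedule the posting thread, and any other poster would then block on the lock until that thread runs again. A minimal standalone sketch of the same "decide under the lock, signal outside it" pattern, using std::mutex and std::condition_variable instead of Chromium's base types (the class and member names here are invented for illustration):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

class TaskQueue {
 public:
  void Post(std::string task) {
    bool schedule_work = false;
    {
      std::lock_guard<std::mutex> hold(lock_);
      bool was_empty = queue_.empty();
      queue_.push(std::move(task));
      if (!scheduled_ && was_empty) {
        scheduled_ = true;      // decide under the lock...
        schedule_work = true;
      }
    }
    // ...but signal outside it, so a context switch during the wake-up does
    // not block other posting threads on |lock_|.
    if (schedule_work)
      wake_up_.notify_one();
  }

  std::queue<std::string> Reload() {
    std::unique_lock<std::mutex> hold(lock_);
    wake_up_.wait(hold, [this] { return !queue_.empty(); });
    scheduled_ = false;  // the next Post() must signal again
    std::queue<std::string> work;
    work.swap(queue_);
    return work;
  }

 private:
  std::mutex lock_;
  std::condition_variable wake_up_;
  std::queue<std::string> queue_;
  bool scheduled_ = false;
};

int main() {
  TaskQueue queue;
  std::thread producer([&] { queue.Post("hello"); });
  std::queue<std::string> work = queue.Reload();
  producer.join();
  std::printf("reloaded %zu task(s)\n", work.size());
}
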
diff --git a/chromium/base/message_loop/incoming_task_queue.h b/chromium/base/message_loop/incoming_task_queue.h
index e450aa164fa..aff71d20bf8 100644
--- a/chromium/base/message_loop/incoming_task_queue.h
+++ b/chromium/base/message_loop/incoming_task_queue.h
@@ -10,6 +10,7 @@
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
#include "base/synchronization/lock.h"
+#include "base/synchronization/read_write_lock.h"
#include "base/time/time.h"
namespace base {
@@ -62,9 +63,6 @@ class BASE_EXPORT IncomingTaskQueue
friend class RefCountedThreadSafe<IncomingTaskQueue>;
virtual ~IncomingTaskQueue();
- // Calculates the time at which a PendingTask should run.
- TimeTicks CalculateDelayedRuntime(TimeDelta delay);
-
// Adds a task to |incoming_queue_|. The caller retains ownership of
// |pending_task|, but this function will reset the value of
// |pending_task->task|. This is needed to ensure that the posting call stack
@@ -78,9 +76,14 @@ class BASE_EXPORT IncomingTaskQueue
// so that ReloadWorkQueue() completes in constant time.
int high_res_task_count_;
- // The lock that protects access to the members of this class.
+ // The lock that protects access to the members of this class, except
+ // |message_loop_|.
base::Lock incoming_queue_lock_;
+ // Lock that protects |message_loop_| to prevent it from being deleted while a
+ // task is being posted.
+ base::subtle::ReadWriteLock message_loop_lock_;
+
// An incoming queue of tasks that are acquired under a mutex for processing
  // on this instance's thread. These tasks have not yet been pushed to
// |message_loop_|.
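
The new |message_loop_lock_| declared above is a reader/writer lock: each poster takes it for reading while it dereferences |message_loop_|, and WillDestroyCurrentMessageLoop() takes it for writing before clearing the pointer. A standalone sketch of that arrangement using std::shared_mutex (C++17); the stand-in type and method names below are invented, not from the patch:

#include <cstdio>
#include <shared_mutex>

struct MessageLoopStub {
  void ScheduleWork() { std::printf("schedule work\n"); }
};

class Poster {
 public:
  explicit Poster(MessageLoopStub* loop) : loop_(loop) {}

  // Many posting threads may run this concurrently: a shared (read) lock is
  // enough to guarantee the loop is not being torn down underneath them.
  bool PostTask() {
    std::shared_lock<std::shared_mutex> hold(loop_lock_);
    if (!loop_)
      return false;
    loop_->ScheduleWork();
    return true;
  }

  // The owning thread takes the exclusive (write) lock before clearing the
  // pointer, so no poster can be dereferencing it at that moment.
  void WillDestroyLoop() {
    std::unique_lock<std::shared_mutex> hold(loop_lock_);
    loop_ = nullptr;
  }

 private:
  std::shared_mutex loop_lock_;
  MessageLoopStub* loop_;
};

int main() {
  MessageLoopStub loop;
  Poster poster(&loop);
  poster.PostTask();         // succeeds while the loop is alive
  poster.WillDestroyLoop();
  poster.PostTask();         // now returns false instead of touching a dead loop
}
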
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 3fef91be460..9d3769169cf 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -18,6 +18,7 @@
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
@@ -303,9 +304,9 @@ void MessageLoop::RunUntilIdle() {
void MessageLoop::QuitWhenIdle() {
DCHECK_EQ(this, current());
if (run_loop_) {
- run_loop_->quit_when_idle_received_ = true;
+ run_loop_->QuitWhenIdle();
} else {
- NOTREACHED() << "Must be inside Run to call Quit";
+ NOTREACHED() << "Must be inside Run to call QuitWhenIdle";
}
}
@@ -389,16 +390,14 @@ MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
in_high_res_mode_(false),
#endif
nestable_tasks_allowed_(true),
-#if defined(OS_WIN)
- os_modal_loop_(false),
-#endif // OS_WIN
pump_factory_(pump_factory),
message_histogram_(NULL),
run_loop_(NULL),
incoming_task_queue_(new internal::IncomingTaskQueue(this)),
unbound_task_runner_(
new internal::MessageLoopTaskRunner(incoming_task_queue_)),
- task_runner_(unbound_task_runner_) {
+ task_runner_(unbound_task_runner_),
+ thread_id_(kInvalidThreadId) {
// If type is TYPE_CUSTOM non-null pump_factory must be given.
DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
}
@@ -417,6 +416,22 @@ void MessageLoop::BindToCurrentThread() {
unbound_task_runner_->BindToCurrentThread();
unbound_task_runner_ = nullptr;
SetThreadTaskRunnerHandle();
+ {
+ // Save the current thread's ID for potential use by other threads
+ // later from GetThreadName().
+ thread_id_ = PlatformThread::CurrentId();
+ subtle::MemoryBarrier();
+ }
+}
+
+std::string MessageLoop::GetThreadName() const {
+ if (thread_id_ == kInvalidThreadId) {
+ // |thread_id_| may already have been initialized but this thread might not
+ // have received the update yet.
+ subtle::MemoryBarrier();
+ DCHECK_NE(kInvalidThreadId, thread_id_);
+ }
+ return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
}
void MessageLoop::SetTaskRunner(
@@ -549,6 +564,12 @@ void MessageLoop::ScheduleWork() {
pump_->ScheduleWork();
}
+#if defined(OS_WIN)
+bool MessageLoop::MessagePumpWasSignaled() {
+ return pump_->WasSignaled();
+}
+#endif
+
//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.
@@ -557,13 +578,12 @@ void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL) // NaCl build has no metrics code.
if (enable_histogrammer_ && !message_histogram_
&& StatisticsRecorder::IsActive()) {
- DCHECK(!thread_name_.empty());
+ std::string thread_name = GetThreadName();
+ DCHECK(!thread_name.empty());
message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
- "MsgLoop:" + thread_name_,
- kLeastNonZeroMessageId, kMaxMessageId,
+ "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
kNumberOfDistinctMessagesDisplayed,
- HistogramBase::kHexRangePrintingFlag,
- event_descriptions_);
+ HistogramBase::kHexRangePrintingFlag, event_descriptions_);
}
#endif
}
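
GetThreadName() reads |thread_id_| from arbitrary threads after BindToCurrentThread() has written it, relying on a memory barrier rather than a lock. A rough standalone equivalent using std::atomic release/acquire ordering; the patch uses a full subtle::MemoryBarrier() for the same publish step, and the flag and helper names below are invented for the sketch:

#include <atomic>
#include <cstdio>
#include <thread>

// Stand-in for base's PlatformThreadId / kInvalidThreadId.
using ThreadId = std::thread::id;

std::atomic<bool> id_published{false};
ThreadId loop_thread_id;  // written once by the loop thread

void BindToCurrentThread() {
  loop_thread_id = std::this_thread::get_id();
  // Publish with release semantics so the write above becomes visible
  // before the flag does.
  id_published.store(true, std::memory_order_release);
}

bool GetLoopThreadId(ThreadId* out) {
  // Acquire pairs with the release above: once the flag is observed, the id
  // written before it is also visible on this thread.
  if (!id_published.load(std::memory_order_acquire))
    return false;
  *out = loop_thread_id;
  return true;
}

int main() {
  std::thread loop_thread(BindToCurrentThread);
  loop_thread.join();
  ThreadId id;
  if (GetLoopThreadId(&id))
    std::printf("loop thread id published\n");
}
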
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index b9517c450ee..1230f4196d0 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -291,12 +291,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Returns the type passed to the constructor.
Type type() const { return type_; }
- // Optional call to connect the thread name with this loop.
- void set_thread_name(const std::string& thread_name) {
- DCHECK(thread_name_.empty()) << "Should not rename this thread!";
- thread_name_ = thread_name;
- }
- const std::string& thread_name() const { return thread_name_; }
+ // Returns the name of the thread this message loop is bound to.
+ // This function is only valid when this message loop is running and
+ // BindToCurrentThread has already been called.
+ std::string GetThreadName() const;
// Gets the TaskRunner associated with this message loop.
const scoped_refptr<SingleThreadTaskRunner>& task_runner() {
@@ -375,16 +373,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
-#if defined(OS_WIN)
- void set_os_modal_loop(bool os_modal_loop) {
- os_modal_loop_ = os_modal_loop;
- }
-
- bool os_modal_loop() const {
- return os_modal_loop_;
- }
-#endif // OS_WIN
-
// Can only be called from the thread that owns the MessageLoop.
bool is_running() const;
@@ -402,6 +390,15 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Runs the specified PendingTask.
void RunTask(const PendingTask& pending_task);
+#if defined(OS_WIN)
+ // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+ // has been investigated.
+  // This should be used for diagnostics only. If the message pump's wake-up
+  // mechanism is based on an auto-reset event, this call would reset the
+  // event to its unset state.
+ bool MessagePumpWasSignaled();
+#endif
+
//----------------------------------------------------------------------------
protected:
std::unique_ptr<MessagePump> pump_;
@@ -523,17 +520,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
  // inside a (accidentally induced?) nested message pump.
bool nestable_tasks_allowed_;
-#if defined(OS_WIN)
- // Should be set to true before calling Windows APIs like TrackPopupMenu, etc.
- // which enter a modal message loop.
- bool os_modal_loop_;
-#endif
-
// pump_factory_.Run() is called to create a message pump for this loop
// if type_ is TYPE_CUSTOM and pump_ is null.
MessagePumpFactoryCallback pump_factory_;
- std::string thread_name_;
// A profiling histogram showing the counts of various messages and events.
HistogramBase* message_histogram_;
@@ -552,6 +542,9 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
scoped_refptr<SingleThreadTaskRunner> task_runner_;
std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+ // Id of the thread this message loop is bound to.
+ PlatformThreadId thread_id_;
+
template <class T, class R> friend class base::subtle::DeleteHelperInternal;
template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
diff --git a/chromium/base/message_loop/message_loop_task_runner_unittest.cc b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
index 044350acd1c..cabd25013bc 100644
--- a/chromium/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
@@ -11,6 +11,8 @@
#include "base/debug/leak_annotations.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_task_runner.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -24,7 +26,8 @@ class MessageLoopTaskRunnerTest : public testing::Test {
MessageLoopTaskRunnerTest()
: current_loop_(new MessageLoop()),
task_thread_("task_thread"),
- thread_sync_(true, false) {}
+ thread_sync_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void DeleteCurrentMessageLoop() { current_loop_.reset(); }
@@ -35,7 +38,7 @@ class MessageLoopTaskRunnerTest : public testing::Test {
task_thread_.Start();
// Allow us to pause the |task_thread_|'s MessageLoop.
- task_thread_.message_loop()->PostTask(
+ task_thread_.message_loop()->task_runner()->PostTask(
FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
Unretained(this)));
}
@@ -257,7 +260,8 @@ class MessageLoopTaskRunnerThreadingTest : public testing::Test {
}
void Quit() const {
- loop_.PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());
+ loop_.task_runner()->PostTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure());
}
void AssertOnIOThread() const {
@@ -313,21 +317,21 @@ class MessageLoopTaskRunnerThreadingTest : public testing::Test {
TEST_F(MessageLoopTaskRunnerThreadingTest, Release) {
EXPECT_TRUE(io_thread_->task_runner()->ReleaseSoon(FROM_HERE, this));
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
TEST_F(MessageLoopTaskRunnerThreadingTest, Delete) {
DeletedOnFile* deleted_on_file = new DeletedOnFile(this);
EXPECT_TRUE(
file_thread_->task_runner()->DeleteSoon(FROM_HERE, deleted_on_file));
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
TEST_F(MessageLoopTaskRunnerThreadingTest, PostTask) {
EXPECT_TRUE(file_thread_->task_runner()->PostTask(
FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
Unretained(this))));
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
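
Several fixtures above switch WaitableEvent construction from two ambiguous bool parameters to scoped enums (ResetPolicy, InitialState), which makes each call site self-describing. A minimal sketch of the same constructor style, independent of base and using a made-up Event class:

#include <cstdio>

class Event {
 public:
  enum class ResetPolicy { MANUAL, AUTOMATIC };
  enum class InitialState { SIGNALED, NOT_SIGNALED };

  // Compare Event(ResetPolicy::MANUAL, InitialState::NOT_SIGNALED) with the
  // old Event(true, false): the enum version reads unambiguously.
  Event(ResetPolicy reset_policy, InitialState initial_state)
      : manual_reset_(reset_policy == ResetPolicy::MANUAL),
        signaled_(initial_state == InitialState::SIGNALED) {}

  bool manual_reset() const { return manual_reset_; }
  bool signaled() const { return signaled_; }

 private:
  bool manual_reset_;
  bool signaled_;
};

int main() {
  Event event(Event::ResetPolicy::MANUAL, Event::InitialState::NOT_SIGNALED);
  std::printf("manual=%d signaled=%d\n", event.manual_reset(), event.signaled());
}
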
diff --git a/chromium/base/message_loop/message_loop_test.cc b/chromium/base/message_loop/message_loop_test.cc
index 4e45acbbf02..1ab946f9e24 100644
--- a/chromium/base/message_loop/message_loop_test.cc
+++ b/chromium/base/message_loop/message_loop_test.cc
@@ -12,6 +12,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
@@ -96,25 +97,25 @@ void RunTest_PostTask(MessagePumpFactory factory) {
// Add tests to message loop
scoped_refptr<Foo> foo(new Foo());
std::string a("a"), b("b"), c("c"), d("d");
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test0, foo.get()));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test1ConstRef, foo.get(), a));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test1Ptr, foo.get(), &b));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test1Int, foo.get(), 100));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test2Ptr, foo.get(), &a, &c));
- MessageLoop::current()->PostTask(FROM_HERE, Bind(
- &Foo::Test2Mixed, foo.get(), a, &d));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&Foo::Test0, foo.get()));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1ConstRef, foo.get(), a));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1Ptr, foo.get(), &b));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1Int, foo.get(), 100));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test2Ptr, foo.get(), &a, &c));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test2Mixed, foo.get(), a, &d));
// After all tests, post a message that will shut down the message loop
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
// Now kick things off
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(foo->test_count(), 105);
EXPECT_EQ(foo->result(), "abacad");
@@ -131,12 +132,11 @@ void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory) {
int num_tasks = 1;
Time run_time;
- loop.PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
- kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
Time time_before_run = Time::Now();
- loop.Run();
+ RunLoop().Run();
Time time_after_run = Time::Now();
EXPECT_EQ(0, num_tasks);
@@ -151,18 +151,16 @@ void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time1, run_time2;
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
TimeDelta::FromMilliseconds(200));
// If we get a large pause in execution (due to a context switch) here, this
// test could fail.
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(10));
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
EXPECT_TRUE(run_time2 < run_time1);
@@ -185,14 +183,12 @@ void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time1, run_time2;
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
EXPECT_TRUE(run_time1 < run_time2);
@@ -210,14 +206,13 @@ void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time;
- loop.PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
TimeDelta::FromMilliseconds(10));
Time time_before_run = Time::Now();
- loop.Run();
+ RunLoop().Run();
Time time_after_run = Time::Now();
EXPECT_EQ(0, num_tasks);
@@ -240,14 +235,14 @@ void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory) {
// Clutter the ML with tasks.
for (int i = 1; i < num_tasks; ++i)
- loop.PostTask(FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
+ loop.task_runner()->PostTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
- loop.PostDelayedTask(
+ loop.task_runner()->PostDelayedTask(
FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(1));
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
EXPECT_TRUE(run_time2 > run_time1);
@@ -265,18 +260,16 @@ void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory) {
int num_tasks = 1;
Time run_time1, run_time2;
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
TimeDelta::FromSeconds(1000));
- loop.PostDelayedTask(
- FROM_HERE,
- Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(10));
Time start_time = Time::Now();
- loop.Run();
+ RunLoop().Run();
EXPECT_EQ(0, num_tasks);
// Ensure that we ran in far less time than the slower timer.
@@ -309,7 +302,7 @@ class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
~RecordDeletionProbe() {
*was_deleted_ = true;
if (post_on_delete_.get())
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_.get()));
}
@@ -323,13 +316,13 @@ void RunTest_EnsureDeletion(MessagePumpFactory factory) {
{
std::unique_ptr<MessagePump> pump(factory());
MessageLoop loop(std::move(pump));
- loop.PostTask(
+ loop.task_runner()->PostTask(
FROM_HERE, Bind(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &a_was_deleted)));
+ new RecordDeletionProbe(NULL, &a_was_deleted)));
// TODO(ajwong): Do we really need 1000ms here?
- loop.PostDelayedTask(
+ loop.task_runner()->PostDelayedTask(
FROM_HERE, Bind(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &b_was_deleted)),
+ new RecordDeletionProbe(NULL, &b_was_deleted)),
TimeDelta::FromMilliseconds(1000));
}
EXPECT_TRUE(a_was_deleted);
@@ -348,7 +341,7 @@ void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
RecordDeletionProbe* a = new RecordDeletionProbe(NULL, &a_was_deleted);
RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
- loop.PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
}
EXPECT_TRUE(a_was_deleted);
EXPECT_TRUE(b_was_deleted);
@@ -358,11 +351,11 @@ void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
void NestingFunc(int* depth) {
if (*depth > 0) {
*depth -= 1;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&NestingFunc, depth));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, depth));
MessageLoop::current()->SetNestableTasksAllowed(true);
- MessageLoop::current()->Run();
+ RunLoop().Run();
}
MessageLoop::current()->QuitWhenIdle();
}
@@ -372,9 +365,9 @@ void RunTest_Nesting(MessagePumpFactory factory) {
MessageLoop loop(std::move(pump));
int depth = 100;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&NestingFunc, &depth));
- MessageLoop::current()->Run();
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, &depth));
+ RunLoop().Run();
EXPECT_EQ(depth, 0);
}
@@ -410,9 +403,10 @@ void RunNestedLoop(TestNestingObserver* observer,
RunLoop nested_loop;
// Verify that by the time the first task is run the observer has seen the
// message loop begin.
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&ExpectOneBeginNestedLoop, observer));
- MessageLoop::current()->PostTask(FROM_HERE, nested_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop.QuitClosure());
nested_loop.Run();
// Quitting message loops doesn't change the begin count.
@@ -431,9 +425,10 @@ void RunTest_NestingObserver(MessagePumpFactory factory) {
outer_loop.AddNestingObserver(&nesting_observer);
// Post a task that runs a nested message loop.
- outer_loop.PostTask(FROM_HERE, Bind(&RunNestedLoop, &nesting_observer,
- outer_loop.QuitWhenIdleClosure()));
- outer_loop.Run();
+ outer_loop.task_runner()->PostTask(FROM_HERE,
+ Bind(&RunNestedLoop, &nesting_observer,
+ outer_loop.QuitWhenIdleClosure()));
+ RunLoop().Run();
outer_loop.RemoveNestingObserver(&nesting_observer);
}
@@ -523,7 +518,7 @@ void RecursiveFunc(TaskList* order, int cookie, int depth,
if (depth > 0) {
if (is_reentrant)
MessageLoop::current()->SetNestableTasksAllowed(true);
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
}
@@ -541,17 +536,14 @@ void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&RecursiveFunc, &order, 1, 2, false));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&RecursiveFunc, &order, 2, 2, false));
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(14U, order.Size());
@@ -588,20 +580,16 @@ void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&OrderedFunc, &order, 3),
- TimeDelta::FromMilliseconds(5));
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- Bind(&QuitFunc, &order, 4),
- TimeDelta::FromMilliseconds(5));
+ MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
+ MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(16U, order.Size());
@@ -628,14 +616,14 @@ void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
MessageLoop loop(std::move(pump));
TaskList order;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&QuitFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(14U, order.Size());
@@ -670,7 +658,7 @@ void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
Bind(&OrderedFunc, &order, 2));
MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
Bind(&QuitFunc, &order, 3));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(6U, order.Size());
@@ -723,7 +711,7 @@ void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
FROM_HERE,
Bind(&QuitFunc, &order, 6));
- MessageLoop::current()->Run();
+ RunLoop().Run();
// FIFO order.
ASSERT_EQ(12U, order.Size());
@@ -762,20 +750,20 @@ void RunTest_QuitNow(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
- MessageLoop::current()->Run();
+ RunLoop().Run();
ASSERT_EQ(6U, order.Size());
int task_index = 0;
@@ -798,13 +786,13 @@ void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
RunLoop outer_run_loop;
RunLoop nested_run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, nested_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -828,14 +816,14 @@ void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
RunLoop outer_run_loop;
RunLoop nested_run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, nested_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -859,15 +847,15 @@ void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
RunLoop nested_run_loop;
RunLoop bogus_run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- MessageLoop::current()->PostTask(
- FROM_HERE, bogus_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ bogus_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, nested_run_loop.QuitClosure());
outer_run_loop.Run();
@@ -894,35 +882,35 @@ void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
RunLoop nested_loop3;
RunLoop nested_loop4;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 5));
- MessageLoop::current()->PostTask(
- FROM_HERE, outer_run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 6));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop1.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop1.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 7));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop2.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop2.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 8));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop3.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop3.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 9));
- MessageLoop::current()->PostTask(
- FROM_HERE, nested_loop4.QuitClosure());
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop4.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 10));
outer_run_loop.Run();
@@ -961,10 +949,10 @@ void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
run_loop.Quit();
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -980,14 +968,14 @@ void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 1));
- MessageLoop::current()->PostTask(
- FROM_HERE, run_loop.QuitClosure());
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -1007,20 +995,20 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
RunLoop run_loop;
- MessageLoop::current()->PostTask(FROM_HERE,
- Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 2));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 3));
- MessageLoop::current()->PostTask(
- FROM_HERE, run_loop.QuitClosure()); // has no affect
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, run_loop.QuitClosure()); // has no affect
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE, Bind(&OrderedFunc, &order, 4));
- MessageLoop::current()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
RunLoop outer_run_loop;
outer_run_loop.Run();
@@ -1040,9 +1028,8 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
}
@@ -1060,8 +1047,8 @@ void RunTest_RecursivePosts(MessagePumpFactory factory) {
const int kNumTimes = 1 << 17;
std::unique_ptr<MessagePump> pump(factory());
MessageLoop loop(std::move(pump));
- loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
- loop.Run();
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
+ RunLoop().Run();
}
} // namespace test
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index bc4176fdebd..52337e31a8c 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -18,6 +18,7 @@
#include "base/pending_task.h"
#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/platform_thread.h"
@@ -416,9 +417,8 @@ void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
- MessageLoop::current()->PostTask(
- FROM_HERE,
- Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
}
@@ -581,6 +581,9 @@ RUN_MESSAGE_LOOP_TESTS(UI, &TypeUIMessagePumpFactory);
RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
#if defined(OS_WIN)
+// Additional set of tests for GPU version of UI message loop.
+RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
+
TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
RunTest_PostDelayedTask_SharedTimer_SubPump();
}
@@ -636,8 +639,8 @@ TEST(MessageLoopTest, TaskObserver) {
MessageLoop loop;
loop.AddTaskObserver(&observer);
- loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
- loop.Run();
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
+ RunLoop().Run();
loop.RemoveTaskObserver(&observer);
EXPECT_EQ(kNumPosts, observer.num_tasks_started());
@@ -812,11 +815,10 @@ TEST(MessageLoopTest, DestructionObserverTest) {
MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
loop->AddDestructionObserver(&observer);
- loop->PostDelayedTask(
- FROM_HERE,
- Bind(&DestructionObserverProbe::Run,
- new DestructionObserverProbe(&task_destroyed,
- &destruction_observer_called)),
+ loop->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&DestructionObserverProbe::Run,
+ new DestructionObserverProbe(
+ &task_destroyed, &destruction_observer_called)),
kDelay);
delete loop;
EXPECT_TRUE(observer.task_destroyed_before_message_loop());
@@ -837,12 +839,12 @@ TEST(MessageLoopTest, ThreadMainTaskRunner) {
&Foo::Test1ConstRef, foo.get(), a));
// Post quit task;
- MessageLoop::current()->PostTask(
+ MessageLoop::current()->task_runner()->PostTask(
FROM_HERE,
Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
// Now kick things off
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_EQ(foo->test_count(), 1);
EXPECT_EQ(foo->result(), "a");
@@ -961,7 +963,7 @@ TEST(MessageLoopTest, OriginalRunnerWorks) {
scoped_refptr<Foo> foo(new Foo());
original_runner->PostTask(FROM_HERE,
Bind(&Foo::Test1ConstRef, foo.get(), "a"));
- loop.RunUntilIdle();
+ RunLoop().RunUntilIdle();
EXPECT_EQ(1, foo->test_count());
}
@@ -976,4 +978,20 @@ TEST(MessageLoopTest, DeleteUnboundLoop) {
EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
}
+TEST(MessageLoopTest, ThreadName) {
+ {
+ std::string kThreadName("foo");
+ MessageLoop loop;
+ PlatformThread::SetName(kThreadName);
+ EXPECT_EQ(kThreadName, loop.GetThreadName());
+ }
+
+ {
+ std::string kThreadName("bar");
+ base::Thread thread(kThreadName);
+ ASSERT_TRUE(thread.StartAndWaitForTesting());
+ EXPECT_EQ(kThreadName, thread.message_loop()->GetThreadName());
+ }
+}
+
} // namespace base
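
The new ThreadName test exercises GetThreadName(), which resolves the stored thread id through ThreadIdNameManager. A toy version of such an id-to-name registry is sketched below with a plain map and mutex; the real manager is a base singleton and presumably more involved, so this is only an illustration of the lookup:

#include <cstdio>
#include <map>
#include <mutex>
#include <string>
#include <thread>

class ThreadNameRegistry {
 public:
  void SetName(std::thread::id id, const std::string& name) {
    std::lock_guard<std::mutex> hold(lock_);
    names_[id] = name;
  }

  // Returns an empty string for threads that never registered a name.
  std::string GetName(std::thread::id id) const {
    std::lock_guard<std::mutex> hold(lock_);
    auto it = names_.find(id);
    return it == names_.end() ? std::string() : it->second;
  }

 private:
  mutable std::mutex lock_;
  std::map<std::thread::id, std::string> names_;
};

int main() {
  ThreadNameRegistry registry;
  registry.SetName(std::this_thread::get_id(), "foo");
  std::printf("%s\n", registry.GetName(std::this_thread::get_id()).c_str());
}
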
diff --git a/chromium/base/message_loop/message_pump.cc b/chromium/base/message_loop/message_pump.cc
index 3d85b9b5643..2f740f24239 100644
--- a/chromium/base/message_loop/message_pump.cc
+++ b/chromium/base/message_loop/message_pump.cc
@@ -15,4 +15,11 @@ MessagePump::~MessagePump() {
void MessagePump::SetTimerSlack(TimerSlack) {
}
+#if defined(OS_WIN)
+bool MessagePump::WasSignaled() {
+ NOTREACHED();
+ return false;
+}
+#endif
+
} // namespace base
diff --git a/chromium/base/message_loop/message_pump.h b/chromium/base/message_loop/message_pump.h
index c53be804109..af8ed41f278 100644
--- a/chromium/base/message_loop/message_pump.h
+++ b/chromium/base/message_loop/message_pump.h
@@ -124,6 +124,15 @@ class BASE_EXPORT MessagePump : public NonThreadSafe {
// Sets the timer slack to the specified value.
virtual void SetTimerSlack(TimerSlack timer_slack);
+
+#if defined(OS_WIN)
+ // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+ // has been investigated.
+  // This should be used for diagnostics only. If the message pump's wake-up
+  // mechanism is based on an auto-reset event, this call would reset the
+  // event to its unset state.
+ virtual bool WasSignaled();
+#endif
};
} // namespace base
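
The default WasSignaled() added above is a NOTREACHED() stub, and the header comment warns that checking an auto-reset event consumes its signal. A standalone illustration of why such a check is destructive, using a toy auto-reset event rather than base's WaitableEvent:

#include <cstdio>
#include <mutex>

class AutoResetEvent {
 public:
  void Signal() {
    std::lock_guard<std::mutex> hold(lock_);
    signaled_ = true;
  }

  // Mirrors the caveat in the header comment: observing the state of an
  // auto-reset event also resets it, so this is a one-shot diagnostic, not a
  // query that can be repeated freely.
  bool WasSignaled() {
    std::lock_guard<std::mutex> hold(lock_);
    bool was_signaled = signaled_;
    signaled_ = false;  // auto-reset: the act of checking consumes the signal
    return was_signaled;
  }

 private:
  std::mutex lock_;
  bool signaled_ = false;
};

int main() {
  AutoResetEvent event;
  event.Signal();
  std::printf("first check:  %d\n", event.WasSignaled());   // 1
  std::printf("second check: %d\n", event.WasSignaled());   // 0, already reset
}
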
diff --git a/chromium/base/message_loop/message_pump_default.cc b/chromium/base/message_loop/message_pump_default.cc
index ed15395d56f..3449aec8605 100644
--- a/chromium/base/message_loop/message_pump_default.cc
+++ b/chromium/base/message_loop/message_pump_default.cc
@@ -4,6 +4,8 @@
#include "base/message_loop/message_pump_default.h"
+#include <algorithm>
+
#include "base/logging.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -16,8 +18,8 @@ namespace base {
MessagePumpDefault::MessagePumpDefault()
: keep_running_(true),
- event_(false, false) {
-}
+ event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
MessagePumpDefault::~MessagePumpDefault() {
}
@@ -54,7 +56,31 @@ void MessagePumpDefault::Run(Delegate* delegate) {
} else {
TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
if (delay > TimeDelta()) {
+#if defined(OS_WIN)
+ // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
+ // logic into TimedWait implementation in waitable_event_win.cc.
+
+ // crbug.com/487724: on Windows, waiting for less than 1 ms results in
+ // returning from TimedWait promptly and spinning the
+ // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
+ // run a delayed task. |min_delay| is the minimum possible wait to avoid
+ // the spinning.
+ constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
+ do {
+ delay = std::max(delay, min_delay);
+ if (event_.TimedWait(delay))
+ break;
+
+ // TimedWait can time out earlier than the specified |delay| on
+ // Windows. It doesn't make sense to run the outer loop in that case
+ // because there isn't going to be any new work. It is less overhead
+ // to just go back to wait.
+ // In practice this inner wait loop might have up to 3 iterations.
+ delay = delayed_work_time_ - TimeTicks::Now();
+ } while (delay > TimeDelta());
+#else
event_.TimedWait(delay);
+#endif
} else {
// It looks like delayed_work_time_ indicates a time in the past, so we
// need to call DoDelayedWork now.
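
The Windows-only loop above clamps every wait to at least 1 ms and goes back to waiting whenever TimedWait returns before the deadline. Its control flow, separated from base's WaitableEvent, looks roughly like the following sketch, which assumes a condition_variable stand-in for the pump's event and an invented helper name:

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>

using Clock = std::chrono::steady_clock;

std::mutex lock;
std::condition_variable wake_up;
bool work_posted = false;

// Waits until |deadline| (or a wake-up), never issuing a wait shorter than
// 1 ms, and re-waiting when the wait returns before the deadline.
void WaitForDelayedWork(Clock::time_point deadline) {
  constexpr auto kMinDelay = std::chrono::milliseconds(1);
  auto delay = deadline - Clock::now();
  while (delay > Clock::duration::zero()) {
    std::unique_lock<std::mutex> hold(lock);
    // Clamp tiny delays so the loop does not spin on sub-millisecond waits.
    if (wake_up.wait_for(hold, std::max<Clock::duration>(delay, kMinDelay),
                         [] { return work_posted; })) {
      return;  // Woken up because immediate work was posted.
    }
    // Timed out, possibly early: recompute the remaining delay and, if the
    // deadline has not passed yet, simply wait again.
    delay = deadline - Clock::now();
  }
}

int main() {
  WaitForDelayedWork(Clock::now() + std::chrono::milliseconds(5));
  std::printf("deadline reached\n");
}
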
diff --git a/chromium/base/message_loop/message_pump_libevent_unittest.cc b/chromium/base/message_loop/message_pump_libevent_unittest.cc
index 81afa5e58c1..6d1cf7ef31f 100644
--- a/chromium/base/message_loop/message_pump_libevent_unittest.cc
+++ b/chromium/base/message_loop/message_pump_libevent_unittest.cc
@@ -15,6 +15,7 @@
#include "base/message_loop/message_loop.h"
#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/synchronization/waitable_event_watcher.h"
#include "base/third_party/libevent/event.h"
@@ -177,7 +178,8 @@ void QuitMessageLoopAndStart(const Closure& quit_closure) {
MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
RunLoop runloop;
- MessageLoop::current()->PostTask(FROM_HERE, runloop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ runloop.QuitClosure());
runloop.Run();
}
@@ -188,8 +190,8 @@ class NestedPumpWatcher : public MessagePumpLibevent::Watcher {
void OnFileCanReadWithoutBlocking(int /* fd */) override {
RunLoop runloop;
- MessageLoop::current()->PostTask(FROM_HERE, Bind(&QuitMessageLoopAndStart,
- runloop.QuitClosure()));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&QuitMessageLoopAndStart, runloop.QuitClosure()));
runloop.Run();
}
@@ -220,7 +222,8 @@ class QuitWatcher : public BaseWatcher {
void OnFileCanReadWithoutBlocking(int /* fd */) override {
// Post a fatal closure to the MessageLoop before we quit it.
- MessageLoop::current()->PostTask(FROM_HERE, Bind(&FatalClosure));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FatalClosure));
// Now quit the MessageLoop.
run_loop_->Quit();
@@ -248,7 +251,8 @@ TEST_F(MessagePumpLibeventTest, QuitWatcher) {
RunLoop run_loop;
MessagePumpLibevent::FileDescriptorWatcher controller;
QuitWatcher delegate(&controller, &run_loop);
- WaitableEvent event(false /* manual_reset */, false /* initially_signaled */);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
std::unique_ptr<WaitableEventWatcher> watcher(new WaitableEventWatcher);
// Tell the pump to watch the pipe.
@@ -259,19 +263,21 @@ TEST_F(MessagePumpLibeventTest, QuitWatcher) {
const char buf = 0;
const WaitableEventWatcher::EventCallback write_fd_task =
Bind(&WriteFDWrapper, pipefds_[1], &buf, 1);
- io_loop()->PostTask(FROM_HERE,
- Bind(IgnoreResult(&WaitableEventWatcher::StartWatching),
- Unretained(watcher.get()), &event, write_fd_task));
+ io_loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(IgnoreResult(&WaitableEventWatcher::StartWatching),
+ Unretained(watcher.get()), &event, write_fd_task));
// Queue |event| to signal on |loop|.
- loop.PostTask(FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
+ loop.task_runner()->PostTask(
+ FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
// Now run the MessageLoop.
run_loop.Run();
// StartWatching can move |watcher| to IO thread. Release on IO thread.
- io_loop()->PostTask(FROM_HERE, Bind(&WaitableEventWatcher::StopWatching,
- Owned(watcher.release())));
+ io_loop()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&WaitableEventWatcher::StopWatching, Owned(watcher.release())));
}
} // namespace
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index edaf2068ff6..7bb942fa657 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -9,6 +9,7 @@
#include "base/format_macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/scoped_vector.h"
+#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
@@ -63,10 +64,9 @@ class ScheduleWorkTest : public testing::Test {
base::ThreadTicks::Now() - thread_start;
min_batch_times_[index] = minimum;
max_batch_times_[index] = maximum;
- target_message_loop()->PostTask(FROM_HERE,
- base::Bind(&ScheduleWorkTest::Increment,
- base::Unretained(this),
- schedule_calls));
+ target_message_loop()->task_runner()->PostTask(
+ FROM_HERE, base::Bind(&ScheduleWorkTest::Increment,
+ base::Unretained(this), schedule_calls));
}
void ScheduleWork(MessageLoop::Type target_type, int num_scheduling_threads) {
@@ -101,7 +101,7 @@ class ScheduleWorkTest : public testing::Test {
}
for (int i = 0; i < num_scheduling_threads; ++i) {
- scheduling_threads[i]->message_loop()->PostTask(
+ scheduling_threads[i]->message_loop()->task_runner()->PostTask(
FROM_HERE,
base::Bind(&ScheduleWorkTest::Schedule, base::Unretained(this), i));
}
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index 91a3496cd3c..de20bdc084d 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -9,6 +9,7 @@
#include <limits>
+#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/stringprintf.h"
@@ -27,6 +28,85 @@ enum MessageLoopProblems {
MESSAGE_LOOP_PROBLEM_MAX,
};
+// The following are pointers to the user32 APIs used in this file. They are
+// added to avoid a direct dependency on user32 from base, because some users
+// of base don't want that dependency.
+decltype(::TranslateMessage)* g_translate_message = nullptr;
+decltype(::DispatchMessageW)* g_dispatch_message = nullptr;
+decltype(::PeekMessageW)* g_peek_message = nullptr;
+decltype(::PostMessageW)* g_post_message = nullptr;
+decltype(::DefWindowProcW)* g_def_window_proc = nullptr;
+decltype(::PostQuitMessage)* g_post_quit = nullptr;
+decltype(::UnregisterClassW)* g_unregister_class = nullptr;
+decltype(::RegisterClassExW)* g_register_class = nullptr;
+decltype(::CreateWindowExW)* g_create_window_ex = nullptr;
+decltype(::DestroyWindow)* g_destroy_window = nullptr;
+decltype(::CallMsgFilterW)* g_call_msg_filter = nullptr;
+decltype(::GetQueueStatus)* g_get_queue_status = nullptr;
+decltype(::MsgWaitForMultipleObjectsEx)* g_msg_wait_for_multiple_objects_ex =
+ nullptr;
+decltype(::SetTimer)* g_set_timer = nullptr;
+decltype(::KillTimer)* g_kill_timer = nullptr;
+
+#define GET_USER32_API(module, name) \
+ reinterpret_cast<decltype(name)*>(::GetProcAddress(module, #name))
+
+// Initializes the global pointers to the user32 APIs used in this file.
+void InitUser32APIs() {
+ if (g_translate_message)
+ return;
+
+ HMODULE user32_module = ::GetModuleHandle(L"user32.dll");
+ CHECK(user32_module);
+
+ g_translate_message = GET_USER32_API(user32_module, TranslateMessage);
+ CHECK(g_translate_message);
+
+ g_dispatch_message = GET_USER32_API(user32_module, DispatchMessageW);
+ CHECK(g_dispatch_message);
+
+ g_peek_message = GET_USER32_API(user32_module, PeekMessageW);
+ CHECK(g_peek_message);
+
+ g_post_message = GET_USER32_API(user32_module, PostMessageW);
+ CHECK(g_post_message);
+
+ g_def_window_proc = GET_USER32_API(user32_module, DefWindowProcW);
+ CHECK(g_def_window_proc);
+
+ g_post_quit = GET_USER32_API(user32_module, PostQuitMessage);
+ CHECK(g_post_quit);
+
+ g_unregister_class = GET_USER32_API(user32_module, UnregisterClassW);
+ CHECK(g_unregister_class);
+
+ g_register_class = GET_USER32_API(user32_module, RegisterClassExW);
+ CHECK(g_register_class);
+
+ g_create_window_ex = GET_USER32_API(user32_module, CreateWindowExW);
+ CHECK(g_create_window_ex);
+
+ g_destroy_window = GET_USER32_API(user32_module, DestroyWindow);
+ CHECK(g_destroy_window);
+
+ g_call_msg_filter = GET_USER32_API(user32_module, CallMsgFilterW);
+ CHECK(g_call_msg_filter);
+
+ g_get_queue_status = GET_USER32_API(user32_module, GetQueueStatus);
+ CHECK(g_get_queue_status);
+
+ g_msg_wait_for_multiple_objects_ex =
+ GET_USER32_API(user32_module, MsgWaitForMultipleObjectsEx);
+ CHECK(g_msg_wait_for_multiple_objects_ex);
+
+ g_set_timer = GET_USER32_API(user32_module, SetTimer);
+ CHECK(g_set_timer);
+
+ g_kill_timer = GET_USER32_API(user32_module, KillTimer);
+ CHECK(g_kill_timer);
+}
+
} // namespace
static const wchar_t kWndClassFormat[] = L"Chrome_MessagePumpWindow_%p";
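
The block above resolves every user32 entry point at runtime with GetProcAddress so that base itself carries no import-table dependency on user32.dll. A pared-down, Windows-only sketch of the same technique, with one function instead of the full set and error handling reduced to an abort:

// Windows-only sketch: links against kernel32 only; user32 is resolved at
// runtime, mirroring the approach taken in message_pump_win.cc above.
#include <windows.h>
#include <cstdio>
#include <cstdlib>

decltype(::MessageBeep)* g_message_beep = nullptr;

void InitUser32() {
  if (g_message_beep)
    return;
  // GetModuleHandle does not add a load-time import the way linking against
  // user32.lib would; fall back to LoadLibrary if user32 is not loaded yet.
  HMODULE user32 = ::GetModuleHandleW(L"user32.dll");
  if (!user32)
    user32 = ::LoadLibraryW(L"user32.dll");
  if (!user32)
    std::abort();
  g_message_beep = reinterpret_cast<decltype(::MessageBeep)*>(
      ::GetProcAddress(user32, "MessageBeep"));
  if (!g_message_beep)
    std::abort();
}

int main() {
  InitUser32();
  g_message_beep(MB_OK);  // call through the dynamically resolved pointer
  std::printf("beeped via runtime-resolved user32 entry point\n");
}
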
@@ -35,9 +115,15 @@ static const wchar_t kWndClassFormat[] = L"Chrome_MessagePumpWindow_%p";
// task (a series of such messages creates a continuous task pump).
static const int kMsgHaveWork = WM_USER + 1;
+// The application-defined code passed to the hook procedure.
+static const int kMessageFilterCode = 0x5001;
+
//-----------------------------------------------------------------------------
// MessagePumpWin public:
+MessagePumpWin::MessagePumpWin() {
+}
+
void MessagePumpWin::Run(Delegate* delegate) {
RunState s;
s.delegate = delegate;
@@ -88,21 +174,22 @@ int MessagePumpWin::GetCurrentDelay() const {
MessagePumpForUI::MessagePumpForUI()
: atom_(0) {
+ InitUser32APIs();
InitMessageWnd();
}
MessagePumpForUI::~MessagePumpForUI() {
- DestroyWindow(message_hwnd_);
- UnregisterClass(MAKEINTATOM(atom_), CURRENT_MODULE());
+ g_destroy_window(message_hwnd_);
+ g_unregister_class(MAKEINTATOM(atom_), CURRENT_MODULE());
}
void MessagePumpForUI::ScheduleWork() {
- if (InterlockedExchange(&have_work_, 1))
+ if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
return; // Someone else continued the pumping.
// Make sure the MessagePump does some work for us.
- BOOL ret = PostMessage(message_hwnd_, kMsgHaveWork,
- reinterpret_cast<WPARAM>(this), 0);
+ BOOL ret = g_post_message(message_hwnd_, kMsgHaveWork,
+ reinterpret_cast<WPARAM>(this), 0);
if (ret)
return; // There was room in the Window Message queue.
@@ -114,7 +201,9 @@ void MessagePumpForUI::ScheduleWork() {
// common (queue is full, of about 2000 messages), so we'll do a near-graceful
// recovery. Nested loops are pretty transient (we think), so this will
// probably be recoverable.
- InterlockedExchange(&have_work_, 0); // Clarify that we didn't really insert.
+
+ // Clarify that we didn't really insert.
+ InterlockedExchange(&work_state_, READY);
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
state_->schedule_work_error_count++;
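
ScheduleWork above replaces the old |have_work_| flag with a |work_state_| flipped via InterlockedExchange, so exactly one racing caller wins the right to post kMsgHaveWork and every other caller returns early. The same guard in portable form with std::atomic; the READY/HAVE_WORK names come from the patch, the rest is invented for the demo:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum WorkState { READY = 0, HAVE_WORK = 1 };

std::atomic<int> work_state{READY};
std::atomic<int> wake_ups_posted{0};

void ScheduleWork() {
  // Atomically claim the READY -> HAVE_WORK transition; anyone who sees a
  // non-READY previous value knows a wake-up is already on its way.
  if (work_state.exchange(HAVE_WORK) != READY)
    return;  // Someone else continued the pumping.
  wake_ups_posted.fetch_add(1);  // stands in for posting kMsgHaveWork
}

int main() {
  std::vector<std::thread> posters;
  for (int i = 0; i < 8; ++i)
    posters.emplace_back(ScheduleWork);
  for (std::thread& t : posters)
    t.join();
  // However many threads raced, only one wake-up was posted.
  std::printf("wake-ups posted: %d\n", wake_ups_posted.load());
}
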
@@ -140,7 +229,7 @@ LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
break;
}
- return DefWindowProc(hwnd, message, wparam, lparam);
+ return g_def_window_proc(hwnd, message, wparam, lparam);
}
void MessagePumpForUI::DoRunLoop() {
@@ -181,7 +270,7 @@ void MessagePumpForUI::DoRunLoop() {
// don't want to disturb that timer if it is already in flight. However,
// if we did do all remaining delayed work, then lets kill the WM_TIMER.
if (more_work_is_plausible && delayed_work_time_.is_null())
- KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ g_kill_timer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
if (state_->should_quit)
break;
@@ -209,11 +298,11 @@ void MessagePumpForUI::InitMessageWnd() {
wc.lpfnWndProc = base::win::WrappedWindowProc<WndProcThunk>;
wc.hInstance = instance;
wc.lpszClassName = class_name.c_str();
- atom_ = RegisterClassEx(&wc);
+ atom_ = g_register_class(&wc);
DCHECK(atom_);
- message_hwnd_ = CreateWindow(MAKEINTATOM(atom_), 0, 0, 0, 0, 0, 0,
- HWND_MESSAGE, 0, instance, 0);
+ message_hwnd_ = g_create_window_ex(0, MAKEINTATOM(atom_), 0, 0, 0, 0, 0, 0,
+ HWND_MESSAGE, 0, instance, 0);
DCHECK(message_hwnd_);
}
@@ -227,8 +316,8 @@ void MessagePumpForUI::WaitForWork() {
if (delay < 0) // Negative value means no timers waiting.
delay = INFINITE;
- DWORD result =
- MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT, wait_flags);
+ DWORD result = g_msg_wait_for_multiple_objects_ex(0, nullptr, delay,
+ QS_ALLINPUT, wait_flags);
if (WAIT_OBJECT_0 == result) {
// A WM_* message is available.
@@ -246,9 +335,9 @@ void MessagePumpForUI::WaitForWork() {
// current thread.
MSG msg = {0};
bool has_pending_sent_message =
- (HIWORD(GetQueueStatus(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0;
+ (HIWORD(g_get_queue_status(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0;
if (has_pending_sent_message ||
- PeekMessage(&msg, NULL, 0, 0, PM_NOREMOVE)) {
+ g_peek_message(&msg, nullptr, 0, 0, PM_NOREMOVE)) {
return;
}
@@ -268,7 +357,7 @@ void MessagePumpForUI::HandleWorkMessage() {
// sort.
if (!state_) {
// Since we handled a kMsgHaveWork message, we must still update this flag.
- InterlockedExchange(&have_work_, 0);
+ InterlockedExchange(&work_state_, READY);
return;
}
@@ -277,7 +366,7 @@ void MessagePumpForUI::HandleWorkMessage() {
// messages that may be in the Windows message queue.
ProcessPumpReplacementMessage();
- // Now give the delegate a chance to do some work. He'll let us know if he
+ // Now give the delegate a chance to do some work. It'll let us know if it
// needs to do more work.
if (state_->delegate->DoWork())
ScheduleWork();
@@ -286,7 +375,7 @@ void MessagePumpForUI::HandleWorkMessage() {
}
void MessagePumpForUI::HandleTimerMessage() {
- KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ g_kill_timer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
// If we are being called outside of the context of Run, then don't do
// anything. This could correspond to a MessageBox call or something of
@@ -331,8 +420,8 @@ void MessagePumpForUI::RescheduleTimer() {
// Create a WM_TIMER event that will wake us up to check for any pending
// timers (in case we are running within a nested, external sub-pump).
- BOOL ret = SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
- delay_msec, NULL);
+ BOOL ret = g_set_timer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
+ delay_msec, nullptr);
if (ret)
return;
// If we can't set timers, we are in big trouble... but cross our fingers
@@ -349,12 +438,12 @@ bool MessagePumpForUI::ProcessNextWindowsMessage() {
// case to ensure that the message loop peeks again instead of calling
// MsgWaitForMultipleObjectsEx again.
bool sent_messages_in_queue = false;
- DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
+ DWORD queue_status = g_get_queue_status(QS_SENDMESSAGE);
if (HIWORD(queue_status) & QS_SENDMESSAGE)
sent_messages_in_queue = true;
MSG msg;
- if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE)
+ if (g_peek_message(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE)
return ProcessMessageHelper(msg);
return sent_messages_in_queue;
@@ -367,7 +456,7 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
// Repost the QUIT message so that it will be retrieved by the primary
// GetMessage() loop.
state_->should_quit = true;
- PostQuitMessage(static_cast<int>(msg.wParam));
+ g_post_quit(static_cast<int>(msg.wParam));
return false;
}
@@ -375,44 +464,36 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
return ProcessPumpReplacementMessage();
- if (CallMsgFilter(const_cast<MSG*>(&msg), kMessageFilterCode))
+ if (g_call_msg_filter(const_cast<MSG*>(&msg), kMessageFilterCode))
return true;
- TranslateMessage(&msg);
- DispatchMessage(&msg);
+ g_translate_message(&msg);
+ g_dispatch_message(&msg);
return true;
}
bool MessagePumpForUI::ProcessPumpReplacementMessage() {
- // When we encounter a kMsgHaveWork message, this method is called to peek
- // and process a replacement message, such as a WM_PAINT or WM_TIMER. The
- // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
- // a continuous stream of such messages are posted. This method carefully
- // peeks a message while there is no chance for a kMsgHaveWork to be pending,
- // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
- // possibly be posted), and finally dispatches that peeked replacement. Note
- // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
-
- bool have_message = false;
+  // When we encounter a kMsgHaveWork message, this method is called to peek
+  // and process a replacement message. The goal is to make kMsgHaveWork as
+  // non-intrusive as possible, even though a continuous stream of such
+  // messages is posted. This method carefully peeks a message while there is
+  // no chance for a kMsgHaveWork to be pending, then resets the |work_state_|
+  // flag (allowing a replacement kMsgHaveWork to possibly be posted), and
+  // finally dispatches that peeked replacement. Note that the re-post of
+  // kMsgHaveWork may be asynchronous to this thread!!
+
MSG msg;
- // We should not process all window messages if we are in the context of an
- // OS modal loop, i.e. in the context of a windows API call like MessageBox.
- // This is to ensure that these messages are peeked out by the OS modal loop.
- if (MessageLoop::current()->os_modal_loop()) {
- // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
- have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
- PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
- } else {
- have_message = PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE;
- }
+ const bool have_message =
+ g_peek_message(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;
+  // Expect no message or a message different from kMsgHaveWork.
DCHECK(!have_message || kMsgHaveWork != msg.message ||
msg.hwnd != message_hwnd_);
// Since we discarded a kMsgHaveWork message, we must update the flag.
- int old_have_work = InterlockedExchange(&have_work_, 0);
- DCHECK(old_have_work);
+ int old_work_state_ = InterlockedExchange(&work_state_, READY);
+ DCHECK_EQ(HAVE_WORK, old_work_state_);
// We don't need a special time slice if we didn't have_message to process.
if (!have_message)
@@ -427,6 +508,150 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
}
//-----------------------------------------------------------------------------
+// MessagePumpForGpu public:
+
+MessagePumpForGpu::MessagePumpForGpu() {
+ event_.Set(CreateEvent(nullptr, FALSE, FALSE, nullptr));
+ InitUser32APIs();
+}
+
+MessagePumpForGpu::~MessagePumpForGpu() {}
+
+// static
+void MessagePumpForGpu::InitFactory() {
+ bool init_result = MessageLoop::InitMessagePumpForUIFactory(
+ &MessagePumpForGpu::CreateMessagePumpForGpu);
+ DCHECK(init_result);
+}
+
+// static
+std::unique_ptr<MessagePump> MessagePumpForGpu::CreateMessagePumpForGpu() {
+ return WrapUnique<MessagePump>(new MessagePumpForGpu);
+}
+
+void MessagePumpForGpu::ScheduleWork() {
+ if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
+ return; // Someone else continued the pumping.
+
+ // TODO(stanisc): crbug.com/596190: Preserve for crash dump analysis.
+ // Remove this when the bug is fixed.
+ last_set_event_timeticks_ = TimeTicks::Now();
+
+ // Make sure the MessagePump does some work for us.
+ SetEvent(event_.Get());
+}
+
+void MessagePumpForGpu::ScheduleDelayedWork(
+ const TimeTicks& delayed_work_time) {
+ // We know that we can't be blocked right now since this method can only be
+ // called on the same thread as Run, so we only need to update our record of
+ // how long to sleep when we do sleep.
+ delayed_work_time_ = delayed_work_time;
+}
+
+bool MessagePumpForGpu::WasSignaled() {
+  // If |event_| was set, this resets it back to the unset state.
+ return WaitForSingleObject(event_.Get(), 0) == WAIT_OBJECT_0;
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForGpu private:
+
+void MessagePumpForGpu::DoRunLoop() {
+ while (!state_->should_quit) {
+ // Indicate that the loop is handling the work.
+ // If there is a race condition between switching to WORKING state here and
+ // the producer thread setting the HAVE_WORK state after exiting the wait,
+ // the event might remain in the signalled state. That might be less than
+ // optimal but wouldn't result in failing to handle the work.
+ InterlockedExchange(&work_state_, WORKING);
+
+ bool more_work_is_plausible = ProcessNextMessage();
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |= state_->delegate->DoWork();
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |=
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+ more_work_is_plausible = state_->delegate->DoIdleWork();
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+    // Switch the state to READY to indicate that the loop is ready to accept
+    // new work if it is still in the WORKING state and hasn't been signalled.
+    // Otherwise, if it is in the HAVE_WORK state, skip the wait and proceed to
+    // handling the work.
+ if (InterlockedCompareExchange(&work_state_, READY, WORKING) == HAVE_WORK)
+ continue; // Skip wait, more work was requested.
+
+ WaitForWork(); // Wait (sleep) until we have work to do again.
+ }
+}
+
+void MessagePumpForGpu::WaitForWork() {
+ // Wait until a message is available, up to the time needed by the timer
+ // manager to fire the next set of timers.
+ int delay;
+
+ // The while loop handles the situation where on Windows 7 and later versions
+ // MsgWaitForMultipleObjectsEx might time out slightly earlier (less than one
+  // ms) than the specified |delay|. In that situation it is better to just
+  // wait again rather than waste a DoRunLoop cycle.
+ while ((delay = GetCurrentDelay()) != 0) {
+ if (delay < 0) // Negative value means no timers waiting.
+ delay = INFINITE;
+
+ // TODO(stanisc): crbug.com/596190: Preserve for crash dump analysis.
+ // Remove this when the bug is fixed.
+ TimeTicks wait_for_work_timeticks = TimeTicks::Now();
+ debug::Alias(&wait_for_work_timeticks);
+ debug::Alias(&delay);
+
+ HANDLE handle = event_.Get();
+ DWORD result =
+ g_msg_wait_for_multiple_objects_ex(1, &handle, delay, QS_ALLINPUT, 0);
+ DCHECK_NE(WAIT_FAILED, result) << GetLastError();
+ if (result != WAIT_TIMEOUT) {
+ // Either work or message available.
+ return;
+ }
+ }
+}
+
+bool MessagePumpForGpu::ProcessNextMessage() {
+ MSG msg;
+ if (!g_peek_message(&msg, nullptr, 0, 0, PM_REMOVE))
+ return false;
+
+ if (msg.message == WM_QUIT) {
+ // Repost the QUIT message so that it will be retrieved by the primary
+ // GetMessage() loop.
+ state_->should_quit = true;
+ g_post_quit(static_cast<int>(msg.wParam));
+ return false;
+ }
+
+ if (!g_call_msg_filter(const_cast<MSG*>(&msg), kMessageFilterCode)) {
+ g_translate_message(&msg);
+ g_dispatch_message(&msg);
+ }
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
// MessagePumpForIO public:
MessagePumpForIO::IOContext::IOContext() {
@@ -434,14 +659,15 @@ MessagePumpForIO::IOContext::IOContext() {
}
MessagePumpForIO::MessagePumpForIO() {
- port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1));
+ port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr,
+ reinterpret_cast<ULONG_PTR>(nullptr), 1));
DCHECK(port_.IsValid());
}
MessagePumpForIO::~MessagePumpForIO() = default;
void MessagePumpForIO::ScheduleWork() {
- if (InterlockedExchange(&have_work_, 1))
+ if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
return; // Someone else continued the pumping.
// Make sure the MessagePump does some work for us.
@@ -452,7 +678,7 @@ void MessagePumpForIO::ScheduleWork() {
return; // Post worked perfectly.
// See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
- InterlockedExchange(&have_work_, 0); // Clarify that we didn't succeed.
+ InterlockedExchange(&work_state_, READY); // Clarify that we didn't succeed.
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
state_->schedule_work_error_count++;
@@ -502,7 +728,7 @@ void MessagePumpForIO::DoRunLoop() {
if (state_->should_quit)
break;
- more_work_is_plausible |= WaitForIOCompletion(0, NULL);
+ more_work_is_plausible |= WaitForIOCompletion(0, nullptr);
if (state_->should_quit)
break;
@@ -536,7 +762,7 @@ void MessagePumpForIO::WaitForWork() {
if (timeout < 0) // Negative value means no timers waiting.
timeout = INFINITE;
- WaitForIOCompletion(timeout, NULL);
+ WaitForIOCompletion(timeout, nullptr);
}
bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
@@ -563,8 +789,8 @@ bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
// Asks the OS for another IO completion result.
bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
memset(item, 0, sizeof(*item));
- ULONG_PTR key = NULL;
- OVERLAPPED* overlapped = NULL;
+ ULONG_PTR key = reinterpret_cast<ULONG_PTR>(nullptr);
+ OVERLAPPED* overlapped = nullptr;
if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
&overlapped, timeout)) {
if (!overlapped)
@@ -583,7 +809,7 @@ bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) {
// This is our internal completion.
DCHECK(!item.bytes_transfered);
- InterlockedExchange(&have_work_, 0);
+ InterlockedExchange(&work_state_, READY);
return true;
}
return false;
diff --git a/chromium/base/message_loop/message_pump_win.h b/chromium/base/message_loop/message_pump_win.h
index 8fb4d695350..b5f96b89239 100644
--- a/chromium/base/message_loop/message_pump_win.h
+++ b/chromium/base/message_loop/message_pump_win.h
@@ -8,6 +8,7 @@
#include <windows.h>
#include <list>
+#include <memory>
#include "base/base_export.h"
#include "base/message_loop/message_pump.h"
@@ -21,7 +22,7 @@ namespace base {
// controlling the lifetime of the message pump.
class BASE_EXPORT MessagePumpWin : public MessagePump {
public:
- MessagePumpWin() : have_work_(0), state_(NULL) {}
+ MessagePumpWin();
// MessagePump methods:
void Run(Delegate* delegate) override;
@@ -43,19 +44,26 @@ class BASE_EXPORT MessagePumpWin : public MessagePump {
Time last_schedule_work_error_time;
};
+ // State used with |work_state_| variable.
+ enum WorkState {
+ READY = 0, // Ready to accept new work.
+ HAVE_WORK = 1, // New work has been signalled.
+ WORKING = 2 // Handling the work.
+ };
+
virtual void DoRunLoop() = 0;
int GetCurrentDelay() const;
// The time at which delayed work should run.
TimeTicks delayed_work_time_;
- // A boolean value used to indicate if there is a kMsgDoWork message pending
+  // A value used to indicate if there is a kMsgHaveWork message pending
// in the Windows Message queue. There is at most one such message, and it
// can drive execution of tasks when a native message pump is running.
- LONG have_work_;
+ LONG work_state_ = READY;
// State for the current invocation of Run.
- RunState* state_;
+ RunState* state_ = nullptr;
};
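For illustration only (not part of this patch), a minimal sketch of the three-state handshake that READY/HAVE_WORK/WORKING enable; the local enum, the global variable, and the two function names are assumptions that mirror ScheduleWork() and DoRunLoop() in the .cc above:

    #include <windows.h>

    // Illustrative stand-ins for MessagePumpWin members; names are assumed.
    enum WorkState { READY = 0, HAVE_WORK = 1, WORKING = 2 };
    LONG g_work_state = READY;

    void ScheduleWorkSketch() {
      // Only the first caller transitions READY -> HAVE_WORK and wakes the pump.
      if (::InterlockedExchange(&g_work_state, HAVE_WORK) != READY)
        return;  // Someone else already signalled the pump.
      // ...wake the pump here (PostMessage, SetEvent, or a completion packet)...
    }

    void RunLoopIterationSketch() {
      ::InterlockedExchange(&g_work_state, WORKING);  // Claim the pending work.
      // ...run DoWork/DoDelayedWork/DoIdleWork here...
      // Return to READY only if no new work arrived while WORKING.
      if (::InterlockedCompareExchange(&g_work_state, READY, WORKING) ==
          HAVE_WORK) {
        return;  // New work was signalled; skip the wait and loop again.
      }
      // ...otherwise block in WaitForWork()...
    }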
//-----------------------------------------------------------------------------
@@ -108,9 +116,6 @@ class BASE_EXPORT MessagePumpWin : public MessagePump {
//
class BASE_EXPORT MessagePumpForUI : public MessagePumpWin {
public:
- // The application-defined code passed to the hook procedure.
- static const int kMessageFilterCode = 0x5001;
-
MessagePumpForUI();
~MessagePumpForUI() override;
@@ -141,6 +146,46 @@ class BASE_EXPORT MessagePumpForUI : public MessagePumpWin {
};
//-----------------------------------------------------------------------------
+// MessagePumpForGpu is a simplified version of the UI message pump that is
+// optimized for the GPU process. Unlike MessagePumpForUI it doesn't have a
+// hidden window and doesn't handle a situation where a native message pump
+// might take over message processing.
+//
+class BASE_EXPORT MessagePumpForGpu : public MessagePumpWin {
+ public:
+ MessagePumpForGpu();
+ ~MessagePumpForGpu() override;
+
+ // Factory methods.
+ static void InitFactory();
+ static std::unique_ptr<MessagePump> CreateMessagePumpForGpu();
+
+ // MessagePump methods:
+ void ScheduleWork() override;
+ void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+  // TODO(stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostics only. If the message pump wake-up
+  // mechanism is based on an auto-reset event, this call resets the event to
+  // the unset state.
+ bool WasSignaled() override;
+
+ private:
+ // MessagePumpWin methods:
+ void DoRunLoop() override;
+
+ void WaitForWork();
+ bool ProcessNextMessage();
+
+ win::ScopedHandle event_;
+
+ // Used to help diagnose hangs.
+ // TODO(stanisc): crbug.com/596190: Remove these once the bug is fixed.
+ TimeTicks last_set_event_timeticks_;
+};
+
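For illustration only (not part of this patch), a minimal sketch of how a GPU process entry point might opt into this pump, under the assumption that MessageLoop consults the factory registered via InitMessagePumpForUIFactory() when a TYPE_UI loop is created, as the .cc above suggests:

    // Hypothetical GPU-process startup; must run before the main MessageLoop
    // is constructed so the registered factory is consulted.
    base::MessagePumpForGpu::InitFactory();
    base::MessageLoop main_message_loop(base::MessageLoop::TYPE_UI);
    // From here on, ScheduleWork() wakes the pump via the auto-reset event
    // rather than through a hidden message window.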
+//-----------------------------------------------------------------------------
// MessagePumpForIO extends MessagePumpWin with methods that are particular to a
// MessageLoop instantiated with TYPE_IO. This version of MessagePump does not
// deal with Windows messages, and instead has a Run loop based on Completion
diff --git a/chromium/base/metrics/OWNERS b/chromium/base/metrics/OWNERS
index 3fd7c0dbc2b..feb8271f7ed 100644
--- a/chromium/base/metrics/OWNERS
+++ b/chromium/base/metrics/OWNERS
@@ -1,3 +1,2 @@
asvitkine@chromium.org
isherman@chromium.org
-jar@chromium.org
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 3b398cd20e7..600b94ed483 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -7,7 +7,6 @@
#include <algorithm>
#include "base/build_time.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
@@ -45,8 +44,14 @@ Time CreateTimeFromParams(int year, int month, int day_of_month) {
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
+ Time out_time;
+ if (!Time::FromLocalExploded(exploded, &out_time)) {
+ // TODO(maksims): implement failure handling.
+ // We might just return |out_time|, which is Time(0).
+ NOTIMPLEMENTED();
+ }
- return Time::FromLocalExploded(exploded);
+ return out_time;
}
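For illustration only (not part of this patch), a minimal sketch of the bool-returning Time::FromLocalExploded() signature the hunk above switches to; the date is arbitrary, and the failure path simply keeps the default-constructed (null) Time, matching the TODO above:

    base::Time::Exploded exploded = {};  // Zero-initialize all fields.
    exploded.year = 2016;
    exploded.month = 8;
    exploded.day_of_month = 1;
    base::Time out_time;  // Stays Time() if the conversion fails.
    if (!base::Time::FromLocalExploded(exploded, &out_time)) {
      // Out-of-range or otherwise unrepresentable local time.
    }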
// Returns the boundary value for comparing against the FieldTrial's added
@@ -107,38 +112,6 @@ bool ParseFieldTrialsString(const std::string& trials_string,
return true;
}
-void CheckTrialGroup(const std::string& trial_name,
- const std::string& trial_group,
- std::map<std::string, std::string>* seen_states) {
- if (ContainsKey(*seen_states, trial_name)) {
- CHECK_EQ((*seen_states)[trial_name], trial_group) << trial_name;
- } else {
- (*seen_states)[trial_name] = trial_group;
- }
-}
-
-// A second copy of FieldTrialList::seen_states_ that is meant to outlive the
-// FieldTrialList object to determine if the inconsistency happens because there
-// might be multiple FieldTrialList objects.
-// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
-base::LazyInstance<std::map<std::string, std::string>>::Leaky g_seen_states =
- LAZY_INSTANCE_INITIALIZER;
-
-// A debug token generated during FieldTrialList construction. Used to diagnose
-// crbug.com/359406.
-// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
-int32_t g_debug_token = -1;
-
-// Whether to append the debug token to the child process --force-fieldtrials
-// command line. Used to diagnose crbug.com/359406.
-// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
-bool g_append_debug_token_to_trial_string = false;
-
-// Tracks whether |g_seen_states| is used. Defaults to false, because unit tests
-// will create multiple FieldTrialList instances. Also controls whether
-// |g_debug_token| is included in the field trial state string.
-bool g_use_global_check_states = false;
-
} // namespace
// statics
@@ -242,9 +215,7 @@ void FieldTrial::SetForced() {
// static
void FieldTrial::EnableBenchmarking() {
- // TODO(asvitkine): Change this back to 0u after the trial in FieldTrialList
- // constructor is removed.
- DCHECK_EQ(1u, FieldTrialList::GetFieldTrialCount());
+ DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
enable_benchmarking_ = true;
}
@@ -276,9 +247,6 @@ FieldTrial::FieldTrial(const std::string& trial_name,
DCHECK_GT(total_probability, 0);
DCHECK(!trial_name_.empty());
DCHECK(!default_group_name_.empty());
-
- if (g_debug_token == -1)
- g_debug_token = RandInt(1, INT32_MAX);
}
FieldTrial::~FieldTrial() {}
@@ -344,8 +312,7 @@ FieldTrialList::FieldTrialList(
: entropy_provider_(entropy_provider),
observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
- // TODO(asvitkine): Turn into a DCHECK after http://crbug.com/359406 is fixed.
- CHECK(!global_);
+ DCHECK(!global_);
DCHECK(!used_without_global_);
global_ = this;
@@ -353,30 +320,6 @@ FieldTrialList::FieldTrialList(
Time::Exploded exploded;
two_years_from_build_time.LocalExplode(&exploded);
kNoExpirationYear = exploded.year;
-
- // Run a 50/50 experiment that enables |g_use_global_check_states| only for
- // half the users, to investigate if this instrumentation is causing the
- // crashes to disappear for http://crbug.com/359406. Done here instead of a
- // server-side trial because this needs to be done early during FieldTrialList
- // initialization.
- //
- // Note: |g_use_global_check_states| is set via EnableGlobalStateChecks()
- // prior to the FieldTrialList being created. We only want to do the trial
- // check once the first time FieldTrialList is created, so use a static
- // |first_time| variable to track this.
- //
- // TODO(asvitkine): Remove after http://crbug.com/359406 is fixed.
- static bool first_time = true;
- if (first_time && g_use_global_check_states) {
- first_time = false;
- base::FieldTrial* trial =
- FactoryGetFieldTrial("UMA_CheckStates", 100, "NoChecks",
- kNoExpirationYear, 1, 1,
- FieldTrial::SESSION_RANDOMIZED, nullptr);
- trial->AppendGroup("Checks", 50);
- if (trial->group_name() == "NoChecks")
- g_use_global_check_states = false;
- }
}
FieldTrialList::~FieldTrialList() {
@@ -391,18 +334,6 @@ FieldTrialList::~FieldTrialList() {
}
// static
-void FieldTrialList::EnableGlobalStateChecks() {
- CHECK(!g_use_global_check_states);
- g_use_global_check_states = true;
- g_append_debug_token_to_trial_string = true;
-}
-
-// static
-int32_t FieldTrialList::GetDebugToken() {
- return g_debug_token;
-}
-
-// static
FieldTrial* FieldTrialList::FactoryGetFieldTrial(
const std::string& trial_name,
FieldTrial::Probability total_probability,
@@ -534,12 +465,6 @@ void FieldTrialList::StatesToString(std::string* output) {
output->append(it->group_name);
output->append(1, kPersistentStringSeparator);
}
- if (g_append_debug_token_to_trial_string) {
- output->append("DebugToken");
- output->append(1, kPersistentStringSeparator);
- output->append(IntToString(g_debug_token));
- output->append(1, kPersistentStringSeparator);
- }
}
// static
@@ -562,14 +487,6 @@ void FieldTrialList::AllStatesToString(std::string* output) {
output->append(1, kPersistentStringSeparator);
trial.group_name.AppendToString(output);
output->append(1, kPersistentStringSeparator);
-
- // TODO(asvitkine): Remove these when http://crbug.com/359406 is fixed.
- CheckTrialGroup(trial.trial_name.as_string(), trial.group_name.as_string(),
- &global_->seen_states_);
- if (g_use_global_check_states) {
- CheckTrialGroup(trial.trial_name.as_string(),
- trial.group_name.as_string(), &g_seen_states.Get());
- }
}
}
@@ -694,16 +611,6 @@ void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
if (!field_trial->enable_field_trial_)
return;
- // TODO(asvitkine): Remove this block when http://crbug.com/359406 is fixed.
- {
- AutoLock auto_lock(global_->lock_);
- CheckTrialGroup(field_trial->trial_name(),
- field_trial->group_name_internal(), &global_->seen_states_);
- if (g_use_global_check_states) {
- CheckTrialGroup(field_trial->trial_name(),
- field_trial->group_name_internal(), &g_seen_states.Get());
- }
- }
global_->observer_list_->Notify(
FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
field_trial->trial_name(), field_trial->group_name_internal());
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index fc6237a5134..28a4606a884 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -347,20 +347,6 @@ class BASE_EXPORT FieldTrialList {
// Destructor Release()'s references to all registered FieldTrial instances.
~FieldTrialList();
- // TODO(asvitkine): Temporary function to diagnose http://crbug.com/359406.
- // Remove when that bug is fixed. This enables using a global map that checks
- // the state of field trials between possible FieldTrialList instances. If
- // enabled, a CHECK will be hit if it's seen that a field trial is given a
- // different state then what was specified to a renderer process launch
- // command line.
- static void EnableGlobalStateChecks();
-
- // TODO(asvitkine): Temporary function to diagnose http://crbug.com/359406.
- // Remove when that bug is fixed. This returns a unique token generated during
- // FieldTrialList construction. This is used to verify that this value stays
- // consistent between renderer process invocations.
- static int32_t GetDebugToken();
-
// Get a FieldTrial instance from the factory.
//
// |name| is used to register the instance with the FieldTrialList class,
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 3571657513f..b4a6bd4e1af 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -64,8 +64,7 @@ bool ReadHistogramArguments(PickleIterator* iter,
}
// We use the arguments to find or create the local version of the histogram
- // in this process, so we need to clear the IPC flag.
- DCHECK(*flags & HistogramBase::kIPCSerializationSourceFlag);
+ // in this process, so we need to clear any IPC flag.
*flags &= ~HistogramBase::kIPCSerializationSourceFlag;
return true;
@@ -1137,7 +1136,9 @@ bool CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
}
double CustomHistogram::GetBucketSize(Count current, uint32_t i) const {
- return 1;
+ // If this is a histogram of enum values, normalizing the bucket count
+ // by the bucket range is not helpful, so just return the bucket count.
+ return current;
}
// static
diff --git a/chromium/base/metrics/histogram_base_unittest.cc b/chromium/base/metrics/histogram_base_unittest.cc
index 5ce39cabe1f..1eb8fd46084 100644
--- a/chromium/base/metrics/histogram_base_unittest.cc
+++ b/chromium/base/metrics/histogram_base_unittest.cc
@@ -29,7 +29,7 @@ class HistogramBaseTest : public testing::Test {
// It is necessary to fully destruct any existing StatisticsRecorder
// before creating a new one.
statistics_recorder_.reset();
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
HistogramBase* GetCreationReportHistogram(const std::string& name) {
diff --git a/chromium/base/metrics/histogram_delta_serialization_unittest.cc b/chromium/base/metrics/histogram_delta_serialization_unittest.cc
index 80a70096716..719bc70970f 100644
--- a/chromium/base/metrics/histogram_delta_serialization_unittest.cc
+++ b/chromium/base/metrics/histogram_delta_serialization_unittest.cc
@@ -14,7 +14,8 @@
namespace base {
TEST(HistogramDeltaSerializationTest, DeserializeHistogramAndAddSamples) {
- StatisticsRecorder statistic_recorder;
+ std::unique_ptr<StatisticsRecorder> statistic_recorder(
+ StatisticsRecorder::CreateTemporaryForTesting());
HistogramDeltaSerialization serializer("HistogramDeltaSerializationTest");
std::vector<std::string> deltas;
// Nothing was changed yet.
diff --git a/chromium/base/metrics/histogram_snapshot_manager_unittest.cc b/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
index 8ec03daa8d9..6d53c86ac58 100644
--- a/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -68,11 +68,12 @@ class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
class HistogramSnapshotManagerTest : public testing::Test {
protected:
HistogramSnapshotManagerTest()
- : histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
+ : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
+ histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
~HistogramSnapshotManagerTest() override {}
- StatisticsRecorder statistics_recorder_;
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
HistogramSnapshotManager histogram_snapshot_manager_;
};
diff --git a/chromium/base/metrics/histogram_unittest.cc b/chromium/base/metrics/histogram_unittest.cc
index 668ac1ba7e8..5c2ca6883ae 100644
--- a/chromium/base/metrics/histogram_unittest.cc
+++ b/chromium/base/metrics/histogram_unittest.cc
@@ -56,7 +56,7 @@ class HistogramTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
void UninitializeStatisticsRecorder() {
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index 20f64336016..51a3d6c5eba 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -6,7 +6,10 @@
#include <memory>
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
+#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
@@ -16,6 +19,7 @@
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
#include "base/synchronization/lock.h"
// TODO(bcwhite): Order these methods to match the header file. The current
@@ -325,7 +329,6 @@ void PersistentHistogramAllocator::RecordCreateHistogramResult(
result_histogram->Add(result);
}
-// static
std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
PersistentHistogramData* histogram_data_ptr) {
if (!histogram_data_ptr) {
@@ -450,6 +453,36 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
return histogram;
}
+HistogramBase*
+PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
+ const HistogramBase* histogram) {
+ // This should never be called on the global histogram allocator as objects
+ // created there are already within the global statistics recorder.
+ DCHECK_NE(g_allocator, this);
+ DCHECK(histogram);
+
+ HistogramBase* existing =
+ StatisticsRecorder::FindHistogram(histogram->histogram_name());
+ if (existing)
+ return existing;
+
+ // Adding the passed histogram to the SR would cause a problem if the
+ // allocator that holds it eventually goes away. Instead, create a new
+ // one from a serialized version.
+ base::Pickle pickle;
+ if (!histogram->SerializeInfo(&pickle))
+ return nullptr;
+ PickleIterator iter(pickle);
+ existing = DeserializeHistogramInfo(&iter);
+ if (!existing)
+ return nullptr;
+
+ // Make sure there is no "serialization" flag set.
+ DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
+ // Record the newly created histogram in the SR.
+ return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
+}
+
std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
Reference ref) {
// Unfortunately, the histogram "pickle" methods cannot be used as part of
@@ -480,7 +513,42 @@ void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
// two to be created. The allocator does not support releasing the
// acquired memory so just change the type to be empty.
else
- memory_allocator_->SetType(ref, 0);
+ memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
+}
+
+void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
+ HistogramBase* histogram) {
+ DCHECK(histogram);
+
+ HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+ if (!existing) {
+ // The above should never fail but if it does, no real harm is done.
+ // The data won't be merged but it also won't be recorded as merged
+ // so a future try, if successful, will get what was missed. If it
+ // continues to fail, some metric data will be lost but that is better
+ // than crashing.
+ NOTREACHED();
+ return;
+ }
+
+ // Merge the delta from the passed object to the one in the SR.
+ existing->AddSamples(*histogram->SnapshotDelta());
+}
+
+void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
+ const HistogramBase* histogram) {
+ DCHECK(histogram);
+
+ HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+ if (!existing) {
+ // The above should never fail but if it does, no real harm is done.
+ // Some metric data will be lost but that is better than crashing.
+ NOTREACHED();
+ return;
+ }
+
+ // Merge the delta from the passed object to the one in the SR.
+ existing->AddSamples(*histogram->SnapshotFinalDelta());
}
PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
@@ -620,6 +688,37 @@ void GlobalHistogramAllocator::CreateWithLocalMemory(
WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
}
+#if !defined(OS_NACL)
+// static
+void GlobalHistogramAllocator::CreateWithFile(
+ const FilePath& file_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name) {
+ bool exists = PathExists(file_path);
+ File file(
+ file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
+ File::FLAG_READ | File::FLAG_WRITE);
+
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ if (exists) {
+ mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
+ } else {
+ mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
+ MemoryMappedFile::READ_WRITE_EXTEND);
+ }
+ if (!mmfile->IsValid() ||
+ !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+ NOTREACHED();
+ return;
+ }
+
+ Set(WrapUnique(new GlobalHistogramAllocator(
+ WrapUnique(new FilePersistentMemoryAllocator(
+ std::move(mmfile), size, id, name, false)))));
+}
+#endif
+
// static
void GlobalHistogramAllocator::CreateWithSharedMemory(
std::unique_ptr<SharedMemory> memory,
@@ -713,6 +812,10 @@ void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
persistent_location_ = location;
}
+const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
+ return persistent_location_;
+}
+
bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
// NACL doesn't support file operations, including ImportantFileWriter.
diff --git a/chromium/base/metrics/persistent_histogram_allocator.h b/chromium/base/metrics/persistent_histogram_allocator.h
index 8df45f2813a..ee1fba5f624 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.h
+++ b/chromium/base/metrics/persistent_histogram_allocator.h
@@ -19,6 +19,7 @@
namespace base {
+class FilePath;
class PersistentSampleMapRecords;
class PersistentSparseHistogramDataManager;
@@ -248,6 +249,19 @@ class BASE_EXPORT PersistentHistogramAllocator {
// True, forgetting it otherwise.
void FinalizeHistogram(Reference ref, bool registered);
+ // Merges the data in a persistent histogram with one held globally by the
+ // StatisticsRecorder, updating the "logged" samples within the passed
+ // object so that repeated merges are allowed. Don't call this on a "global"
+ // allocator because histograms created there will already be in the SR.
+ void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
+
+ // As above but merge the "final" delta. No update of "logged" samples is
+ // done which means it can operate on read-only objects. It's essential,
+ // however, not to call this more than once or those final samples will
+ // get recorded again.
+ void MergeHistogramFinalDeltaToStatisticsRecorder(
+ const HistogramBase* histogram);
+
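For illustration only (not part of this patch), a minimal sketch of a merge pass over a non-global allocator (for example, one backed by a subprocess's persistent memory); |allocator| is an assumed PersistentHistogramAllocator*, and the loop mirrors the unit test added further below:

    base::PersistentHistogramAllocator::Iterator it(allocator);
    while (std::unique_ptr<base::HistogramBase> histogram = it.GetNext()) {
      // Updates the "logged" samples inside |histogram| so that repeated
      // passes only merge new deltas into the StatisticsRecorder.
      allocator->MergeHistogramDeltaToStatisticsRecorder(histogram.get());
    }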
// Returns the object that manages the persistent-sample-map records for a
// given |id|. Only one |user| of this data is allowed at a time. This does
// an automatic Acquire() on the records. The user must call Release() on
@@ -332,6 +346,12 @@ class BASE_EXPORT PersistentHistogramAllocator {
std::unique_ptr<HistogramBase> CreateHistogram(
PersistentHistogramData* histogram_data_ptr);
+ // Gets or creates an object in the global StatisticsRecorder matching
+ // the |histogram| passed. Null is returned if one was not found and
+ // one could not be created.
+ HistogramBase* GetOrCreateStatisticsRecorderHistogram(
+ const HistogramBase* histogram);
+
// Record the result of a histogram creation.
static void RecordCreateHistogramResult(CreateHistogramResultType result);
@@ -370,6 +390,17 @@ class BASE_EXPORT GlobalHistogramAllocator
// specified |size| taken from the heap.
static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
+#if !defined(OS_NACL)
+  // Create a global allocator by memory-mapping the file at |file_path|. If it does
+ // not exist, it will be created with the specified |size|. If the file does
+ // exist, the allocator will use and add to its contents, ignoring the passed
+ // size in favor of the existing size.
+ static void CreateWithFile(const FilePath& file_path,
+ size_t size,
+ uint64_t id,
+ StringPiece name);
+#endif
+
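For illustration only (not part of this patch), a minimal sketch wiring CreateWithFile() together with the persistent-location helpers declared further below; |user_data_dir|, the file name, size, and id are assumptions, not values the patch prescribes:

    base::FilePath metrics_file =
        user_data_dir.AppendASCII("BrowserMetrics-active.pma");  // Assumed path.
    base::GlobalHistogramAllocator::CreateWithFile(
        metrics_file, 1 << 20 /* size */, 0 /* id */, "BrowserMetrics");
    base::GlobalHistogramAllocator* allocator =
        base::GlobalHistogramAllocator::Get();
    allocator->SetPersistentLocation(metrics_file);
    // At a convenient shutdown point the data can be flushed atomically:
    allocator->WriteToPersistentLocation();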
// Create a global allocator using a block of shared |memory| of the
// specified |size|. The allocator takes ownership of the shared memory
// and releases it upon destruction, though the memory will continue to
@@ -408,6 +439,10 @@ class BASE_EXPORT GlobalHistogramAllocator
// in order to persist the data for a later use.
void SetPersistentLocation(const FilePath& location);
+ // Retrieves a previously set pathname to which the contents of this allocator
+ // are to be saved.
+ const FilePath& GetPersistentLocation() const;
+
// Writes the internal data to a previously set location. This is generally
// called when a process is exiting from a section of code that may not know
// the filesystem. The data is written in an atomic manner. The return value
diff --git a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
index 24a0753fe48..b6806622502 100644
--- a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -4,11 +4,13 @@
#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/files/scoped_temp_dir.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/statistics_recorder.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -17,7 +19,10 @@ class PersistentHistogramAllocatorTest : public testing::Test {
protected:
const int32_t kAllocatorMemorySize = 64 << 10; // 64 KiB
- PersistentHistogramAllocatorTest() { CreatePersistentHistogramAllocator(); }
+ PersistentHistogramAllocatorTest()
+ : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
+ CreatePersistentHistogramAllocator();
+ }
~PersistentHistogramAllocatorTest() override {
DestroyPersistentHistogramAllocator();
}
@@ -39,6 +44,7 @@ class PersistentHistogramAllocatorTest : public testing::Test {
GlobalHistogramAllocator::ReleaseForTesting();
}
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
std::unique_ptr<char[]> allocator_memory_;
PersistentMemoryAllocator* allocator_ = nullptr;
@@ -121,4 +127,83 @@ TEST_F(PersistentHistogramAllocatorTest, CreateAndIterateTest) {
EXPECT_FALSE(recovered);
}
+TEST_F(PersistentHistogramAllocatorTest, CreateWithFileTest) {
+ const char temp_name[] = "CreateWithFileTest";
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath temp_file = temp_dir.path().AppendASCII(temp_name);
+ const size_t temp_size = 64 << 10; // 64 KiB
+
+ // Test creation of a new file.
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
+ EXPECT_EQ(std::string(temp_name),
+ GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+ // Test re-open of a possibly-existing file.
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, "");
+ EXPECT_EQ(std::string(temp_name),
+ GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a known-existing file.
+ GlobalHistogramAllocator::ReleaseForTesting();
+ GlobalHistogramAllocator::CreateWithFile(temp_file, 0, 0, "");
+ EXPECT_EQ(std::string(temp_name),
+ GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+ // Final release so file and temp-dir can be removed.
+ GlobalHistogramAllocator::ReleaseForTesting();
+}
+
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderTest) {
+ size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+
+ // Create a local StatisticsRecorder in which the newly created histogram
+ // will be recorded.
+ std::unique_ptr<StatisticsRecorder> local_sr =
+ StatisticsRecorder::CreateTemporaryForTesting();
+ EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+
+ HistogramBase* histogram = LinearHistogram::FactoryGet(
+ "TestHistogram", 1, 10, 10, HistogramBase::kIsPersistent);
+ EXPECT_TRUE(histogram);
+ EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
+ histogram->Add(3);
+ histogram->Add(1);
+ histogram->Add(4);
+ histogram->Add(1);
+ histogram->Add(6);
+
+ // Destroy the local SR and ensure that we're back to the initial state.
+ local_sr.reset();
+ EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+
+ // Create a second allocator and have it access the memory of the first.
+ std::unique_ptr<HistogramBase> recovered;
+ PersistentHistogramAllocator recovery(
+ WrapUnique(new PersistentMemoryAllocator(
+ allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+ PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+ recovered = histogram_iter.GetNext();
+ ASSERT_TRUE(recovered);
+
+ // Merge the recovered histogram to the SR. It will always be a new object.
+ recovery.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+ EXPECT_EQ(starting_sr_count + 1, StatisticsRecorder::GetHistogramCount());
+ HistogramBase* found =
+ StatisticsRecorder::FindHistogram(recovered->histogram_name());
+ ASSERT_TRUE(found);
+ EXPECT_NE(recovered.get(), found);
+
+ // Ensure that the data got merged, too.
+ std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+ EXPECT_EQ(recovered->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+ EXPECT_EQ(1, snapshot->GetCount(3));
+ EXPECT_EQ(2, snapshot->GetCount(1));
+ EXPECT_EQ(1, snapshot->GetCount(4));
+ EXPECT_EQ(1, snapshot->GetCount(6));
+}
+
} // namespace base
diff --git a/chromium/base/metrics/persistent_memory_allocator.cc b/chromium/base/metrics/persistent_memory_allocator.cc
index bc873fefa0a..dfa408f44d2 100644
--- a/chromium/base/metrics/persistent_memory_allocator.cc
+++ b/chromium/base/metrics/persistent_memory_allocator.cc
@@ -7,6 +7,12 @@
#include <assert.h>
#include <algorithm>
+#if defined(OS_WIN)
+#include "winbase.h"
+#elif defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
@@ -14,10 +20,8 @@
namespace {
-// Required range of memory segment sizes. It has to fit in an unsigned 32-bit
-// number and should be a power of 2 in order to accomodate almost any page
-// size.
-const uint32_t kSegmentMinSize = 1 << 10; // 1 KiB
+// Limit of memory segment size. It has to fit in an unsigned 32-bit number
+// and should be a power of 2 in order to accommodate almost any page size.
const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB
// A constant (random) value placed in the shared metadata to identify
@@ -80,8 +84,8 @@ const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
struct PersistentMemoryAllocator::BlockHeader {
uint32_t size; // Number of bytes in this block, including header.
uint32_t cookie; // Constant value indicating completed allocation.
- uint32_t type_id; // A number provided by caller indicating data type.
- std::atomic<uint32_t> next; // Pointer to the next block when iterating.
+ std::atomic<uint32_t> type_id; // Arbitrary number indicating data type.
+ std::atomic<uint32_t> next; // Pointer to the next block when iterating.
};
// The shared metadata exists once at the top of the memory segment to
@@ -190,7 +194,7 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
// "strong" compare-exchange is used because failing unnecessarily would
// mean repeating some fairly costly validations above.
if (last_record_.compare_exchange_strong(last, next)) {
- *type_return = block->type_id;
+ *type_return = block->type_id.load(std::memory_order_relaxed);
break;
}
}
@@ -239,7 +243,6 @@ bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
bool readonly) {
return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
(size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
- (size >= kSegmentMinSize || readonly) &&
(size % kAllocAlignment == 0 || readonly) &&
(page_size == 0 || size % page_size == 0 || readonly));
}
@@ -298,10 +301,9 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
first_block->size != 0 ||
first_block->cookie != 0 ||
- first_block->type_id != 0 ||
+ first_block->type_id.load(std::memory_order_relaxed) != 0 ||
first_block->next != 0) {
// ...or something malicious has been playing with the metadata.
- NOTREACHED();
SetCorrupt();
}
@@ -339,12 +341,22 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(
}
if (!readonly) {
// The allocator is attaching to a previously initialized segment of
- // memory. Make sure the embedded data matches what has been passed.
- if (shared_meta()->size != mem_size_ ||
- shared_meta()->page_size != mem_page_) {
- NOTREACHED();
+ // memory. If the initialization parameters differ, make the best of it
+ // by reducing the local construction parameters to match those of
+ // the actual memory area. This ensures that the local object never
+ // tries to write outside of the original bounds.
+      // The fields are const, both to prevent changes outside the constructor
+      // and to give the compiler optimization hints, so they have to be
+      // const-cast in order to adjust them here.
+ if (shared_meta()->size < mem_size_)
+ *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
+ if (shared_meta()->page_size < mem_page_)
+ *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
+
+ // Ensure that settings are still valid after the above adjustments.
+ if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly))
SetCorrupt();
- }
}
}
}
@@ -416,15 +428,20 @@ uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
return 0;
- return block->type_id;
+ return block->type_id.load(std::memory_order_relaxed);
}
-void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) {
+bool PersistentMemoryAllocator::ChangeType(Reference ref,
+ uint32_t to_type_id,
+ uint32_t from_type_id) {
DCHECK(!readonly_);
volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
- return;
- block->type_id = type_id;
+ return false;
+
+ // This is a "strong" exchange because there is no loop that can retry in
+ // the wake of spurious failures possible with "weak" exchanges.
+ return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
}
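For illustration only (not part of this patch), a minimal sketch of the compare-and-swap contract ChangeType() now enforces; the type ids and |allocator| (a PersistentMemoryAllocator*) are assumptions:

    const uint32_t kTypeIdPending = 0x11;  // Assumed application-defined ids.
    const uint32_t kTypeIdDone = 0x22;

    PersistentMemoryAllocator::Reference ref =
        allocator->Allocate(64, kTypeIdPending);
    // Succeeds: the block currently has the expected "from" type.
    bool changed = allocator->ChangeType(ref, kTypeIdDone, kTypeIdPending);
    // Fails and leaves the type untouched: the block is no longer "pending".
    bool stale = allocator->ChangeType(ref, 0, kTypeIdPending);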
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
@@ -538,7 +555,7 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
// writing beyond the allocated space and into unallocated space.
if (block->size != 0 ||
block->cookie != kBlockCookieFree ||
- block->type_id != 0 ||
+ block->type_id.load(std::memory_order_relaxed) != 0 ||
block->next.load(std::memory_order_relaxed) != 0) {
SetCorrupt();
return kReferenceNull;
@@ -546,7 +563,7 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
block->size = size;
block->cookie = kBlockCookieAllocated;
- block->type_id = type_id;
+ block->type_id.store(type_id, std::memory_order_relaxed);
return freeptr;
}
}
@@ -678,8 +695,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return nullptr;
if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
return nullptr;
- if (type_id != 0 && block->type_id != type_id)
+ if (type_id != 0 &&
+ block->type_id.load(std::memory_order_relaxed) != type_id) {
return nullptr;
+ }
}
// Return pointer to block data.
@@ -716,11 +735,44 @@ LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
size_t size,
uint64_t id,
base::StringPiece name)
- : PersistentMemoryAllocator(memset(new char[size], 0, size),
+ : PersistentMemoryAllocator(AllocateLocalMemory(size),
size, 0, id, name, false) {}
LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
- delete [] mem_base_;
+ DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_);
+}
+
+// static
+void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+#if defined(OS_WIN)
+ void* address =
+ ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ DPCHECK(address);
+ return address;
+#elif defined(OS_POSIX)
+ // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
+ // MAP_SHARED is not available on Linux <2.4 but required on Mac.
+ void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_SHARED, -1, 0);
+ DPCHECK(MAP_FAILED != address);
+ return address;
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+// static
+void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
+ size_t size) {
+#if defined(OS_WIN)
+ BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
+ DPCHECK(success);
+#elif defined(OS_POSIX)
+ int result = ::munmap(memory, size);
+ DPCHECK(0 == result);
+#else
+#error This architecture is not (yet) supported.
+#endif
}
@@ -744,30 +796,35 @@ SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
// static
bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
const SharedMemory& memory) {
- return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, true);
+ return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
}
+#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------
FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
std::unique_ptr<MemoryMappedFile> file,
+ size_t max_size,
uint64_t id,
- base::StringPiece name)
+ base::StringPiece name,
+ bool read_only)
: PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
- file->length(),
+ max_size != 0 ? max_size : file->length(),
0,
id,
name,
- true),
+ read_only),
mapped_file_(std::move(file)) {}
FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
// static
bool FilePersistentMemoryAllocator::IsFileAcceptable(
- const MemoryMappedFile& file) {
- return IsMemoryAcceptable(file.data(), file.length(), 0, true);
+ const MemoryMappedFile& file,
+ bool read_only) {
+ return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
}
+#endif // !defined(OS_NACL)
} // namespace base
diff --git a/chromium/base/metrics/persistent_memory_allocator.h b/chromium/base/metrics/persistent_memory_allocator.h
index 56edd2ca24e..2fc0d2d0da4 100644
--- a/chromium/base/metrics/persistent_memory_allocator.h
+++ b/chromium/base/metrics/persistent_memory_allocator.h
@@ -241,9 +241,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
// Access the internal "type" of an object. This generally isn't necessary
// but can be used to "clear" the type and so effectively mark it as deleted
- // even though the memory stays valid and allocated.
+ // even though the memory stays valid and allocated. Changing the type is
+ // an atomic compare/exchange and so requires knowing the existing value.
+ // It will return false if the existing type is not what is expected.
uint32_t GetType(Reference ref) const;
- void SetType(Reference ref, uint32_t type_id);
+ bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
// Reserve space in the memory segment of the desired |size| and |type_id|.
// A return value of zero indicates the allocation failed, otherwise the
@@ -354,6 +356,14 @@ class BASE_EXPORT LocalPersistentMemoryAllocator
~LocalPersistentMemoryAllocator() override;
private:
+ // Allocates a block of local memory of the specified |size|, ensuring that
+ // the memory will not be physically allocated until accessed and will read
+ // as zero when that happens.
+ static void* AllocateLocalMemory(size_t size);
+
+ // Deallocates a block of local |memory| of the specified |size|.
+ static void DeallocateLocalMemory(void* memory, size_t size);
+
DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
@@ -385,28 +395,34 @@ class BASE_EXPORT SharedPersistentMemoryAllocator
};
+#if !defined(OS_NACL) // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
-// from it. The allocator takes ownership of the file object. Only read access
-// is provided due to limitions of the MemoryMappedFile class.
+// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
: public PersistentMemoryAllocator {
public:
+ // A |max_size| of zero will use the length of the file as the maximum
+ // size. The |file| object must have been already created with sufficient
+ // permissions (read, read/write, or read/write/extend).
FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+ size_t max_size,
uint64_t id,
- base::StringPiece name);
+ base::StringPiece name,
+ bool read_only);
~FilePersistentMemoryAllocator() override;
// Ensure that the file isn't so invalid that it won't crash when passing it
// to the allocator. This doesn't guarantee the file is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
- static bool IsFileAcceptable(const MemoryMappedFile& file);
+ static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
private:
std::unique_ptr<MemoryMappedFile> mapped_file_;
DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
+#endif // !defined(OS_NACL)
} // namespace base
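For illustration only (not part of this patch), a minimal sketch of attaching read-only to an existing file through the widened constructor; |file_path| is an assumed base::FilePath, and the read-only MemoryMappedFile::Initialize(FilePath) overload is used:

    std::unique_ptr<base::MemoryMappedFile> mmfile(new base::MemoryMappedFile());
    if (mmfile->Initialize(file_path) &&  // Read-only mapping of the whole file.
        base::FilePersistentMemoryAllocator::IsFileAcceptable(
            *mmfile, /*read_only=*/true)) {
      base::FilePersistentMemoryAllocator allocator(
          std::move(mmfile), /*max_size=*/0, /*id=*/0, /*name=*/"",
          /*read_only=*/true);
      // Iterate over |allocator| to read back previously persisted objects.
    }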
diff --git a/chromium/base/metrics/persistent_memory_allocator_unittest.cc b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
index 70e13921fab..64333389a16 100644
--- a/chromium/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
@@ -53,7 +53,7 @@ class PersistentMemoryAllocatorTest : public testing::Test {
};
PersistentMemoryAllocatorTest() {
- kAllocAlignment = PersistentMemoryAllocator::kAllocAlignment;
+ kAllocAlignment = GetAllocAlignment();
mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
}
@@ -80,6 +80,10 @@ class PersistentMemoryAllocatorTest : public testing::Test {
return count;
}
+ static uint32_t GetAllocAlignment() {
+ return PersistentMemoryAllocator::kAllocAlignment;
+ }
+
protected:
std::unique_ptr<char[]> mem_segment_;
std::unique_ptr<PersistentMemoryAllocator> allocator_;
@@ -177,9 +181,9 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Check that an object's type can be changed.
EXPECT_EQ(2U, allocator_->GetType(block2));
- allocator_->SetType(block2, 3);
+ allocator_->ChangeType(block2, 3, 2);
EXPECT_EQ(3U, allocator_->GetType(block2));
- allocator_->SetType(block2, 2);
+ allocator_->ChangeType(block2, 2, 3);
EXPECT_EQ(2U, allocator_->GetType(block2));
// Create second allocator (read/write) using the same memory segment.
@@ -493,7 +497,8 @@ TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
//----- SharedPersistentMemoryAllocator ----------------------------------------
TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
- SharedMemoryHandle shared_handle;
+ SharedMemoryHandle shared_handle_1;
+ SharedMemoryHandle shared_handle_2;
PersistentMemoryAllocator::MemoryInfo meminfo1;
Reference r123, r456, r789;
@@ -507,19 +512,20 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
r456 = local.Allocate(456, 456);
r789 = local.Allocate(789, 789);
local.MakeIterable(r123);
- local.SetType(r456, 654);
+ local.ChangeType(r456, 654, 456);
local.MakeIterable(r789);
local.GetMemoryInfo(&meminfo1);
EXPECT_FALSE(local.IsFull());
EXPECT_FALSE(local.IsCorrupt());
- ASSERT_TRUE(local.shared_memory()->ShareToProcess(
- GetCurrentProcessHandle(),
- &shared_handle));
+ ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+ &shared_handle_1));
+ ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+ &shared_handle_2));
}
// Read-only test.
- std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle,
+ std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
/*readonly=*/true));
ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
@@ -545,7 +551,7 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
EXPECT_EQ(meminfo1.free, meminfo2.free);
// Read/write test.
- std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle,
+ std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
/*readonly=*/false));
ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
@@ -578,6 +584,7 @@ TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
}
+#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------
TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
@@ -594,7 +601,7 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
r456 = local.Allocate(456, 456);
r789 = local.Allocate(789, 789);
local.MakeIterable(r123);
- local.SetType(r456, 654);
+ local.ChangeType(r456, 654, 456);
local.MakeIterable(r789);
local.GetMemoryInfo(&meminfo1);
EXPECT_FALSE(local.IsFull());
@@ -611,7 +618,7 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
const size_t mmlength = mmfile->length();
EXPECT_GE(meminfo1.total, mmlength);
- FilePersistentMemoryAllocator file(std::move(mmfile), 0, "");
+ FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
EXPECT_TRUE(file.IsReadonly());
EXPECT_EQ(TEST_ID, file.Id());
EXPECT_FALSE(file.IsFull());
@@ -635,10 +642,63 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
EXPECT_EQ(0U, meminfo2.free);
}
+TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.path().AppendASCII("extend_test");
+ MemoryMappedFile::Region region = {0, 16 << 10}; // 16KiB maximum size.
+
+ // Start with a small but valid file of persistent data.
+ ASSERT_FALSE(PathExists(file_path));
+ {
+ LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+ local.Allocate(1, 1);
+ local.Allocate(11, 11);
+
+ File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+ ASSERT_TRUE(writer.IsValid());
+ writer.Write(0, (const char*)local.data(), local.used());
+ }
+ ASSERT_TRUE(PathExists(file_path));
+ int64_t before_size;
+ ASSERT_TRUE(GetFileSize(file_path, &before_size));
+
+ // Map it as an extendable read/write file and append to it.
+ {
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(
+ File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+ region, MemoryMappedFile::READ_WRITE_EXTEND);
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
+ "", false);
+ EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
+
+ allocator.Allocate(111, 111);
+ EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
+ }
+
+ // Validate that append worked.
+ int64_t after_size;
+ ASSERT_TRUE(GetFileSize(file_path, &after_size));
+ EXPECT_LT(before_size, after_size);
+
+ // Verify that it's still an acceptable file.
+ {
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(
+ File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+ region, MemoryMappedFile::READ_WRITE_EXTEND);
+ EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
+ EXPECT_TRUE(
+ FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
+ }
+}
+
TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+ const uint32_t kAllocAlignment =
+ PersistentMemoryAllocatorTest::GetAllocAlignment();
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- FilePath file_path_base = temp_dir.path().AppendASCII("persistent_memory_");
LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
local.MakeIterable(local.Allocate(1, 1));
@@ -660,13 +720,23 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
}
ASSERT_TRUE(PathExists(file_path));
+ // Request read/write access for some sizes that are a multiple of the
+ // allocator's alignment size. The allocator is strict about file size
+ // being a multiple of its internal alignment when doing read/write access.
+ const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
+ const uint32_t file_flags =
+ File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
+ const MemoryMappedFile::Access map_access =
+ read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
+
mmfile.reset(new MemoryMappedFile());
- mmfile->Initialize(file_path);
+ mmfile->Initialize(File(file_path, file_flags), map_access);
EXPECT_EQ(filesize, mmfile->length());
- if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
+ if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
// Make sure construction doesn't crash. It will, however, cause
// error messages warning about a corrupted memory segment.
- FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+ read_only);
// Also make sure that iteration doesn't crash.
PersistentMemoryAllocator::Iterator iter(&allocator);
uint32_t type_id;
@@ -680,6 +750,7 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
(void)type;
(void)size;
}
+
// Ensure that short files are detected as corrupt and full files are not.
EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
} else {
@@ -700,12 +771,13 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
ASSERT_TRUE(PathExists(file_path));
mmfile.reset(new MemoryMappedFile());
- mmfile->Initialize(file_path);
+ mmfile->Initialize(File(file_path, file_flags), map_access);
EXPECT_EQ(filesize, mmfile->length());
- if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
+ if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
// Make sure construction doesn't crash. It will, however, cause
// error messages warning about a corrupted memory segment.
- FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+ read_only);
EXPECT_TRUE(allocator.IsCorrupt()); // Garbage data so it should be.
} else {
// For filesize >= minsize, the file must be acceptable. This
@@ -715,5 +787,6 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
}
}
}
+#endif // !defined(OS_NACL)
} // namespace base
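The tests above replace SetType() with ChangeType(), whose third argument is the type the caller expects the block to currently have; presumably the type is only rewritten when that expectation still holds, so racing callers cannot silently overwrite each other. A hedged usage sketch, with |allocator| and |ref| standing in for an existing allocator and allocation reference:

    #include <stdint.h>
    #include "base/metrics/persistent_memory_allocator.h"

    void RetypeBlock(base::PersistentMemoryAllocator* allocator,
                     base::PersistentMemoryAllocator::Reference ref) {
      uint32_t expected = allocator->GetType(ref);  // e.g. 2 in the test above
      allocator->ChangeType(ref, /*to_type_id=*/3, /*from_type_id=*/expected);
      // GetType(ref) is 3 afterwards only if the block still had |expected|
      // as its type when the call executed.
    }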
diff --git a/chromium/base/metrics/sparse_histogram.cc b/chromium/base/metrics/sparse_histogram.cc
index a2dbb61b218..deba5700594 100644
--- a/chromium/base/metrics/sparse_histogram.cc
+++ b/chromium/base/metrics/sparse_histogram.cc
@@ -208,7 +208,6 @@ HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
return NULL;
}
- DCHECK(flags & HistogramBase::kIPCSerializationSourceFlag);
flags &= ~HistogramBase::kIPCSerializationSourceFlag;
return SparseHistogram::FactoryGet(histogram_name, flags);
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index fbff977522d..eab77902765 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -48,7 +48,7 @@ class SparseHistogramTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
void UninitializeStatisticsRecorder() {
diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc
index 4eecddade80..1279bae6e0b 100644
--- a/chromium/base/metrics/statistics_recorder.cc
+++ b/chromium/base/metrics/statistics_recorder.cc
@@ -10,6 +10,7 @@
#include "base/debug/leak_annotations.h"
#include "base/json/string_escape.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
@@ -287,7 +288,7 @@ void StatisticsRecorder::GetBucketRanges(
return;
for (const auto& entry : *ranges_) {
- for (const auto& range_entry : *entry.second) {
+ for (auto* range_entry : *entry.second) {
output->push_back(range_entry);
}
}
@@ -421,6 +422,12 @@ void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
}
// static
+std::unique_ptr<StatisticsRecorder>
+StatisticsRecorder::CreateTemporaryForTesting() {
+ return WrapUnique(new StatisticsRecorder());
+}
+
+// static
void StatisticsRecorder::UninitializeForTesting() {
// Stop now if it's never been initialized.
if (lock_ == NULL || histograms_ == NULL)
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index 6c436c292e6..002758e840e 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -26,6 +26,8 @@
#include "base/metrics/histogram_base.h"
#include "base/strings/string_piece.h"
+class SubprocessMetricsProviderTest;
+
namespace base {
class BucketRanges;
@@ -170,7 +172,15 @@ class BASE_EXPORT StatisticsRecorder {
// memory is being released.
static void ForgetHistogramForTesting(base::StringPiece name);
- // Reset any global instance of the statistics-recorder that was created
+ // Creates a local StatisticsRecorder object for testing purposes. All new
+ // histograms will be registered in it until it is destructed or pushed
+ // aside for the lifetime of yet another SR object. The destruction of the
+ // returned object will re-activate the previous one. Always release SR
+ // objects in the opposite order to which they're created.
+ static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
+ WARN_UNUSED_RESULT;
+
+ // Resets any global instance of the statistics-recorder that was created
// by a call to Initialize().
static void UninitializeForTesting();
@@ -185,15 +195,6 @@ class BASE_EXPORT StatisticsRecorder {
typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
- friend class HistogramBaseTest;
- friend class HistogramSnapshotManagerTest;
- friend class HistogramTest;
- friend class JsonPrefStoreTest;
- friend class SharedHistogramTest;
- friend class SparseHistogramTest;
- friend class StatisticsRecorderTest;
- FRIEND_TEST_ALL_PREFIXES(HistogramDeltaSerializationTest,
- DeserializeHistogramAndAddSamples);
// Imports histograms from global persistent memory. The global lock must
// not be held during this call.
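A sketch of how a test fixture is expected to use the new factory, following the header comment above and the unit-test changes below; the fixture name is illustrative:

    #include "base/metrics/statistics_recorder.h"
    #include "testing/gtest/include/gtest/gtest.h"

    class MyMetricsTest : public testing::Test {
     protected:
      MyMetricsTest()
          : statistics_recorder_(
                base::StatisticsRecorder::CreateTemporaryForTesting()) {}
      // Released in LIFO order: when |statistics_recorder_| is destroyed, the
      // previously active recorder becomes active again.
      std::unique_ptr<base::StatisticsRecorder> statistics_recorder_;
    };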
diff --git a/chromium/base/metrics/statistics_recorder_unittest.cc b/chromium/base/metrics/statistics_recorder_unittest.cc
index 813fbd13f5c..95125b8284f 100644
--- a/chromium/base/metrics/statistics_recorder_unittest.cc
+++ b/chromium/base/metrics/statistics_recorder_unittest.cc
@@ -47,7 +47,7 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
StatisticsRecorder::UninitializeForTesting();
- statistics_recorder_.reset(new StatisticsRecorder());
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
void UninitializeStatisticsRecorder() {
diff --git a/chromium/base/metrics/user_metrics.h b/chromium/base/metrics/user_metrics.h
index c80bac038d7..93701e8fd20 100644
--- a/chromium/base/metrics/user_metrics.h
+++ b/chromium/base/metrics/user_metrics.h
@@ -26,9 +26,9 @@ namespace base {
// not good: "SSLDialogShown", "PageLoaded", "DiskFull"
// We use this to gather anonymized information about how users are
// interacting with the browser.
-// WARNING: In calls to this function, UserMetricsAction and a
-// string literal parameter must be on the same line, e.g.
-// RecordAction(UserMetricsAction("my extremely long action name"));
+// WARNING: In calls to this function, UserMetricsAction should be followed by a
+// string literal parameter and not a variable, e.g.
+// RecordAction(UserMetricsAction("my action name"));
// This ensures that our processing scripts can associate this action's hash
// with its metric name. Therefore, it will be possible to retrieve the metric
// name from the hash later on.
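An illustrative call site; the action name and function are placeholders:

    #include "base/metrics/user_metrics.h"
    #include "base/metrics/user_metrics_action.h"

    void OnMyButtonClicked() {
      // OK: the literal sits directly inside UserMetricsAction(), so
      // tools/metrics/actions/extract_actions.py can find it.
      base::RecordAction(base::UserMetricsAction("MyFeature_ButtonClicked"));

      // Not OK for the tooling: the name reaches UserMetricsAction() through a
      // variable, so the extraction script never sees the literal.
      // const char kAction[] = "MyFeature_ButtonClicked";
      // base::RecordAction(base::UserMetricsAction(kAction));
    }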
diff --git a/chromium/base/metrics/user_metrics_action.h b/chromium/base/metrics/user_metrics_action.h
index 8c195b3e803..3eca3ddb8b1 100644
--- a/chromium/base/metrics/user_metrics_action.h
+++ b/chromium/base/metrics/user_metrics_action.h
@@ -10,13 +10,12 @@ namespace base {
// UserMetricsAction exists purely to standardize on the parameters passed to
// UserMetrics. That way, our toolset can scan the source code reliably for
// constructors and extract the associated string constants.
-// WARNING: When using UserMetricsAction, UserMetricsAction and a string literal
-// parameter must be on the same line, e.g.
-// RecordAction(UserMetricsAction("my extremely long action name"));
-// or
-// RenderThread::Get()->RecordAction(
-// UserMetricsAction("my extremely long action name"));
-// because otherwise our processing scripts won't pick up on new actions.
+// WARNING: When using UserMetricsAction, you should use a string literal
+// parameter, e.g.
+// RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
// Please see tools/metrics/actions/extract_actions.py for details.
struct UserMetricsAction {
const char* str_;
diff --git a/chromium/base/move.h b/chromium/base/move.h
deleted file mode 100644
index 42242b420e6..00000000000
--- a/chromium/base/move.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MOVE_H_
-#define BASE_MOVE_H_
-
-// TODO(dcheng): Remove this header.
-#include <utility>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "build/build_config.h"
-
-// TODO(crbug.com/566182): DEPRECATED!
-// Use DISALLOW_COPY_AND_ASSIGN instead, or if your type will be used in
-// Callbacks, use DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND instead.
-#define MOVE_ONLY_TYPE_FOR_CPP_03(type) \
- DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)
-
-// A macro to disallow the copy constructor and copy assignment functions.
-// This should be used in the private: declarations for a class.
-//
-// Use this macro instead of DISALLOW_COPY_AND_ASSIGN if you want to pass
-// ownership of the type through a base::Callback without heap-allocating it
-// into a scoped_ptr. The class must define a move constructor and move
-// assignment operator to make this work.
-//
-// This version of the macro adds a cryptic MoveOnlyTypeForCPP03 typedef for the
-// base::Callback implementation to use. See IsMoveOnlyType template and its
-// usage in base/callback_internal.h for more details.
-// TODO(crbug.com/566182): Remove this macro and use DISALLOW_COPY_AND_ASSIGN
-// everywhere instead.
-#define DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type) \
- private: \
- type(const type&) = delete; \
- void operator=(const type&) = delete; \
- \
- public: \
- typedef void MoveOnlyTypeForCPP03; \
- \
- private:
-
-#endif // BASE_MOVE_H_
diff --git a/chromium/base/native_library.h b/chromium/base/native_library.h
index 1e764da89aa..b4f3a3cd1ba 100644
--- a/chromium/base/native_library.h
+++ b/chromium/base/native_library.h
@@ -11,8 +11,7 @@
#include <string>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -26,7 +25,7 @@ namespace base {
class FilePath;
#if defined(OS_WIN)
-typedef HMODULE NativeLibrary;
+using NativeLibrary = HMODULE;
#elif defined(OS_MACOSX)
enum NativeLibraryType {
BUNDLE,
@@ -46,9 +45,9 @@ struct NativeLibraryStruct {
void* dylib;
};
};
-typedef NativeLibraryStruct* NativeLibrary;
+using NativeLibrary = NativeLibraryStruct*;
#elif defined(OS_POSIX)
-typedef void* NativeLibrary;
+using NativeLibrary = void*;
#endif // OS_*
struct BASE_EXPORT NativeLibraryLoadError {
@@ -87,13 +86,14 @@ BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
// Gets a function pointer from a native library.
BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name);
+ StringPiece name);
// Returns the full platform specific name for a native library.
+// |name| must be ASCII.
// For example:
// "mylib" returns "mylib.dll" on Windows, "libmylib.so" on Linux,
-// "mylib.dylib" on Mac.
-BASE_EXPORT string16 GetNativeLibraryName(const string16& name);
+// "libmylib.dylib" on Mac.
+BASE_EXPORT std::string GetNativeLibraryName(StringPiece name);
} // namespace base
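A small usage sketch of the new std::string-returning API; "mylib" is just an example name:

    #include <string>
    #include "base/native_library.h"

    std::string soname = base::GetNativeLibraryName("mylib");
    // Per the unit test below: "mylib.dll" on Windows, "libmylib.so" on
    // Linux/other POSIX, "libmylib.dylib" on Mac, and "mylib" on iOS.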
diff --git a/chromium/base/native_library_ios.mm b/chromium/base/native_library_ios.mm
index 60a11f2b460..fb1b077a0f4 100644
--- a/chromium/base/native_library_ios.mm
+++ b/chromium/base/native_library_ios.mm
@@ -6,6 +6,8 @@
#include "base/logging.h"
+#include "base/strings/string_util.h"
+
namespace base {
std::string NativeLibraryLoadError::ToString() const {
@@ -29,14 +31,15 @@ void UnloadNativeLibrary(NativeLibrary library) {
// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name) {
+ StringPiece name) {
NOTIMPLEMENTED();
return nullptr;
}
// static
-string16 GetNativeLibraryName(const string16& name) {
- return name;
+std::string GetNativeLibraryName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return name.as_string();
}
} // namespace base
diff --git a/chromium/base/native_library_mac.mm b/chromium/base/native_library_mac.mm
index 16848858fd0..f2df0a711a4 100644
--- a/chromium/base/native_library_mac.mm
+++ b/chromium/base/native_library_mac.mm
@@ -27,18 +27,11 @@ static NativeLibraryObjCStatus GetObjCStatusForImage(
// of testing is used in _CFBundleGrokObjcImageInfoFromFile in
// CF-744/CFBundle.c, around lines 2447-2474.
//
- // In 32-bit images, ObjC can be recognized in __OBJC,__image_info, whereas
- // in 64-bit, the data is in __DATA,__objc_imageinfo.
-#if __LP64__
+ // In 64-bit images, ObjC can be recognized in __DATA,__objc_imageinfo.
const section_64* section = getsectbynamefromheader_64(
reinterpret_cast<const struct mach_header_64*>(info.dli_fbase),
SEG_DATA, "__objc_imageinfo");
-#else
- const section* section = getsectbynamefromheader(
- reinterpret_cast<const struct mach_header*>(info.dli_fbase),
- SEG_OBJC, "__image_info");
-#endif
- return section == NULL ? OBJC_NOT_PRESENT : OBJC_PRESENT;
+ return section ? OBJC_PRESENT : OBJC_NOT_PRESENT;
}
std::string NativeLibraryLoadError::ToString() const {
@@ -46,7 +39,7 @@ std::string NativeLibraryLoadError::ToString() const {
}
// static
-NativeLibrary LoadNativeLibrary(const base::FilePath& library_path,
+NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibraryLoadError* error) {
// dlopen() etc. open the file off disk.
if (library_path.Extension() == "dylib" || !DirectoryExists(library_path)) {
@@ -54,7 +47,7 @@ NativeLibrary LoadNativeLibrary(const base::FilePath& library_path,
if (!dylib) {
if (error)
error->message = dlerror();
- return NULL;
+ return nullptr;
}
NativeLibrary native_lib = new NativeLibraryStruct();
native_lib->type = DYNAMIC_LIB;
@@ -62,16 +55,16 @@ NativeLibrary LoadNativeLibrary(const base::FilePath& library_path,
native_lib->objc_status = OBJC_UNKNOWN;
return native_lib;
}
- base::ScopedCFTypeRef<CFURLRef> url(CFURLCreateFromFileSystemRepresentation(
+ ScopedCFTypeRef<CFURLRef> url(CFURLCreateFromFileSystemRepresentation(
kCFAllocatorDefault,
(const UInt8*)library_path.value().c_str(),
library_path.value().length(),
true));
if (!url)
- return NULL;
+ return nullptr;
CFBundleRef bundle = CFBundleCreate(kCFAllocatorDefault, url.get());
if (!bundle)
- return NULL;
+ return nullptr;
NativeLibrary native_lib = new NativeLibraryStruct();
native_lib->type = BUNDLE;
@@ -103,17 +96,17 @@ void UnloadNativeLibrary(NativeLibrary library) {
// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name) {
- void* function_pointer = NULL;
+ StringPiece name) {
+ void* function_pointer = nullptr;
// Get the function pointer using the right API for the type.
if (library->type == BUNDLE) {
- base::ScopedCFTypeRef<CFStringRef> symbol_name(CFStringCreateWithCString(
- kCFAllocatorDefault, name, kCFStringEncodingUTF8));
+ ScopedCFTypeRef<CFStringRef> symbol_name(CFStringCreateWithCString(
+ kCFAllocatorDefault, name.data(), kCFStringEncodingUTF8));
function_pointer = CFBundleGetFunctionPointerForName(library->bundle,
symbol_name);
} else {
- function_pointer = dlsym(library->dylib, name);
+ function_pointer = dlsym(library->dylib, name.data());
}
// If this library hasn't been tested for having ObjC, use the function
@@ -125,8 +118,9 @@ void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
}
// static
-string16 GetNativeLibraryName(const string16& name) {
- return name + ASCIIToUTF16(".dylib");
+std::string GetNativeLibraryName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return "lib" + name.as_string() + ".dylib";
}
} // namespace base
diff --git a/chromium/base/native_library_posix.cc b/chromium/base/native_library_posix.cc
index 3179a93833c..2dc434b7be2 100644
--- a/chromium/base/native_library_posix.cc
+++ b/chromium/base/native_library_posix.cc
@@ -8,6 +8,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
@@ -21,7 +22,7 @@ std::string NativeLibraryLoadError::ToString() const {
NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibraryLoadError* error) {
// dlopen() opens the file off disk.
- base::ThreadRestrictions::AssertIOAllowed();
+ ThreadRestrictions::AssertIOAllowed();
// We deliberately do not use RTLD_DEEPBIND. For the history why, please
// refer to the bug tracker. Some useful bug reports to read include:
@@ -45,13 +46,14 @@ void UnloadNativeLibrary(NativeLibrary library) {
// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name) {
- return dlsym(library, name);
+ StringPiece name) {
+ return dlsym(library, name.data());
}
// static
-string16 GetNativeLibraryName(const string16& name) {
- return ASCIIToUTF16("lib") + name + ASCIIToUTF16(".so");
+std::string GetNativeLibraryName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return "lib" + name.as_string() + ".so";
}
} // namespace base
diff --git a/chromium/base/native_library_unittest.cc b/chromium/base/native_library_unittest.cc
index b3cff1d79e3..c189f56e995 100644
--- a/chromium/base/native_library_unittest.cc
+++ b/chromium/base/native_library_unittest.cc
@@ -13,17 +13,27 @@ const FilePath::CharType kDummyLibraryPath[] =
TEST(NativeLibraryTest, LoadFailure) {
NativeLibraryLoadError error;
- NativeLibrary library =
- LoadNativeLibrary(FilePath(kDummyLibraryPath), &error);
- EXPECT_TRUE(library == nullptr);
+ EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), &error));
EXPECT_FALSE(error.ToString().empty());
}
// |error| is optional and can be null.
TEST(NativeLibraryTest, LoadFailureWithNullError) {
- NativeLibrary library =
- LoadNativeLibrary(FilePath(kDummyLibraryPath), nullptr);
- EXPECT_TRUE(library == nullptr);
+ EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), nullptr));
+}
+
+TEST(NativeLibraryTest, GetNativeLibraryName) {
+ const char kExpectedName[] =
+#if defined(OS_IOS)
+ "mylib";
+#elif defined(OS_MACOSX)
+ "libmylib.dylib";
+#elif defined(OS_POSIX)
+ "libmylib.so";
+#elif defined(OS_WIN)
+ "mylib.dll";
+#endif
+ EXPECT_EQ(kExpectedName, GetNativeLibraryName("mylib"));
}
} // namespace base
diff --git a/chromium/base/native_library_win.cc b/chromium/base/native_library_win.cc
index 1ca3e92f547..31753568000 100644
--- a/chromium/base/native_library_win.cc
+++ b/chromium/base/native_library_win.cc
@@ -7,6 +7,7 @@
#include <windows.h>
#include "base/files/file_util.h"
+#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
@@ -62,8 +63,7 @@ NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibrary LoadNativeLibraryDynamically(const FilePath& library_path) {
typedef HMODULE (WINAPI* LoadLibraryFunction)(const wchar_t* file_name);
- LoadLibraryFunction load_library;
- load_library = reinterpret_cast<LoadLibraryFunction>(
+ LoadLibraryFunction load_library = reinterpret_cast<LoadLibraryFunction>(
GetProcAddress(GetModuleHandle(L"kernel32.dll"), "LoadLibraryW"));
return LoadNativeLibraryHelper(library_path, load_library, NULL);
@@ -76,13 +76,14 @@ void UnloadNativeLibrary(NativeLibrary library) {
// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
- const char* name) {
- return GetProcAddress(library, name);
+ StringPiece name) {
+ return GetProcAddress(library, name.data());
}
// static
-string16 GetNativeLibraryName(const string16& name) {
- return name + ASCIIToUTF16(".dll");
+std::string GetNativeLibraryName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return name.as_string() + ".dll";
}
} // namespace base
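Taken together, the per-platform changes mean symbol lookup now accepts a StringPiece; callers normally pass a string literal, which keeps name.data() NUL-terminated for dlsym()/GetProcAddress(), whereas a piece sliced out of a larger buffer would not be safe with these implementations. An end-to-end sketch with placeholder library and symbol names:

    #include "base/files/file_path.h"
    #include "base/native_library.h"

    void CallIntoLibrary() {
      base::NativeLibraryLoadError error;
      base::NativeLibrary lib = base::LoadNativeLibrary(
          base::FilePath(FILE_PATH_LITERAL("mylib.dll")), &error);
      if (!lib)
        return;
      void* fn = base::GetFunctionPointerFromNativeLibrary(lib, "MyEntryPoint");
      (void)fn;  // Cast |fn| to the real function type before invoking it.
      base::UnloadNativeLibrary(lib);
    }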
diff --git a/chromium/base/nix/xdg_util_unittest.cc b/chromium/base/nix/xdg_util_unittest.cc
index a05435545ab..c8e53616ccd 100644
--- a/chromium/base/nix/xdg_util_unittest.cc
+++ b/chromium/base/nix/xdg_util_unittest.cc
@@ -9,9 +9,9 @@
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
+using ::testing::Eq;
using ::testing::Return;
using ::testing::SetArgumentPointee;
-using ::testing::StrEq;
namespace base {
namespace nix {
@@ -20,9 +20,9 @@ namespace {
class MockEnvironment : public Environment {
public:
- MOCK_METHOD2(GetVar, bool(const char*, std::string* result));
- MOCK_METHOD2(SetVar, bool(const char*, const std::string& new_value));
- MOCK_METHOD1(UnSetVar, bool(const char*));
+ MOCK_METHOD2(GetVar, bool(StringPiece, std::string* result));
+ MOCK_METHOD2(SetVar, bool(StringPiece, const std::string& new_value));
+ MOCK_METHOD1(UnSetVar, bool(StringPiece));
};
// Needs to be const char* to make gmock happy.
@@ -46,7 +46,7 @@ const char kXdgDesktop[] = "XDG_CURRENT_DESKTOP";
TEST(XDGUtilTest, GetDesktopEnvironmentGnome) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kDesktopSession), _))
+ EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kDesktopGnome), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
@@ -55,7 +55,7 @@ TEST(XDGUtilTest, GetDesktopEnvironmentGnome) {
TEST(XDGUtilTest, GetDesktopEnvironmentMATE) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kDesktopSession), _))
+ EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kDesktopMATE), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
@@ -64,7 +64,7 @@ TEST(XDGUtilTest, GetDesktopEnvironmentMATE) {
TEST(XDGUtilTest, GetDesktopEnvironmentKDE4) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kDesktopSession), _))
+ EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kDesktopKDE4), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE4, GetDesktopEnvironment(&getter));
@@ -73,7 +73,7 @@ TEST(XDGUtilTest, GetDesktopEnvironmentKDE4) {
TEST(XDGUtilTest, GetDesktopEnvironmentKDE3) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kDesktopSession), _))
+ EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kDesktopKDE), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE3, GetDesktopEnvironment(&getter));
@@ -82,7 +82,7 @@ TEST(XDGUtilTest, GetDesktopEnvironmentKDE3) {
TEST(XDGUtilTest, GetDesktopEnvironmentXFCE) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kDesktopSession), _))
+ EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kDesktopXFCE), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_XFCE, GetDesktopEnvironment(&getter));
@@ -91,7 +91,7 @@ TEST(XDGUtilTest, GetDesktopEnvironmentXFCE) {
TEST(XDGUtilTest, GetXdgDesktopGnome) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kXdgDesktop), _))
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopGNOME), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
@@ -100,11 +100,11 @@ TEST(XDGUtilTest, GetXdgDesktopGnome) {
TEST(XDGUtilTest, GetXdgDesktopGnomeFallback) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kXdgDesktop), _))
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopUnity), Return(true)));
- EXPECT_CALL(getter, GetVar(StrEq(kDesktopSession), _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kDesktopGnomeFallback),
- Return(true)));
+ EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+ .WillOnce(
+ DoAll(SetArgumentPointee<1>(kDesktopGnomeFallback), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
}
@@ -112,10 +112,10 @@ TEST(XDGUtilTest, GetXdgDesktopGnomeFallback) {
TEST(XDGUtilTest, GetXdgDesktopKDE5) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kXdgDesktop), _))
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopKDE), Return(true)));
- EXPECT_CALL(getter, GetVar(StrEq(kKDESession), _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kKDESessionKDE5), Return(true)));
+ EXPECT_CALL(getter, GetVar(Eq(kKDESession), _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kKDESessionKDE5), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE5, GetDesktopEnvironment(&getter));
}
@@ -123,7 +123,7 @@ TEST(XDGUtilTest, GetXdgDesktopKDE5) {
TEST(XDGUtilTest, GetXdgDesktopKDE4) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kXdgDesktop), _))
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopKDE), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE4, GetDesktopEnvironment(&getter));
@@ -132,7 +132,7 @@ TEST(XDGUtilTest, GetXdgDesktopKDE4) {
TEST(XDGUtilTest, GetXdgDesktopUnity) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
- EXPECT_CALL(getter, GetVar(StrEq(kXdgDesktop), _))
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
.WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopUnity), Return(true)));
EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
diff --git a/chromium/base/numerics/safe_numerics_unittest.cc b/chromium/base/numerics/safe_numerics_unittest.cc
index 861f5159284..4be7ab59d72 100644
--- a/chromium/base/numerics/safe_numerics_unittest.cc
+++ b/chromium/base/numerics/safe_numerics_unittest.cc
@@ -63,10 +63,13 @@ Dst GetMaxConvertibleToFloat() {
// Helper macros to wrap displaying the conversion types and line numbers.
#define TEST_EXPECTED_VALIDITY(expected, actual) \
- EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).validity()) \
+ EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid()) \
<< "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
<< " on line " << line;
+#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
+#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
+
#define TEST_EXPECTED_VALUE(expected, actual) \
EXPECT_EQ(static_cast<Dst>(expected), \
CheckedNumeric<Dst>(actual).ValueUnsafe()) \
@@ -82,43 +85,32 @@ static void TestSpecializedArithmetic(
numeric_limits<Dst>::is_signed,
int>::type = 0) {
typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
- -CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::max()) + -1);
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_VALIDITY(
- RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(-DstLimits::max()) + -DstLimits::max());
-
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()) - 1);
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()) - -1);
- TEST_EXPECTED_VALIDITY(
- RANGE_OVERFLOW,
- CheckedNumeric<Dst>(DstLimits::max()) - -DstLimits::max());
- TEST_EXPECTED_VALIDITY(
- RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(-DstLimits::max()) - DstLimits::max());
-
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()) * 2);
-
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()) / -1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+ -DstLimits::max());
+
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+ -DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+ DstLimits::max());
+
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
- TEST_EXPECTED_VALIDITY(RANGE_INVALID, CheckedNumeric<Dst>(-1) % -2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
// Test all the different modulus combinations.
@@ -138,19 +130,14 @@ static void TestSpecializedArithmetic(
!numeric_limits<Dst>::is_signed,
int>::type = 0) {
typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_VALIDITY(RANGE_VALID, -CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()).Abs());
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(DstLimits::min()) - 1);
+ TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
- TEST_EXPECTED_VALIDITY(
- RANGE_VALID,
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+ TEST_EXPECTED_SUCCESS(
CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
.UnsignedAbs());
@@ -176,29 +163,22 @@ void TestSpecializedArithmetic(
int line,
typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_VALIDITY(RANGE_VALID, -CheckedNumeric<Dst>(DstLimits::min()));
+ TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::max()) + 1);
- TEST_EXPECTED_VALIDITY(
- RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(-DstLimits::max()) + -DstLimits::max());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+ -DstLimits::max());
- TEST_EXPECTED_VALIDITY(
- RANGE_OVERFLOW,
- CheckedNumeric<Dst>(DstLimits::max()) - -DstLimits::max());
- TEST_EXPECTED_VALIDITY(
- RANGE_UNDERFLOW,
- CheckedNumeric<Dst>(-DstLimits::max()) - DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+ -DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+ DstLimits::max());
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
@@ -258,17 +238,15 @@ static void TestArithmetic(const char* dst, int line) {
TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::min()) + 1);
- TEST_EXPECTED_VALIDITY(
- RANGE_OVERFLOW, CheckedNumeric<Dst>(DstLimits::max()) + DstLimits::max());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
+ DstLimits::max());
// Generic subtraction.
TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
- TEST_EXPECTED_VALIDITY(RANGE_VALID,
- CheckedNumeric<Dst>(DstLimits::max()) - 1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
// Generic multiplication.
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
@@ -277,8 +255,8 @@ static void TestArithmetic(const char* dst, int line) {
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
- TEST_EXPECTED_VALIDITY(
- RANGE_OVERFLOW, CheckedNumeric<Dst>(DstLimits::max()) * DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+ DstLimits::max());
// Generic division.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
@@ -349,18 +327,18 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
"Comparison must be sign preserving and value preserving");
const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
- TEST_EXPECTED_VALIDITY(RANGE_VALID, checked_dst);
+ TEST_EXPECTED_SUCCESS(checked_dst);
if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
if (MaxExponent<Dst>::value >= MaxExponent<Src>::value * 2 - 1) {
// At least twice larger type.
- TEST_EXPECTED_VALIDITY(RANGE_VALID, SrcLimits::max() * checked_dst);
+ TEST_EXPECTED_SUCCESS(SrcLimits::max() * checked_dst);
} else { // Larger, but not at least twice as large.
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, SrcLimits::max() * checked_dst);
- TEST_EXPECTED_VALIDITY(RANGE_VALID, checked_dst + 1);
+ TEST_EXPECTED_FAILURE(SrcLimits::max() * checked_dst);
+ TEST_EXPECTED_SUCCESS(checked_dst + 1);
}
} else { // Same width type.
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + 1);
+ TEST_EXPECTED_FAILURE(checked_dst + 1);
}
TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
@@ -389,9 +367,9 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
"Destination must be narrower than source");
const CheckedNumeric<Dst> checked_dst;
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst - SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst - SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
@@ -418,7 +396,7 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
} else {
- TEST_EXPECTED_VALIDITY(RANGE_INVALID, checked_dst - static_cast<Src>(1));
+ TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
}
}
@@ -436,8 +414,8 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + static_cast<Src>(-1));
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + -SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+ TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
@@ -459,9 +437,9 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + static_cast<Src>(-1));
- TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + -SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+ TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
@@ -501,7 +479,7 @@ struct TestNumericConversion<Dst, Src, UNSIGN_TO_SIGN_NARROW_OR_EQUAL> {
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
- TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
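The macro rewrite above reflects CheckedNumeric's validity API: the old RANGE_* enum checks collapse into a boolean IsValid(). A minimal sketch of the pattern being exercised; the function name is made up:

    #include <limits>
    #include "base/numerics/safe_math.h"

    void CheckedOverflowSketch() {
      base::CheckedNumeric<int> value = std::numeric_limits<int>::max();
      base::CheckedNumeric<int> sum = value + 1;  // Overflows int.
      bool ok = sum.IsValid();                    // false: overflow detected.
      (void)ok;
    }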
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index 6821795705a..fe783542f47 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -7,6 +7,7 @@
#include <algorithm>
#include <map>
+#include <tuple>
#include "base/bind.h"
#include "base/location.h"
@@ -177,8 +178,8 @@ class ObserverListThreadSafe
void Notify(const tracked_objects::Location& from_here,
Method m,
const Params&... params) {
- internal::UnboundMethod<ObserverType, Method, Tuple<Params...>> method(
- m, MakeTuple(params...));
+ internal::UnboundMethod<ObserverType, Method, std::tuple<Params...>> method(
+ m, std::make_tuple(params...));
AutoLock lock(list_lock_);
for (const auto& entry : observer_lists_) {
@@ -186,8 +187,8 @@ class ObserverListThreadSafe
context->task_runner->PostTask(
from_here,
Bind(&ObserverListThreadSafe<ObserverType>::template NotifyWrapper<
- Method, Tuple<Params...>>,
- this, context, method));
+ Method, std::tuple<Params...>>,
+ this, context, method));
}
}
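The ObserverListThreadSafe change swaps the legacy base::Tuple helpers for their standard-library equivalents: the bound parameter pack is now stored as a std::tuple built with std::make_tuple. For example:

    #include <string>
    #include <tuple>

    // Same shape as the stored notification arguments above.
    std::tuple<int, std::string> args = std::make_tuple(42, std::string("hi"));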
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index 2e51e455216..097a2ed28b1 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -111,7 +111,7 @@ class AddRemoveThread : public PlatformThread::Delegate,
loop_->task_runner()->PostTask(
FROM_HERE,
base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
- loop_->Run();
+ RunLoop().Run();
//LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
// count_observes_ << ", " << count_addtask_;
delete loop_;
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index 5ae427f8a9c..b468964ae33 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -9,6 +9,7 @@
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
+#include "base/template_util.h"
namespace base {
@@ -30,6 +31,36 @@ constexpr in_place_t in_place = {};
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
constexpr nullopt_t nullopt(0);
+namespace internal {
+
+template <typename T, bool = base::is_trivially_destructible<T>::value>
+struct OptionalStorage {
+ // When T is not trivially destructible we must call its
+ // destructor before deallocating its memory.
+ ~OptionalStorage() {
+ if (!is_null_)
+ buffer_.template data_as<T>()->~T();
+ }
+
+ bool is_null_ = true;
+ base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+template <typename T>
+struct OptionalStorage<T, true> {
+ // When T is trivially destructible (i.e. its destructor does nothing)
+ // there is no need to call it.
+ // Since |base::AlignedMemory| is just an array, its destructor
+ // is trivial. Explicitly defaulting the destructor means it's not
+ // user-provided. All of this together makes this destructor trivial.
+ ~OptionalStorage() = default;
+
+ bool is_null_ = true;
+ base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+} // namespace internal
+
// base::Optional is a Chromium version of the C++17 optional class:
// std::optional documentation:
// http://en.cppreference.com/w/cpp/utility/optional
@@ -46,16 +77,18 @@ constexpr nullopt_t nullopt(0);
template <typename T>
class Optional {
public:
+ using value_type = T;
+
constexpr Optional() = default;
Optional(base::nullopt_t) : Optional() {}
Optional(const Optional& other) {
- if (!other.is_null_)
+ if (!other.storage_.is_null_)
Init(other.value());
}
Optional(Optional&& other) {
- if (!other.is_null_)
+ if (!other.storage_.is_null_)
Init(std::move(other.value()));
}
@@ -68,10 +101,7 @@ class Optional {
emplace(std::forward<Args>(args)...);
}
- ~Optional() {
- // TODO(mlamouri): use is_trivially_destructible<T>::value when possible.
- FreeIfNeeded();
- }
+ ~Optional() = default;
Optional& operator=(base::nullopt_t) {
FreeIfNeeded();
@@ -79,7 +109,7 @@ class Optional {
}
Optional& operator=(const Optional& other) {
- if (other.is_null_) {
+ if (other.storage_.is_null_) {
FreeIfNeeded();
return *this;
}
@@ -89,7 +119,7 @@ class Optional {
}
Optional& operator=(Optional&& other) {
- if (other.is_null_) {
+ if (other.storage_.is_null_) {
FreeIfNeeded();
return *this;
}
@@ -108,14 +138,14 @@ class Optional {
// TODO(mlamouri): can't use 'constexpr' with DCHECK.
const T* operator->() const {
- DCHECK(!is_null_);
+ DCHECK(!storage_.is_null_);
return &value();
}
// TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
// meant to be 'constexpr const'.
T* operator->() {
- DCHECK(!is_null_);
+ DCHECK(!storage_.is_null_);
return &value();
}
@@ -131,32 +161,32 @@ class Optional {
// meant to be 'constexpr const'.
T&& operator*() && { return std::move(value()); }
- constexpr explicit operator bool() const { return !is_null_; }
+ constexpr explicit operator bool() const { return !storage_.is_null_; }
// TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
// meant to be 'constexpr const'.
T& value() & {
- DCHECK(!is_null_);
- return *buffer_.template data_as<T>();
+ DCHECK(!storage_.is_null_);
+ return *storage_.buffer_.template data_as<T>();
}
// TODO(mlamouri): can't use 'constexpr' with DCHECK.
const T& value() const& {
- DCHECK(!is_null_);
- return *buffer_.template data_as<T>();
+ DCHECK(!storage_.is_null_);
+ return *storage_.buffer_.template data_as<T>();
}
// TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
// meant to be 'constexpr const'.
T&& value() && {
- DCHECK(!is_null_);
- return std::move(*buffer_.template data_as<T>());
+ DCHECK(!storage_.is_null_);
+ return std::move(*storage_.buffer_.template data_as<T>());
}
// TODO(mlamouri): can't use 'constexpr' with DCHECK.
const T&& value() const&& {
- DCHECK(!is_null_);
- return std::move(*buffer_.template data_as<T>());
+ DCHECK(!storage_.is_null_);
+ return std::move(*storage_.buffer_.template data_as<T>());
}
template <class U>
@@ -166,7 +196,8 @@ class Optional {
// "T must be copy constructible");
static_assert(std::is_convertible<U, T>::value,
"U must be convertible to T");
- return is_null_ ? static_cast<T>(std::forward<U>(default_value)) : value();
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : value();
}
template <class U>
@@ -176,26 +207,26 @@ class Optional {
// "T must be move constructible");
static_assert(std::is_convertible<U, T>::value,
"U must be convertible to T");
- return is_null_ ? static_cast<T>(std::forward<U>(default_value))
- : std::move(value());
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : std::move(value());
}
void swap(Optional& other) {
- if (is_null_ && other.is_null_)
+ if (storage_.is_null_ && other.storage_.is_null_)
return;
- if (is_null_ != other.is_null_) {
- if (is_null_) {
- Init(std::move(*other.buffer_.template data_as<T>()));
+ if (storage_.is_null_ != other.storage_.is_null_) {
+ if (storage_.is_null_) {
+ Init(std::move(*other.storage_.buffer_.template data_as<T>()));
other.FreeIfNeeded();
} else {
- other.Init(std::move(*buffer_.template data_as<T>()));
+ other.Init(std::move(*storage_.buffer_.template data_as<T>()));
FreeIfNeeded();
}
return;
}
- DCHECK(!is_null_ && !other.is_null_);
+ DCHECK(!storage_.is_null_ && !other.storage_.is_null_);
using std::swap;
swap(**this, *other);
}
@@ -208,47 +239,46 @@ class Optional {
private:
void Init(const T& value) {
- DCHECK(is_null_);
- new (buffer_.template data_as<T>()) T(value);
- is_null_ = false;
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(value);
+ storage_.is_null_ = false;
}
void Init(T&& value) {
- DCHECK(is_null_);
- new (buffer_.template data_as<T>()) T(std::move(value));
- is_null_ = false;
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(std::move(value));
+ storage_.is_null_ = false;
}
template <class... Args>
void Init(Args&&... args) {
- DCHECK(is_null_);
- new (buffer_.template data_as<T>()) T(std::forward<Args>(args)...);
- is_null_ = false;
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(std::forward<Args>(args)...);
+ storage_.is_null_ = false;
}
void InitOrAssign(const T& value) {
- if (is_null_)
+ if (storage_.is_null_)
Init(value);
else
- *buffer_.template data_as<T>() = value;
+ *storage_.buffer_.template data_as<T>() = value;
}
void InitOrAssign(T&& value) {
- if (is_null_)
+ if (storage_.is_null_)
Init(std::move(value));
else
- *buffer_.template data_as<T>() = std::move(value);
+ *storage_.buffer_.template data_as<T>() = std::move(value);
}
void FreeIfNeeded() {
- if (is_null_)
+ if (storage_.is_null_)
return;
- buffer_.template data_as<T>()->~T();
- is_null_ = true;
+ storage_.buffer_.template data_as<T>()->~T();
+ storage_.is_null_ = true;
}
- bool is_null_ = true;
- base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+ internal::OptionalStorage<T> storage_;
};
template <class T>
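The new internal::OptionalStorage specialization exists so that Optional<T> is itself trivially destructible whenever T is, which the unit-test changes below verify with static_asserts. A standalone sketch of the same dispatch pattern, independent of the Chromium types:

    #include <type_traits>

    template <typename T, bool = std::is_trivially_destructible<T>::value>
    struct Storage {
      // Non-trivially-destructible T: a user-provided destructor would run
      // ~T() on the engaged value (omitted in this sketch).
      ~Storage() {}
    };

    template <typename T>
    struct Storage<T, true> {
      // Trivially destructible T: defaulted, hence trivial, destructor.
      ~Storage() = default;
    };

    struct NonTrivial { ~NonTrivial() {} };

    static_assert(std::is_trivially_destructible<Storage<int>>::value,
                  "trivial T yields trivial storage");
    static_assert(!std::is_trivially_destructible<Storage<NonTrivial>>::value,
                  "non-trivial T yields non-trivial storage");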
diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc
index 812acf2d220..d6bf2636918 100644
--- a/chromium/base/optional_unittest.cc
+++ b/chromium/base/optional_unittest.cc
@@ -22,6 +22,9 @@ class TestObject {
COPY_CONSTRUCTED,
MOVE_CONSTRUCTED,
MOVED_FROM,
+ COPY_ASSIGNED,
+ MOVE_ASSIGNED,
+ SWAPPED,
};
TestObject() : foo_(0), bar_(0.0), state_(State::DEFAULT_CONSTRUCTED) {}
@@ -42,10 +45,26 @@ class TestObject {
TestObject& operator=(const TestObject& other) {
foo_ = other.foo_;
bar_ = other.bar_;
- state_ = State::COPY_CONSTRUCTED;
+ state_ = State::COPY_ASSIGNED;
return *this;
}
+ TestObject& operator=(TestObject&& other) {
+ foo_ = other.foo_;
+ bar_ = other.bar_;
+ state_ = State::MOVE_ASSIGNED;
+ other.state_ = State::MOVED_FROM;
+ return *this;
+ }
+
+ void Swap(TestObject* other) {
+ using std::swap;
+ swap(foo_, other->foo_);
+ swap(bar_, other->bar_);
+ state_ = State::SWAPPED;
+ other->state_ = State::SWAPPED;
+ }
+
bool operator==(const TestObject& other) const {
return foo_ == other.foo_ && bar_ == other.bar_;
}
@@ -59,8 +78,24 @@ class TestObject {
State state_;
};
+// Implementing Swappable concept.
+void swap(TestObject& lhs, TestObject& rhs) {
+ lhs.Swap(&rhs);
+}
+
+class NonTriviallyDestructible {
+ ~NonTriviallyDestructible() {}
+};
+
} // anonymous namespace
+static_assert(is_trivially_destructible<Optional<int>>::value,
+ "OptionalIsTriviallyDestructible");
+
+static_assert(
+ !is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
+ "OptionalIsTriviallyDestructible");
+
TEST(OptionalTest, DefaultConstructor) {
{
Optional<float> o;
@@ -215,6 +250,11 @@ TEST(OptionalTest, ConstructorForwardArguments) {
}
}
+TEST(OptionalTest, NulloptConstructor) {
+ Optional<int> a = base::nullopt;
+ EXPECT_FALSE(a);
+}
+
TEST(OptionalTest, AssignValue) {
{
Optional<float> a;
@@ -245,6 +285,16 @@ TEST(OptionalTest, AssignValue) {
Optional<TestObject> b(TestObject(3, 0.1));
EXPECT_TRUE(a == b);
}
+
+ {
+ Optional<TestObject> a = TestObject(4, 1.0);
+ EXPECT_TRUE(!!a);
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(!!a);
+
+ Optional<TestObject> b(TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
}
TEST(OptionalTest, AssignObject) {
@@ -277,6 +327,16 @@ TEST(OptionalTest, AssignObject) {
EXPECT_TRUE(a.value() == TestObject(3, 0.1));
EXPECT_TRUE(a == b);
}
+
+ {
+ Optional<TestObject> a(TestObject(4, 1.0));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = b;
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
}
TEST(OptionalTest, AssignObject_rvalue) {
@@ -312,6 +372,19 @@ TEST(OptionalTest, AssignObject_rvalue) {
EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, a->state());
EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
}
+
+ {
+ Optional<TestObject> a(TestObject(4, 1.0));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = std::move(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+ EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, a->state());
+ EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+ }
}
TEST(OptionalTest, AssignNull) {
@@ -452,6 +525,8 @@ TEST(OptionalTest, Swap_bothValue) {
EXPECT_TRUE(!!b);
EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+ EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+ EXPECT_EQ(TestObject::State::SWAPPED, b->state());
}
TEST(OptionalTest, Emplace) {
@@ -1136,7 +1211,7 @@ TEST(OptionalTest, MakeOptional) {
EXPECT_TRUE(!!o);
EXPECT_TRUE(TestObject(0, 0.42) == *o);
EXPECT_EQ(TestObject::State::MOVED_FROM, value.state());
- EXPECT_EQ(TestObject::State::COPY_CONSTRUCTED, o->state());
+ EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, o->state());
EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
base::make_optional(std::move(value))->state());
@@ -1184,6 +1259,8 @@ TEST(OptionalTest, NonMemberSwap_bothValue) {
EXPECT_TRUE(!!b);
EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+ EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+ EXPECT_EQ(TestObject::State::SWAPPED, b->state());
}
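
The two new expectations above confirm that swapping two engaged Optionals routes through TestObject's swap and leaves both sides in the SWAPPED state. For reference, a minimal sketch of the non-member swap being exercised here (illustrative only, not part of the patch):

#include <utility>

#include "base/optional.h"

void SwapOptionals() {
  base::Optional<int> a(1);
  base::Optional<int> b(2);
  using std::swap;
  swap(a, b);  // Found via ADL; afterwards a holds 2 and b holds 1.
}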
TEST(OptionalTest, Hash_OptionalReflectsInternal) {
diff --git a/chromium/base/pending_task.cc b/chromium/base/pending_task.cc
index d21f7c7f0d9..c0999bab5f3 100644
--- a/chromium/base/pending_task.cc
+++ b/chromium/base/pending_task.cc
@@ -51,8 +51,4 @@ bool PendingTask::operator<(const PendingTask& other) const {
return (sequence_num - other.sequence_num) > 0;
}
-void TaskQueue::Swap(TaskQueue* queue) {
- c.swap(queue->c); // Calls std::deque::swap.
-}
-
} // namespace base
diff --git a/chromium/base/pending_task.h b/chromium/base/pending_task.h
index fd0b883026e..c31ab9af264 100644
--- a/chromium/base/pending_task.h
+++ b/chromium/base/pending_task.h
@@ -46,15 +46,10 @@ struct BASE_EXPORT PendingTask : public TrackingInfo {
bool is_high_res;
};
-// Wrapper around std::queue specialized for PendingTask which adds a Swap
-// helper method.
-class BASE_EXPORT TaskQueue : public std::queue<PendingTask> {
- public:
- void Swap(TaskQueue* queue);
-};
+using TaskQueue = std::queue<PendingTask>;
// PendingTasks are sorted by their |delayed_run_time| property.
-typedef std::priority_queue<base::PendingTask> DelayedTaskQueue;
+using DelayedTaskQueue = std::priority_queue<base::PendingTask>;
} // namespace base
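
With the TaskQueue wrapper class removed, code that previously called TaskQueue::Swap() can use the standard container interface directly. A minimal sketch of the equivalent call after this change (illustrative only, not part of the patch):

#include "base/pending_task.h"

void SwapTaskQueues(base::TaskQueue* a, base::TaskQueue* b) {
  // TaskQueue is now a plain std::queue<PendingTask>, which already
  // provides swap(), so the dedicated Swap() helper is no longer needed.
  a->swap(*b);
}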
diff --git a/chromium/base/posix/global_descriptors.h b/chromium/base/posix/global_descriptors.h
index 1761e2518f8..edb299de5c9 100644
--- a/chromium/base/posix/global_descriptors.h
+++ b/chromium/base/posix/global_descriptors.h
@@ -57,7 +57,12 @@ class BASE_EXPORT GlobalDescriptors {
#else
// 3 used by __android_log_write().
// 4 used by... something important on Android M.
- static const int kBaseDescriptor = 5;
+ // 5 used by... something important on Android L... on low-end devices.
+ // TODO(amistry): On Android, this mechanism is only used for tests since the
+ // content child launcher spawns a process by creating a new Activity using
+ // the Android APIs. For tests, come up with a way that doesn't require using
+ // a pre-defined fd.
+ static const int kBaseDescriptor = 6;
#endif
// Return the singleton instance of GlobalDescriptors.
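
Raising kBaseDescriptor from 5 to 6 only shifts the first fd slot handed out by GlobalDescriptors; code keyed off kBaseDescriptor is unaffected. A hedged sketch of the usual pattern, where kMyIpcKey is a hypothetical key defined by the embedder (illustrative only, not part of the patch):

#include "base/posix/global_descriptors.h"

// Hypothetical descriptor key; real keys are defined by the embedder.
const base::GlobalDescriptors::Key kMyIpcKey = 0;

void RegisterInheritedFd() {
  // The inherited fd for a key lives at kBaseDescriptor + key, whatever
  // kBaseDescriptor happens to be on this platform.
  base::GlobalDescriptors::GetInstance()->Set(
      kMyIpcKey, kMyIpcKey + base::GlobalDescriptors::kBaseDescriptor);
}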
diff --git a/chromium/base/posix/unix_domain_socket_linux_unittest.cc b/chromium/base/posix/unix_domain_socket_linux_unittest.cc
index e4b63c0cd48..3f5173cfc21 100644
--- a/chromium/base/posix/unix_domain_socket_linux_unittest.cc
+++ b/chromium/base/posix/unix_domain_socket_linux_unittest.cc
@@ -52,7 +52,8 @@ TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
message_fds.clear();
// Check that the thread didn't get blocked.
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
message_thread.task_runner()->PostTask(
FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
ASSERT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(5000)));
diff --git a/chromium/base/process/kill_win.cc b/chromium/base/process/kill_win.cc
index 0f60a291a07..358590e7a38 100644
--- a/chromium/base/process/kill_win.cc
+++ b/chromium/base/process/kill_win.cc
@@ -14,8 +14,8 @@
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
#include "base/process/process_iterator.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/win/object_watcher.h"
namespace base {
@@ -46,7 +46,7 @@ class TimerExpiredTask : public win::ObjectWatcher::Delegate {
void TimedOut();
- // MessageLoop::Watcher -----------------------------------------------------
+ // win::ObjectWatcher::Delegate implementation.
void OnObjectSignaled(HANDLE object) override;
private:
@@ -193,7 +193,7 @@ void EnsureProcessTerminated(Process process) {
return;
}
- MessageLoop::current()->PostDelayedTask(
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, Bind(&TimerExpiredTask::TimedOut,
Owned(new TimerExpiredTask(std::move(process)))),
TimeDelta::FromMilliseconds(kWaitInterval));
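
The delayed task is now posted through the current thread's task runner instead of MessageLoop::current(). A minimal sketch of the pattern, assuming a hypothetical DoWork() callback (illustrative only, not part of the patch):

#include "base/bind.h"
#include "base/location.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"

void DoWork() {}  // Hypothetical callback.

void PostDelayedWork() {
  // Requires a task runner on the calling thread (e.g. a MessageLoop).
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::Bind(&DoWork), base::TimeDelta::FromSeconds(1));
}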
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index adfa093cfa7..28cea7b931c 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -72,7 +72,7 @@ struct BASE_EXPORT LaunchOptions {
bool start_hidden;
// If non-null, inherit exactly the list of handles in this vector (these
- // handles must be inheritable). This is only supported on Vista and higher.
+ // handles must be inheritable).
HandlesToInheritVector* handles_to_inherit;
// If true, the new process inherits handles from the parent. In production
@@ -80,7 +80,7 @@ struct BASE_EXPORT LaunchOptions {
// binaries, because open handles from other libraries and subsystems will
// leak to the child process, causing errors such as open socket hangs.
// Note: If |handles_to_inherit| is non-null, this flag is ignored and only
- // those handles will be inherited (on Vista and higher).
+ // those handles will be inherited.
bool inherit_handles;
// If non-null, runs as if the user represented by the token had launched it.
diff --git a/chromium/base/process/launch_win.cc b/chromium/base/process/launch_win.cc
index 243d0a99b4b..97b59a5bfe4 100644
--- a/chromium/base/process/launch_win.cc
+++ b/chromium/base/process/launch_win.cc
@@ -24,7 +24,6 @@
#include "base/process/kill.h"
#include "base/strings/utf_string_conversions.h"
#include "base/sys_info.h"
-#include "base/win/object_watcher.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_process_information.h"
#include "base/win/startup_information.h"
@@ -147,12 +146,6 @@ void RouteStdioToConsole(bool create_console_if_not_found) {
// http://crbug.com/358267. Confirm that the underlying HANDLE is valid
// before aborting.
- // This causes NaCl tests to hang on XP for reasons unclear, perhaps due
- // to not being able to inherit handles. Since it's only for debugging,
- // and redirecting still works, punt for now.
- if (base::win::GetVersion() < base::win::VERSION_VISTA)
- return;
-
intptr_t stdout_handle = _get_osfhandle(_fileno(stdout));
intptr_t stderr_handle = _get_osfhandle(_fileno(stderr));
if (stdout_handle >= 0 || stderr_handle >= 0)
@@ -217,11 +210,6 @@ Process LaunchProcess(const string16& cmdline,
if (options.handles_to_inherit->empty()) {
inherit_handles = false;
} else {
- if (base::win::GetVersion() < base::win::VERSION_VISTA) {
- DLOG(ERROR) << "Specifying handles to inherit requires Vista or later.";
- return Process();
- }
-
if (options.handles_to_inherit->size() >
std::numeric_limits<DWORD>::max() / sizeof(HANDLE)) {
DLOG(ERROR) << "Too many handles to inherit.";
diff --git a/chromium/base/process/memory_mac.mm b/chromium/base/process/memory_mac.mm
index db51fb5098d..32fdd38821d 100644
--- a/chromium/base/process/memory_mac.mm
+++ b/chromium/base/process/memory_mac.mm
@@ -248,20 +248,14 @@ void oom_killer_new() {
// === Core Foundation CFAllocators ===
bool CanGetContextForCFAllocator() {
- return !base::mac::IsOSLaterThanElCapitan_DontCallThis();
+ return !base::mac::IsOSLaterThanSierra_DontCallThis();
}
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
- if (base::mac::IsOSMountainLion() ||
- base::mac::IsOSMavericks() || base::mac::IsOSYosemite() ||
- base::mac::IsOSElCapitan()) {
- ChromeCFAllocatorLions* our_allocator =
- const_cast<ChromeCFAllocatorLions*>(
- reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
- return &our_allocator->_context;
- } else {
- return NULL;
- }
+ ChromeCFAllocatorLions* our_allocator =
+ const_cast<ChromeCFAllocatorLions*>(
+ reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
+ return &our_allocator->_context;
}
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
diff --git a/chromium/base/process/process.h b/chromium/base/process/process.h
index 75f6a009df1..70c82601932 100644
--- a/chromium/base/process/process.h
+++ b/chromium/base/process/process.h
@@ -6,7 +6,7 @@
#define BASE_PROCESS_PROCESS_H_
#include "base/base_export.h"
-#include "base/move.h"
+#include "base/macros.h"
#include "base/process/process_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -31,8 +31,6 @@ namespace base {
// the process dies, and it may be reused by the system, which means that it may
// end up pointing to the wrong process.
class BASE_EXPORT Process {
- MOVE_ONLY_TYPE_FOR_CPP_03(Process)
-
public:
explicit Process(ProcessHandle handle = kNullProcessHandle);
@@ -136,6 +134,8 @@ class BASE_EXPORT Process {
#else
ProcessHandle process_;
#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Process);
};
#if defined(OS_CHROMEOS)
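
Swapping MOVE_ONLY_TYPE_FOR_CPP_03 for DISALLOW_COPY_AND_ASSIGN keeps Process copy-disabled while its existing move operations stay available. A hedged sketch of the resulting usage (illustrative only, not part of the patch):

#include <utility>

#include "base/process/process.h"

void TakeOwnership(base::Process child) {
  // Process is movable but not copyable; ownership of the underlying
  // handle is transferred explicitly.
  base::Process owner = std::move(child);
  // base::Process copy = owner;  // Would not compile: copying is disallowed.
}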
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 8d4e51b517d..57cb3abec0d 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -272,6 +272,14 @@ struct BASE_EXPORT SystemMemoryInfoKB {
int total;
int free;
+#if defined(OS_LINUX)
+ // This provides an estimate of available memory as described here:
+ // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+ // NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
+ // be 0 in earlier kernel versions.
+ int available;
+#endif
+
#if !defined(OS_MACOSX)
int swap_total;
int swap_free;
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 89a26090da7..3d27656d6ac 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -534,6 +534,9 @@ const size_t kDiskWeightedIOTime = 13;
SystemMemoryInfoKB::SystemMemoryInfoKB() {
total = 0;
free = 0;
+#if defined(OS_LINUX)
+ available = 0;
+#endif
buffers = 0;
cached = 0;
active_anon = 0;
@@ -564,6 +567,9 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("total", total);
res->SetInteger("free", free);
+#if defined(OS_LINUX)
+ res->SetInteger("available", available);
+#endif
res->SetInteger("buffers", buffers);
res->SetInteger("cached", cached);
res->SetInteger("active_anon", active_anon);
@@ -621,6 +627,10 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->total;
else if (tokens[0] == "MemFree:")
target = &meminfo->free;
+#if defined(OS_LINUX)
+ else if (tokens[0] == "MemAvailable:")
+ target = &meminfo->available;
+#endif
else if (tokens[0] == "Buffers:")
target = &meminfo->buffers;
else if (tokens[0] == "Cached:")
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index d9e6407e465..c162fb20366 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -681,14 +681,15 @@ TEST_F(ProcessUtilTest, MAYBE_FDRemapping) {
// open some dummy fds to make sure they don't propagate over to the
// child process.
int dev_null = open("/dev/null", O_RDONLY);
+ DPCHECK(dev_null != -1);
int sockets[2];
- socketpair(AF_UNIX, SOCK_STREAM, 0, sockets);
+ int ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sockets);
+ DPCHECK(ret == 0);
int fds_after = CountOpenFDsInChild();
ASSERT_EQ(fds_after, fds_before);
- int ret;
ret = IGNORE_EINTR(close(sockets[0]));
DPCHECK(ret == 0);
ret = IGNORE_EINTR(close(sockets[1]));
@@ -979,7 +980,8 @@ TEST_F(ProcessUtilTest, GetAppOutputWithExitCode) {
}
TEST_F(ProcessUtilTest, GetParentProcessId) {
- base::ProcessId ppid = base::GetParentProcessId(base::GetCurrentProcId());
+ base::ProcessId ppid =
+ base::GetParentProcessId(base::GetCurrentProcessHandle());
EXPECT_EQ(ppid, getppid());
}
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index 82e454ddc4d..3d4979ca0bf 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -120,7 +120,8 @@ StackSamplingProfiler::SamplingThread::SamplingThread(
const CompletedCallback& completed_callback)
: native_sampler_(std::move(native_sampler)),
params_(params),
- stop_event_(false, false),
+ stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
completed_callback_(completed_callback) {}
StackSamplingProfiler::SamplingThread::~SamplingThread() {}
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 12a05e15017..3a59e6d2c30 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -85,7 +85,7 @@ using TargetFunction = const void*(*)(WaitableEvent*, WaitableEvent*,
// SignalAndWaitUntilSignaled() when coordinated with the main thread.
class TargetThread : public PlatformThread::Delegate {
public:
- TargetThread(const StackConfiguration& stack_config);
+ explicit TargetThread(const StackConfiguration& stack_config);
// PlatformThread::Delegate:
void ThreadMain() override;
@@ -149,8 +149,12 @@ class TargetThread : public PlatformThread::Delegate {
};
TargetThread::TargetThread(const StackConfiguration& stack_config)
- : thread_started_event_(false, false), finish_event_(false, false),
- id_(0), stack_config_(stack_config) {}
+ : thread_started_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ finish_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ id_(0),
+ stack_config_(stack_config) {}
void TargetThread::ThreadMain() {
id_ = PlatformThread::CurrentId();
@@ -267,9 +271,8 @@ NativeLibrary LoadOtherLibrary() {
const auto load = [](NativeLibrary* library) {
FilePath other_library_path;
ASSERT_TRUE(PathService::Get(DIR_EXE, &other_library_path));
- other_library_path = other_library_path.Append(FilePath::FromUTF16Unsafe(
- GetNativeLibraryName(ASCIIToUTF16(
- "base_profiler_test_support_library"))));
+ other_library_path = other_library_path.AppendASCII(
+ GetNativeLibraryName("base_profiler_test_support_library"));
NativeLibraryLoadError load_error;
*library = LoadNativeLibrary(other_library_path, &load_error);
ASSERT_TRUE(*library) << "error loading " << other_library_path.value()
@@ -351,9 +354,11 @@ void CaptureProfiles(const SamplingParams& params, TimeDelta profiler_wait_time,
CallStackProfiles* profiles) {
profiles->clear();
- WithTargetThread([&params, profiles, profiler_wait_time](
- PlatformThreadId target_thread_id) {
- WaitableEvent sampling_thread_completed(true, false);
+ WithTargetThread([&params, profiles,
+ profiler_wait_time](PlatformThreadId target_thread_id) {
+ WaitableEvent sampling_thread_completed(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
const StackSamplingProfiler::CompletedCallback callback =
Bind(&SaveProfilesAndSignalEvent, Unretained(profiles),
Unretained(&sampling_thread_completed));
@@ -411,7 +416,7 @@ std::string FormatSampleForDiagnosticOutput(
const Sample& sample,
const std::vector<Module>& modules) {
std::string output;
- for (const Frame& frame: sample) {
+ for (const Frame& frame : sample) {
output += StringPrintf(
"0x%p %s\n", reinterpret_cast<const void*>(frame.instruction_pointer),
modules[frame.module_index].filename.AsUTF8Unsafe().c_str());
@@ -465,13 +470,17 @@ void TestLibraryUnload(bool wait_until_unloaded) {
target_thread.WaitForThreadStart();
- WaitableEvent sampling_thread_completed(true, false);
+ WaitableEvent sampling_thread_completed(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
std::vector<CallStackProfile> profiles;
const StackSamplingProfiler::CompletedCallback callback =
Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
Unretained(&sampling_thread_completed));
- WaitableEvent stack_copied(true, false);
- WaitableEvent start_stack_walk(true, false);
+ WaitableEvent stack_copied(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent start_stack_walk(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
StackCopiedSignaler test_delegate(&stack_copied, &start_stack_walk,
wait_until_unloaded);
StackSamplingProfiler profiler(target_thread.id(), params, callback,
@@ -625,16 +634,19 @@ TEST(StackSamplingProfilerTest, MAYBE_Alloca) {
params.samples_per_burst = 1;
std::vector<CallStackProfile> profiles;
- WithTargetThread([&params, &profiles](
- PlatformThreadId target_thread_id) {
- WaitableEvent sampling_thread_completed(true, false);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
- profiler.Start();
- sampling_thread_completed.Wait();
- }, StackConfiguration(StackConfiguration::WITH_ALLOCA));
+ WithTargetThread(
+ [&params, &profiles](PlatformThreadId target_thread_id) {
+ WaitableEvent sampling_thread_completed(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ const StackSamplingProfiler::CompletedCallback callback =
+ Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+ Unretained(&sampling_thread_completed));
+ StackSamplingProfiler profiler(target_thread_id, params, callback);
+ profiler.Start();
+ sampling_thread_completed.Wait();
+ },
+ StackConfiguration(StackConfiguration::WITH_ALLOCA));
// Look up the sample.
ASSERT_EQ(1u, profiles.size());
@@ -686,7 +698,9 @@ TEST(StackSamplingProfilerTest, MAYBE_StartAndRunAsync) {
CallStackProfiles profiles;
WithTargetThread([&params, &profiles](PlatformThreadId target_thread_id) {
- WaitableEvent sampling_thread_completed(false, false);
+ WaitableEvent sampling_thread_completed(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
const StackSamplingProfiler::CompletedCallback callback =
Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
Unretained(&sampling_thread_completed));
@@ -843,7 +857,9 @@ TEST(StackSamplingProfilerTest, MAYBE_ConcurrentProfiling) {
ScopedVector<WaitableEvent> sampling_completed;
ScopedVector<StackSamplingProfiler> profiler;
for (int i = 0; i < 2; ++i) {
- sampling_completed.push_back(new WaitableEvent(false, false));
+ sampling_completed.push_back(
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED));
const StackSamplingProfiler::CompletedCallback callback =
Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles[i]),
Unretained(sampling_completed[i]));
@@ -886,17 +902,20 @@ TEST(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
std::vector<CallStackProfile> profiles;
{
ScopedNativeLibrary other_library(LoadOtherLibrary());
- WithTargetThread([&params, &profiles](
- PlatformThreadId target_thread_id) {
- WaitableEvent sampling_thread_completed(true, false);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
- profiler.Start();
- sampling_thread_completed.Wait();
- }, StackConfiguration(StackConfiguration::WITH_OTHER_LIBRARY,
- other_library.get()));
+ WithTargetThread(
+ [&params, &profiles](PlatformThreadId target_thread_id) {
+ WaitableEvent sampling_thread_completed(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ const StackSamplingProfiler::CompletedCallback callback =
+ Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+ Unretained(&sampling_thread_completed));
+ StackSamplingProfiler profiler(target_thread_id, params, callback);
+ profiler.Start();
+ sampling_thread_completed.Wait();
+ },
+ StackConfiguration(StackConfiguration::WITH_OTHER_LIBRARY,
+ other_library.get()));
}
// Look up the sample.
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index 4e425c9339b..a2322f84958 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -51,10 +51,18 @@ void RunLoop::Quit() {
}
}
+void RunLoop::QuitWhenIdle() {
+ quit_when_idle_received_ = true;
+}
+
base::Closure RunLoop::QuitClosure() {
return base::Bind(&RunLoop::Quit, weak_factory_.GetWeakPtr());
}
+base::Closure RunLoop::QuitWhenIdleClosure() {
+ return base::Bind(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr());
+}
+
bool RunLoop::BeforeRun() {
DCHECK(!run_called_);
run_called_ = true;
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index 61b0fe1653e..635018f434e 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -44,26 +44,32 @@ class BASE_EXPORT RunLoop {
bool running() const { return running_; }
- // Quit an earlier call to Run(). There can be other nested RunLoops servicing
- // the same task queue (MessageLoop); Quitting one RunLoop has no bearing on
- // the others. Quit can be called before, during or after Run. If called
- // before Run, Run will return immediately when called. Calling Quit after the
- // RunLoop has already finished running has no effect.
+ // Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
+ // earlier call to Run() when there aren't any tasks or messages in the queue.
//
- // WARNING: You must NEVER assume that a call to Quit will terminate the
- // targetted message loop. If a nested message loop continues running, the
- // target may NEVER terminate. It is very easy to livelock (run forever) in
- // such a case.
+ // There can be other nested RunLoops servicing the same task queue
+ // (MessageLoop); Quitting one RunLoop has no bearing on the others. Quit()
+ // and QuitWhenIdle() can be called before, during or after Run(). If called
+ // before Run(), Run() will return immediately when called. Calling Quit() or
+ // QuitWhenIdle() after the RunLoop has already finished running has no
+ // effect.
+ //
+ // WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
+ // terminate the targeted message loop. If a nested message loop continues
+ // running, the target may NEVER terminate. It is very easy to livelock (run
+ // forever) in such a case.
void Quit();
+ void QuitWhenIdle();
- // Convenience method to get a closure that safely calls Quit (has no effect
- // if the RunLoop instance is gone).
+ // Convenience methods to get a closure that safely calls Quit() or
+ // QuitWhenIdle() (has no effect if the RunLoop instance is gone).
//
// Example:
// RunLoop run_loop;
// PostTask(run_loop.QuitClosure());
// run_loop.Run();
base::Closure QuitClosure();
+ base::Closure QuitWhenIdleClosure();
private:
friend class MessageLoop;
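
QuitWhenIdleClosure() mirrors QuitClosure() and is exercised by the unit test added below. A hedged usage sketch (illustrative only, not part of the patch):

#include "base/location.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"

void RunUntilQueueDrains() {
  base::MessageLoop message_loop;
  base::RunLoop run_loop;
  // Run() returns once the posted closure has run and the queue is idle.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, run_loop.QuitWhenIdleClosure());
  run_loop.Run();
}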
diff --git a/chromium/base/run_loop_unittest.cc b/chromium/base/run_loop_unittest.cc
new file mode 100644
index 00000000000..a87ced09885
--- /dev/null
+++ b/chromium/base/run_loop_unittest.cc
@@ -0,0 +1,116 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/run_loop.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+void QuitWhenIdleTask(RunLoop* run_loop, int* counter) {
+ run_loop->QuitWhenIdle();
+ ++(*counter);
+}
+
+void ShouldRunTask(int* counter) {
+ ++(*counter);
+}
+
+void ShouldNotRunTask() {
+ ADD_FAILURE() << "Ran a task that shouldn't run.";
+}
+
+void RunNestedLoopTask(int* counter) {
+ RunLoop nested_run_loop;
+
+ // This task should quit |nested_run_loop| but not the main RunLoop.
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, Bind(&QuitWhenIdleTask, Unretained(&nested_run_loop),
+ Unretained(counter)));
+
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+ MessageLoop::ScopedNestableTaskAllower allower(MessageLoop::current());
+ nested_run_loop.Run();
+
+ ++(*counter);
+}
+
+class RunLoopTest : public testing::Test {
+ protected:
+ RunLoopTest() = default;
+
+ MessageLoop message_loop_;
+ RunLoop run_loop_;
+ int counter_ = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RunLoopTest);
+};
+
+} // namespace
+
+TEST_F(RunLoopTest, QuitWhenIdle) {
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&QuitWhenIdleTask, Unretained(&run_loop_), Unretained(&counter_)));
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, Bind(&ShouldRunTask, Unretained(&counter_)));
+ message_loop_.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+ run_loop_.Run();
+ EXPECT_EQ(2, counter_);
+}
+
+TEST_F(RunLoopTest, QuitWhenIdleNestedLoop) {
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, Bind(&RunNestedLoopTask, Unretained(&counter_)));
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&QuitWhenIdleTask, Unretained(&run_loop_), Unretained(&counter_)));
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, Bind(&ShouldRunTask, Unretained(&counter_)));
+ message_loop_.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+ run_loop_.Run();
+ EXPECT_EQ(4, counter_);
+}
+
+TEST_F(RunLoopTest, QuitWhenIdleClosure) {
+ message_loop_.task_runner()->PostTask(FROM_HERE,
+ run_loop_.QuitWhenIdleClosure());
+ message_loop_.task_runner()->PostTask(
+ FROM_HERE, Bind(&ShouldRunTask, Unretained(&counter_)));
+ message_loop_.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+ run_loop_.Run();
+ EXPECT_EQ(1, counter_);
+}
+
+// Verify that the QuitWhenIdleClosure() can run after the RunLoop has been
+// deleted. It should have no effect.
+TEST_F(RunLoopTest, QuitWhenIdleClosureAfterRunLoopScope) {
+ Closure quit_when_idle_closure;
+ {
+ RunLoop run_loop;
+ quit_when_idle_closure = run_loop.QuitWhenIdleClosure();
+ run_loop.RunUntilIdle();
+ }
+ quit_when_idle_closure.Run();
+}
+
+} // namespace base
diff --git a/chromium/base/scoped_generic.h b/chromium/base/scoped_generic.h
index d41f19512ce..84de6b7d50d 100644
--- a/chromium/base/scoped_generic.h
+++ b/chromium/base/scoped_generic.h
@@ -11,7 +11,6 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
-#include "base/move.h"
namespace base {
@@ -54,8 +53,6 @@ namespace base {
// typedef ScopedGeneric<int, FooScopedTraits> ScopedFoo;
template<typename T, typename Traits>
class ScopedGeneric {
- DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(ScopedGeneric)
-
private:
// This must be first since it's used inline below.
//
@@ -160,6 +157,8 @@ class ScopedGeneric {
const ScopedGeneric<T2, Traits2>& p2) const;
Data data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedGeneric);
};
template<class T, class Traits>
diff --git a/chromium/base/scoped_native_library_unittest.cc b/chromium/base/scoped_native_library_unittest.cc
index 204aadb983e..763b45f6c36 100644
--- a/chromium/base/scoped_native_library_unittest.cc
+++ b/chromium/base/scoped_native_library_unittest.cc
@@ -9,6 +9,7 @@
#if defined(OS_WIN)
#include "base/files/file_path.h"
+#include "base/strings/utf_string_conversions.h"
#endif
namespace base {
@@ -27,8 +28,8 @@ TEST(ScopedNativeLibrary, Basic) {
const char kFunctionName[] = "DirectDrawCreate";
NativeLibrary native_library;
{
- FilePath path(GetNativeLibraryName(L"ddraw"));
- native_library = LoadNativeLibrary(path, NULL);
+ FilePath path(FilePath::FromUTF8Unsafe(GetNativeLibraryName("ddraw")));
+ native_library = LoadNativeLibrary(path, nullptr);
ScopedNativeLibrary library(native_library);
EXPECT_TRUE(library.is_valid());
EXPECT_EQ(native_library, library.get());
@@ -39,8 +40,8 @@ TEST(ScopedNativeLibrary, Basic) {
GetFunctionPointerFromNativeLibrary(native_library, kFunctionName),
test_function);
}
- EXPECT_EQ(NULL,
- GetFunctionPointerFromNativeLibrary(native_library, kFunctionName));
+ EXPECT_FALSE(
+ GetFunctionPointerFromNativeLibrary(native_library, kFunctionName));
#endif
}
diff --git a/chromium/base/sequence_checker_unittest.cc b/chromium/base/sequence_checker_unittest.cc
index 1e89a5f4b48..196bb1cc797 100644
--- a/chromium/base/sequence_checker_unittest.cc
+++ b/chromium/base/sequence_checker_unittest.cc
@@ -95,9 +95,8 @@ class SequenceCheckerTest : public testing::Test {
void PostDeleteToOtherThread(
std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
- other_thread()->message_loop()->DeleteSoon(
- FROM_HERE,
- sequence_checked_object.release());
+ other_thread()->message_loop()->task_runner()->DeleteSoon(
+ FROM_HERE, sequence_checked_object.release());
}
// Destroys the SequencedWorkerPool instance, blocking until it is fully shut
diff --git a/chromium/base/sha1_portable.cc b/chromium/base/sha1.cc
index dd2ab6fe177..a710001ab70 100644
--- a/chromium/base/sha1_portable.cc
+++ b/chromium/base/sha1.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <string.h>
+#include "base/sys_byteorder.h"
namespace base {
@@ -92,10 +93,6 @@ static inline uint32_t K(uint32_t t) {
}
}
-static inline void swapends(uint32_t* t) {
- *t = (*t >> 24) | ((*t >> 8) & 0xff00) | ((*t & 0xff00) << 8) | (*t << 24);
-}
-
const int SecureHashAlgorithm::kDigestSizeBytes = 20;
void SecureHashAlgorithm::Init() {
@@ -118,7 +115,7 @@ void SecureHashAlgorithm::Final() {
Process();
for (int t = 0; t < 5; ++t)
- swapends(&H[t]);
+ H[t] = ByteSwap(H[t]);
}
void SecureHashAlgorithm::Update(const void* data, size_t nbytes) {
@@ -165,7 +162,7 @@ void SecureHashAlgorithm::Process() {
// W and M are in a union, so no need to memcpy.
// memcpy(W, M, sizeof(M));
for (t = 0; t < 16; ++t)
- swapends(&W[t]);
+ W[t] = ByteSwap(W[t]);
// b.
for (t = 16; t < 80; ++t)
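
The hand-rolled swapends() helper is replaced by base::ByteSwap(), which performs the same 32-bit byte reversal. A minimal equivalence sketch (illustrative only, not part of the patch):

#include <stdint.h>

#include "base/sys_byteorder.h"

uint32_t ReverseWordBytes(uint32_t word) {
  // base::ByteSwap(0x11223344) == 0x44332211, matching what the removed
  // swapends() computed.
  return base::ByteSwap(word);
}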
diff --git a/chromium/base/sha1_win.cc b/chromium/base/sha1_win.cc
deleted file mode 100644
index b64c9eb8583..00000000000
--- a/chromium/base/sha1_win.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/sha1.h"
-
-#include <windows.h>
-#include <wincrypt.h>
-
-// This file is not being compiled at the moment (see bug 47218). If we keep
-// sha1 inside base, we cannot depend on src/crypto.
-// #include "crypto/scoped_capi_types.h"
-#include "base/logging.h"
-
-namespace base {
-
-std::string SHA1HashString(const std::string& str) {
- ScopedHCRYPTPROV provider;
- if (!CryptAcquireContext(provider.receive(), NULL, NULL, PROV_RSA_FULL,
- CRYPT_VERIFYCONTEXT)) {
- DPLOG(ERROR) << "CryptAcquireContext failed";
- return std::string(kSHA1Length, '\0');
- }
-
- {
- ScopedHCRYPTHASH hash;
- if (!CryptCreateHash(provider, CALG_SHA1, 0, 0, hash.receive())) {
- DPLOG(ERROR) << "CryptCreateHash failed";
- return std::string(kSHA1Length, '\0');
- }
-
- if (!CryptHashData(hash, reinterpret_cast<CONST BYTE*>(str.data()),
- static_cast<DWORD>(str.length()), 0)) {
- DPLOG(ERROR) << "CryptHashData failed";
- return std::string(kSHA1Length, '\0');
- }
-
- DWORD hash_len = 0;
- DWORD buffer_size = sizeof hash_len;
- if (!CryptGetHashParam(hash, HP_HASHSIZE,
- reinterpret_cast<unsigned char*>(&hash_len),
- &buffer_size, 0)) {
- DPLOG(ERROR) << "CryptGetHashParam(HP_HASHSIZE) failed";
- return std::string(kSHA1Length, '\0');
- }
-
- std::string result;
- if (!CryptGetHashParam(hash, HP_HASHVAL,
- // We need the + 1 here not because the call will write a trailing \0,
- // but so that result.length() is correctly set to |hash_len|.
- reinterpret_cast<BYTE*>(WriteInto(&result, hash_len + 1)), &hash_len,
- 0)) {
- DPLOG(ERROR) << "CryptGetHashParam(HP_HASHVAL) failed";
- return std::string(kSHA1Length, '\0');
- }
-
- if (hash_len != kSHA1Length) {
- DLOG(ERROR) << "Returned hash value is wrong length: " << hash_len
- << " should be " << kSHA1Length;
- return std::string(kSHA1Length, '\0');
- }
-
- return result;
- }
-}
-
-} // namespace base
diff --git a/chromium/base/strings/string16.h b/chromium/base/strings/string16.h
index 82dd0fab4f3..30f4e3eec0c 100644
--- a/chromium/base/strings/string16.h
+++ b/chromium/base/strings/string16.h
@@ -48,6 +48,8 @@ typedef std::char_traits<wchar_t> string16_char_traits;
#elif defined(WCHAR_T_IS_UTF32)
+#include <wchar.h> // for mbstate_t
+
namespace base {
typedef uint16_t char16;
diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc
index f4cf6ec521f..755811ded7e 100644
--- a/chromium/base/strings/string_number_conversions.cc
+++ b/chromium/base/strings/string_number_conversions.cc
@@ -12,10 +12,8 @@
#include <limits>
#include "base/logging.h"
-#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/scoped_clear_errno.h"
-#include "base/strings/utf_string_conversions.h"
#include "base/third_party/dmg_fp/dmg_fp.h"
namespace base {
diff --git a/chromium/base/strings/string_number_conversions.h b/chromium/base/strings/string_number_conversions.h
index 1265f0dcba3..a3b876aa155 100644
--- a/chromium/base/strings/string_number_conversions.h
+++ b/chromium/base/strings/string_number_conversions.h
@@ -25,6 +25,14 @@
// Please do not add "convenience" functions for converting strings to integers
// that return the value and ignore success/failure. That encourages people to
// write code that doesn't properly handle the error conditions.
+//
+// DO NOT use these functions in UI unless it is deliberately not localized.
+// Instead, use base::MessageFormatter for a complex message with numbers
+// (integer, float, double) embedded or base::Format{Number,Double,Percent} to
+// just format a single number/percent. Note that some languages use native
+// digits instead of ASCII digits while others use a group separator or decimal
+// point different from ',' and '.'. Using these functions in the UI would lead
+// numbers to be formatted in a non-native way.
// ----------------------------------------------------------------------------
namespace base {
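
The added comment steers UI code toward the i18n formatters; for non-UI parsing, the conversions here keep reporting success explicitly. A hedged sketch of the intended error-checked use, with ParsePort as a hypothetical caller (illustrative only, not part of the patch):

#include <string>

#include "base/strings/string_number_conversions.h"

bool ParsePort(const std::string& input, int* port) {
  // StringToInt() returns false on overflow, trailing junk or empty input,
  // so callers always get an explicit success/failure signal.
  return base::StringToInt(input, port) && *port > 0 && *port <= 65535;
}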
diff --git a/chromium/base/strings/string_number_conversions_unittest.cc b/chromium/base/strings/string_number_conversions_unittest.cc
index 0cc71a696b4..640468c33d5 100644
--- a/chromium/base/strings/string_number_conversions_unittest.cc
+++ b/chromium/base/strings/string_number_conversions_unittest.cc
@@ -13,6 +13,7 @@
#include <cmath>
#include <limits>
+#include "base/bit_cast.h"
#include "base/format_macros.h"
#include "base/macros.h"
#include "base/strings/stringprintf.h"
@@ -805,4 +806,54 @@ TEST(StringNumberConversionsTest, HexEncode) {
EXPECT_EQ(hex.compare("01FF02FE038081"), 0);
}
+// Test cases of known-bad strtod conversions that motivated the use of dmg_fp.
+// See https://bugs.chromium.org/p/chromium/issues/detail?id=593512.
+TEST(StringNumberConversionsTest, StrtodFailures) {
+ static const struct {
+ const char* input;
+ uint64_t expected;
+ } cases[] = {
+ // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-visual-c-plus-plus/
+ {"9214843084008499", 0x43405e6cec57761aULL},
+ {"0.500000000000000166533453693773481063544750213623046875",
+ 0x3fe0000000000002ULL},
+ {"30078505129381147446200", 0x44997a3c7271b021ULL},
+ {"1777820000000000000001", 0x4458180d5bad2e3eULL},
+ {"0.500000000000000166547006220929549868969843373633921146392822265625",
+ 0x3fe0000000000002ULL},
+ {"0.50000000000000016656055874808561867439493653364479541778564453125",
+ 0x3fe0000000000002ULL},
+ {"0.3932922657273", 0x3fd92bb352c4623aULL},
+
+ // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-gcc-and-glibc/
+ {"0.500000000000000166533453693773481063544750213623046875",
+ 0x3fe0000000000002ULL},
+ {"3.518437208883201171875e13", 0x42c0000000000002ULL},
+ {"62.5364939768271845828", 0x404f44abd5aa7ca4ULL},
+ {"8.10109172351e-10", 0x3e0bd5cbaef0fd0cULL},
+ {"1.50000000000000011102230246251565404236316680908203125",
+ 0x3ff8000000000000ULL},
+ {"9007199254740991.4999999999999999999999999999999995",
+ 0x433fffffffffffffULL},
+
+ // http://www.exploringbinary.com/incorrect-decimal-to-floating-point-conversion-in-sqlite/
+ {"1e-23", 0x3b282db34012b251ULL},
+ {"8.533e+68", 0x4e3fa69165a8eea2ULL},
+ {"4.1006e-184", 0x19dbe0d1c7ea60c9ULL},
+ {"9.998e+307", 0x7fe1cc0a350ca87bULL},
+ {"9.9538452227e-280", 0x0602117ae45cde43ULL},
+ {"6.47660115e-260", 0x0a1fdd9e333badadULL},
+ {"7.4e+47", 0x49e033d7eca0adefULL},
+ {"5.92e+48", 0x4a1033d7eca0adefULL},
+ {"7.35e+66", 0x4dd172b70eababa9ULL},
+ {"8.32116e+55", 0x4b8b2628393e02cdULL},
+ };
+
+ for (const auto& test : cases) {
+ double output;
+ EXPECT_TRUE(StringToDouble(test.input, &output));
+ EXPECT_EQ(bit_cast<uint64_t>(output), test.expected);
+ }
+}
+
} // namespace base
diff --git a/chromium/base/strings/string_util.cc b/chromium/base/strings/string_util.cc
index 6bbc215807d..cb668ed7fff 100644
--- a/chromium/base/strings/string_util.cc
+++ b/chromium/base/strings/string_util.cc
@@ -23,7 +23,6 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/singleton.h"
-#include "base/strings/string_split.h"
#include "base/strings/utf_string_conversion_utils.h"
#include "base/strings/utf_string_conversions.h"
#include "base/third_party/icu/icu_utf.h"
diff --git a/chromium/base/sync_socket_win.cc b/chromium/base/sync_socket_win.cc
index 067c7de217b..c101f77ad9d 100644
--- a/chromium/base/sync_socket_win.cc
+++ b/chromium/base/sync_socket_win.cc
@@ -294,13 +294,17 @@ size_t SyncSocket::Peek() {
}
CancelableSyncSocket::CancelableSyncSocket()
- : shutdown_event_(true, false), file_operation_(true, false) {
-}
+ : shutdown_event_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ file_operation_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {}
CancelableSyncSocket::CancelableSyncSocket(Handle handle)
- : SyncSocket(handle), shutdown_event_(true, false),
- file_operation_(true, false) {
-}
+ : SyncSocket(handle),
+ shutdown_event_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ file_operation_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {}
bool CancelableSyncSocket::Shutdown() {
// This doesn't shut down the pipe immediately, but subsequent Receive or Send
diff --git a/chromium/base/synchronization/condition_variable_win.cc b/chromium/base/synchronization/condition_variable_win.cc
index 61c6a715e0e..d5f24e44387 100644
--- a/chromium/base/synchronization/condition_variable_win.cc
+++ b/chromium/base/synchronization/condition_variable_win.cc
@@ -34,8 +34,8 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
user_lock_->CheckHeldAndUnmark();
#endif
- if (FALSE == SleepConditionVariableSRW(&cv_, srwlock_, timeout, 0)) {
- DCHECK(GetLastError() != WAIT_TIMEOUT);
+ if (!SleepConditionVariableSRW(&cv_, srwlock_, timeout, 0)) {
+ DCHECK_EQ(static_cast<DWORD>(ERROR_TIMEOUT), GetLastError());
}
#if DCHECK_IS_ON()
diff --git a/chromium/base/synchronization/read_write_lock.h b/chromium/base/synchronization/read_write_lock.h
new file mode 100644
index 00000000000..4c59b7b1168
--- /dev/null
+++ b/chromium/base/synchronization/read_write_lock.h
@@ -0,0 +1,105 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+#define BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_NACL)
+#include "base/synchronization/lock.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#else
+# error No reader-writer lock defined for this platform.
+#endif
+
+namespace base {
+namespace subtle {
+
+// An OS-independent wrapper around reader-writer locks. There's no magic here.
+//
+// You are strongly encouraged to use base::Lock instead of this, unless you
+// can demonstrate contention and show that this would lead to an improvement.
+// This lock does not make any guarantees of fairness, which can lead to writer
+// starvation under certain access patterns. You should carefully consider your
+// writer access patterns before using this lock.
+class BASE_EXPORT ReadWriteLock {
+ public:
+ ReadWriteLock();
+ ~ReadWriteLock();
+
+ // Reader lock functions.
+ void ReadAcquire();
+ void ReadRelease();
+
+ // Writer lock functions.
+ void WriteAcquire();
+ void WriteRelease();
+
+ private:
+#if defined(OS_WIN)
+ using NativeHandle = SRWLOCK;
+#elif defined(OS_NACL)
+ using NativeHandle = Lock;
+#elif defined(OS_POSIX)
+ using NativeHandle = pthread_rwlock_t;
+#endif
+
+ NativeHandle native_handle_;
+
+#if defined(OS_NACL)
+ // Even though NaCl has a pthread_rwlock implementation, the build rules don't
+ // make it universally available. So instead, implement a slower and trivial
+ // reader-writer lock using a regular mutex.
+ // TODO(amistry): Remove this and use the posix implementation when it's
+ // available in all build configurations.
+ uint32_t readers_ = 0;
+ // base::Lock does checking to ensure the lock is acquired and released on the
+ // same thread. This is not the case for this lock, so use pthread mutexes
+ // directly here.
+ pthread_mutex_t writer_lock_ = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ReadWriteLock);
+};
+
+class AutoReadLock {
+ public:
+ explicit AutoReadLock(ReadWriteLock& lock) : lock_(lock) {
+ lock_.ReadAcquire();
+ }
+ ~AutoReadLock() {
+ lock_.ReadRelease();
+ }
+
+ private:
+ ReadWriteLock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoReadLock);
+};
+
+class AutoWriteLock {
+ public:
+ explicit AutoWriteLock(ReadWriteLock& lock) : lock_(lock) {
+ lock_.WriteAcquire();
+ }
+ ~AutoWriteLock() {
+ lock_.WriteRelease();
+ }
+
+ private:
+ ReadWriteLock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoWriteLock);
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
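
The RAII helpers cover the typical pattern: many concurrent readers, one exclusive writer. A hedged sketch of guarding a simple map with the new lock (illustrative only, not part of the patch):

#include <map>
#include <string>

#include "base/synchronization/read_write_lock.h"

class Cache {
 public:
  bool Lookup(const std::string& key, std::string* out) {
    // Multiple threads may hold the read lock simultaneously.
    base::subtle::AutoReadLock lock(lock_);
    auto it = map_.find(key);
    if (it == map_.end())
      return false;
    *out = it->second;
    return true;
  }

  void Insert(const std::string& key, const std::string& value) {
    // Writers exclude both readers and other writers.
    base::subtle::AutoWriteLock lock(lock_);
    map_[key] = value;
  }

 private:
  base::subtle::ReadWriteLock lock_;
  std::map<std::string, std::string> map_;
};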
diff --git a/chromium/base/synchronization/read_write_lock_nacl.cc b/chromium/base/synchronization/read_write_lock_nacl.cc
new file mode 100644
index 00000000000..7278b1f52cd
--- /dev/null
+++ b/chromium/base/synchronization/read_write_lock_nacl.cc
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace subtle {
+
+ReadWriteLock::ReadWriteLock() {}
+
+ReadWriteLock::~ReadWriteLock() {
+ DCHECK_EQ(0u, readers_);
+ int result = pthread_mutex_destroy(&writer_lock_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadAcquire() {
+ AutoLock hold(native_handle_);
+ readers_++;
+ if (readers_ == 1) {
+ int result = pthread_mutex_lock(&writer_lock_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+ }
+}
+
+void ReadWriteLock::ReadRelease() {
+ AutoLock hold(native_handle_);
+ readers_--;
+ if (readers_ == 0) {
+ int result = pthread_mutex_unlock(&writer_lock_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+ }
+}
+
+void ReadWriteLock::WriteAcquire() {
+ int result = pthread_mutex_lock(&writer_lock_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteRelease() {
+ int result = pthread_mutex_unlock(&writer_lock_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/synchronization/read_write_lock_posix.cc b/chromium/base/synchronization/read_write_lock_posix.cc
new file mode 100644
index 00000000000..e5de091f06e
--- /dev/null
+++ b/chromium/base/synchronization/read_write_lock_posix.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace subtle {
+
+ReadWriteLock::ReadWriteLock() : native_handle_(PTHREAD_RWLOCK_INITIALIZER) {}
+
+ReadWriteLock::~ReadWriteLock() {
+ int result = pthread_rwlock_destroy(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadAcquire() {
+ int result = pthread_rwlock_rdlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadRelease() {
+ int result = pthread_rwlock_unlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteAcquire() {
+ int result = pthread_rwlock_wrlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteRelease() {
+ int result = pthread_rwlock_unlock(&native_handle_);
+ DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/synchronization/read_write_lock_unittest.cc b/chromium/base/synchronization/read_write_lock_unittest.cc
new file mode 100644
index 00000000000..d1acaf25449
--- /dev/null
+++ b/chromium/base/synchronization/read_write_lock_unittest.cc
@@ -0,0 +1,225 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace subtle {
+
+// Basic test to make sure that *Acquire()/*Release() don't crash.
+
+class BasicReadWriteLockTestThread : public PlatformThread::Delegate {
+ public:
+ explicit BasicReadWriteLockTestThread(ReadWriteLock* lock)
+ : lock_(lock), acquired_(0) {}
+
+ void ThreadMain() override {
+ for (int i = 0; i < 10; i++) {
+ AutoReadLock locker(*lock_);
+ acquired_++;
+ }
+ for (int i = 0; i < 10; i++) {
+ AutoWriteLock locker(*lock_);
+ acquired_++;
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+ }
+ }
+
+ int acquired() const { return acquired_; }
+
+ private:
+ ReadWriteLock* lock_;
+ int acquired_;
+
+ DISALLOW_COPY_AND_ASSIGN(BasicReadWriteLockTestThread);
+};
+
+TEST(ReadWriteLockTest, Basic) {
+ ReadWriteLock lock;
+ BasicReadWriteLockTestThread thread(&lock);
+ PlatformThreadHandle handle;
+
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+ int acquired = 0;
+ for (int i = 0; i < 5; i++) {
+ AutoReadLock locker(lock);
+ acquired++;
+ }
+ for (int i = 0; i < 10; i++) {
+ AutoWriteLock locker(lock);
+ acquired++;
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+ }
+ for (int i = 0; i < 5; i++) {
+ AutoReadLock locker(lock);
+ acquired++;
+ }
+
+ PlatformThread::Join(handle);
+
+ EXPECT_EQ(20, acquired);
+ EXPECT_GE(20, thread.acquired());
+}
+
+// Tests that reader locks allow multiple simultaneous reader acquisitions.
+
+class ReaderReadWriteLockTestThread : public PlatformThread::Delegate {
+ public:
+ ReaderReadWriteLockTestThread(ReadWriteLock* lock) : lock_(lock) {}
+
+ void ThreadMain() override {
+ AutoReadLock locker(*lock_);
+ did_acquire_ = true;
+ }
+
+ bool did_acquire() const { return did_acquire_; }
+
+ private:
+ ReadWriteLock* lock_;
+ bool did_acquire_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(ReaderReadWriteLockTestThread);
+};
+
+TEST(ReadWriteLockTest, ReaderTwoThreads) {
+ ReadWriteLock lock;
+
+ AutoReadLock auto_lock(lock);
+
+ ReaderReadWriteLockTestThread thread(&lock);
+ PlatformThreadHandle handle;
+
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+ PlatformThread::Join(handle);
+ EXPECT_TRUE(thread.did_acquire());
+}
+
+// Tests that writer locks exclude reader locks.
+
+class ReadAndWriteReadWriteLockTestThread : public PlatformThread::Delegate {
+ public:
+ ReadAndWriteReadWriteLockTestThread(ReadWriteLock* lock, int* value)
+ : lock_(lock),
+ value_(value),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ void ThreadMain() override {
+ AutoWriteLock locker(*lock_);
+ (*value_)++;
+ event_.Signal();
+ }
+
+ void Wait() {
+ event_.Wait();
+ }
+
+ private:
+ ReadWriteLock* lock_;
+ int* value_;
+ WaitableEvent event_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadAndWriteReadWriteLockTestThread);
+};
+
+TEST(ReadWriteLockTest, ReadAndWriteThreads) {
+ ReadWriteLock lock;
+ int value = 0;
+
+ ReadAndWriteReadWriteLockTestThread thread(&lock, &value);
+ PlatformThreadHandle handle;
+ {
+ AutoReadLock read_locker(lock);
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+
+ // |value| should be unchanged since we hold a reader lock.
+ EXPECT_EQ(0, value);
+ }
+
+ thread.Wait();
+ // After releasing our reader lock, the thread can acquire a write lock and
+ // change |value|.
+ EXPECT_EQ(1, value);
+ PlatformThread::Join(handle);
+}
+
+// Tests that writer locks actually exclude.
+
+class WriterReadWriteLockTestThread : public PlatformThread::Delegate {
+ public:
+ WriterReadWriteLockTestThread(ReadWriteLock* lock, int* value)
+ : lock_(lock), value_(value) {}
+
+ // Static helper which can also be called from the main thread.
+ static void DoStuff(ReadWriteLock* lock, int* value) {
+ for (int i = 0; i < 40; i++) {
+ AutoWriteLock locker(*lock);
+ int v = *value;
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 10));
+ *value = v + 1;
+ }
+ }
+
+ void ThreadMain() override { DoStuff(lock_, value_); }
+
+ private:
+ ReadWriteLock* lock_;
+ int* value_;
+
+ DISALLOW_COPY_AND_ASSIGN(WriterReadWriteLockTestThread);
+};
+
+TEST(ReadWriteLockTest, MutexTwoThreads) {
+ ReadWriteLock lock;
+ int value = 0;
+
+ WriterReadWriteLockTestThread thread(&lock, &value);
+ PlatformThreadHandle handle;
+
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+ WriterReadWriteLockTestThread::DoStuff(&lock, &value);
+
+ PlatformThread::Join(handle);
+
+ EXPECT_EQ(2 * 40, value);
+}
+
+TEST(ReadWriteLockTest, MutexFourThreads) {
+ ReadWriteLock lock;
+ int value = 0;
+
+ WriterReadWriteLockTestThread thread1(&lock, &value);
+ WriterReadWriteLockTestThread thread2(&lock, &value);
+ WriterReadWriteLockTestThread thread3(&lock, &value);
+ PlatformThreadHandle handle1;
+ PlatformThreadHandle handle2;
+ PlatformThreadHandle handle3;
+
+ ASSERT_TRUE(PlatformThread::Create(0, &thread1, &handle1));
+ ASSERT_TRUE(PlatformThread::Create(0, &thread2, &handle2));
+ ASSERT_TRUE(PlatformThread::Create(0, &thread3, &handle3));
+
+ WriterReadWriteLockTestThread::DoStuff(&lock, &value);
+
+ PlatformThread::Join(handle1);
+ PlatformThread::Join(handle2);
+ PlatformThread::Join(handle3);
+
+ EXPECT_EQ(4 * 40, value);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/synchronization/read_write_lock_win.cc b/chromium/base/synchronization/read_write_lock_win.cc
new file mode 100644
index 00000000000..8ae626df66b
--- /dev/null
+++ b/chromium/base/synchronization/read_write_lock_win.cc
@@ -0,0 +1,31 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+namespace base {
+namespace subtle {
+
+ReadWriteLock::ReadWriteLock() : native_handle_(SRWLOCK_INIT) {}
+
+ReadWriteLock::~ReadWriteLock() = default;
+
+void ReadWriteLock::ReadAcquire() {
+ ::AcquireSRWLockShared(&native_handle_);
+}
+
+void ReadWriteLock::ReadRelease() {
+ ::ReleaseSRWLockShared(&native_handle_);
+}
+
+void ReadWriteLock::WriteAcquire() {
+ ::AcquireSRWLockExclusive(&native_handle_);
+}
+
+void ReadWriteLock::WriteRelease() {
+ ::ReleaseSRWLockExclusive(&native_handle_);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/synchronization/waitable_event.h b/chromium/base/synchronization/waitable_event.h
index b5d91d00b51..3863e98455e 100644
--- a/chromium/base/synchronization/waitable_event.h
+++ b/chromium/base/synchronization/waitable_event.h
@@ -43,11 +43,18 @@ class TimeDelta;
// be better off just using a Windows event directly.
class BASE_EXPORT WaitableEvent {
public:
- // If manual_reset is true, then to set the event state to non-signaled, a
- // consumer must call the Reset method. If this parameter is false, then the
- // system automatically resets the event state to non-signaled after a single
- // waiting thread has been released.
- WaitableEvent(bool manual_reset, bool initially_signaled);
+ // Indicates whether a WaitableEvent should automatically reset the event
+ // state after a single waiting thread has been released or remain signaled
+ // until Reset() is manually invoked.
+ enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+ // Indicates whether a new WaitableEvent should start in a signaled state or
+ // not.
+ enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+ // Constructs a WaitableEvent with policy and initial state as detailed in
+ // the above enums.
+ WaitableEvent(ResetPolicy reset_policy, InitialState initial_state);
#if defined(OS_WIN)
// Create a WaitableEvent from an Event HANDLE which has already been
@@ -150,7 +157,7 @@ class BASE_EXPORT WaitableEvent {
struct WaitableEventKernel :
public RefCountedThreadSafe<WaitableEventKernel> {
public:
- WaitableEventKernel(bool manual_reset, bool initially_signaled);
+ WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
bool Dequeue(Waiter* waiter, void* tag);
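
Call sites migrate from the two-bool constructor to the enums, which removes the ambiguity of WaitableEvent(true, false)-style arguments. A hedged before/after sketch (illustrative only, not part of the patch):

#include "base/synchronization/waitable_event.h"

base::WaitableEvent* MakeManualResetEvent() {
  // Previously: new WaitableEvent(true /* manual_reset */,
  //                               false /* initially_signaled */).
  return new base::WaitableEvent(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
}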
diff --git a/chromium/base/synchronization/waitable_event_posix.cc b/chromium/base/synchronization/waitable_event_posix.cc
index 64d4376fe56..b32c8827116 100644
--- a/chromium/base/synchronization/waitable_event_posix.cc
+++ b/chromium/base/synchronization/waitable_event_posix.cc
@@ -39,12 +39,11 @@ namespace base {
// -----------------------------------------------------------------------------
// This is just an abstract base class for waking the two types of waiters
// -----------------------------------------------------------------------------
-WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
- : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {
-}
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+ InitialState initial_state)
+ : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
-WaitableEvent::~WaitableEvent() {
-}
+WaitableEvent::~WaitableEvent() = default;
void WaitableEvent::Reset() {
base::AutoLock locked(kernel_->lock_);
@@ -348,14 +347,13 @@ size_t WaitableEvent::EnqueueMany
// -----------------------------------------------------------------------------
// Private functions...
-WaitableEvent::WaitableEventKernel::WaitableEventKernel(bool manual_reset,
- bool initially_signaled)
- : manual_reset_(manual_reset),
- signaled_(initially_signaled) {
-}
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+ ResetPolicy reset_policy,
+ InitialState initial_state)
+ : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+ signaled_(initial_state == InitialState::SIGNALED) {}
-WaitableEvent::WaitableEventKernel::~WaitableEventKernel() {
-}
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
// -----------------------------------------------------------------------------
// Wake all waiting waiters. Called with lock held.
diff --git a/chromium/base/synchronization/waitable_event_unittest.cc b/chromium/base/synchronization/waitable_event_unittest.cc
index 2930409b597..ac5c9f12558 100644
--- a/chromium/base/synchronization/waitable_event_unittest.cc
+++ b/chromium/base/synchronization/waitable_event_unittest.cc
@@ -15,7 +15,8 @@
namespace base {
TEST(WaitableEventTest, ManualBasics) {
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EXPECT_FALSE(event.IsSignaled());
@@ -33,7 +34,8 @@ TEST(WaitableEventTest, ManualBasics) {
}
TEST(WaitableEventTest, AutoBasics) {
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EXPECT_FALSE(event.IsSignaled());
@@ -55,8 +57,10 @@ TEST(WaitableEventTest, AutoBasics) {
TEST(WaitableEventTest, WaitManyShortcut) {
WaitableEvent* ev[5];
- for (unsigned i = 0; i < 5; ++i)
- ev[i] = new WaitableEvent(false, false);
+ for (unsigned i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
ev[3]->Signal();
EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
@@ -94,7 +98,9 @@ class WaitableEventSignaler : public PlatformThread::Delegate {
// Tests that a WaitableEvent can be safely deleted when |Wait| is done without
// additional synchronization.
TEST(WaitableEventTest, WaitAndDelete) {
- WaitableEvent* ev = new WaitableEvent(false, false);
+ WaitableEvent* ev =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev);
PlatformThreadHandle thread;
@@ -110,8 +116,10 @@ TEST(WaitableEventTest, WaitAndDelete) {
// without additional synchronization.
TEST(WaitableEventTest, WaitMany) {
WaitableEvent* ev[5];
- for (unsigned i = 0; i < 5; ++i)
- ev[i] = new WaitableEvent(false, false);
+ for (unsigned i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev[2]);
PlatformThreadHandle thread;
@@ -135,7 +143,9 @@ TEST(WaitableEventTest, WaitMany) {
#define MAYBE_TimedWait TimedWait
#endif
TEST(WaitableEventTest, MAYBE_TimedWait) {
- WaitableEvent* ev = new WaitableEvent(false, false);
+ WaitableEvent* ev =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
TimeDelta thread_delay = TimeDelta::FromMilliseconds(10);
WaitableEventSignaler signaler(thread_delay, ev);
diff --git a/chromium/base/synchronization/waitable_event_watcher_unittest.cc b/chromium/base/synchronization/waitable_event_watcher_unittest.cc
index 58444b3c4ef..8b76da58158 100644
--- a/chromium/base/synchronization/waitable_event_watcher_unittest.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_unittest.cc
@@ -48,7 +48,8 @@ void RunTest_BasicSignal(MessageLoop::Type message_loop_type) {
MessageLoop message_loop(message_loop_type);
// A manual-reset event that is not yet signaled.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventWatcher watcher;
EXPECT_TRUE(watcher.GetWatchedEvent() == NULL);
@@ -58,7 +59,7 @@ void RunTest_BasicSignal(MessageLoop::Type message_loop_type) {
event.Signal();
- MessageLoop::current()->Run();
+ RunLoop().Run();
EXPECT_TRUE(watcher.GetWatchedEvent() == NULL);
}
@@ -67,7 +68,8 @@ void RunTest_BasicCancel(MessageLoop::Type message_loop_type) {
MessageLoop message_loop(message_loop_type);
// A manual-reset event that is not yet signaled.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventWatcher watcher;
@@ -80,7 +82,8 @@ void RunTest_CancelAfterSet(MessageLoop::Type message_loop_type) {
MessageLoop message_loop(message_loop_type);
// A manual-reset event that is not yet signaled.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventWatcher watcher;
@@ -108,7 +111,8 @@ void RunTest_OutlivesMessageLoop(MessageLoop::Type message_loop_type) {
// Simulate a MessageLoop that dies before an WaitableEventWatcher. This
// ordinarily doesn't happen when people use the Thread class, but it can
// happen when people use the Singleton pattern or atexit.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
{
WaitableEventWatcher watcher;
{
@@ -128,7 +132,9 @@ void RunTest_DeleteUnder(MessageLoop::Type message_loop_type) {
{
WaitableEventWatcher watcher;
- WaitableEvent* event = new WaitableEvent(false, false);
+ WaitableEvent* event =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
watcher.StartWatching(event, Bind(&QuitWhenSignaled));
delete event;
diff --git a/chromium/base/synchronization/waitable_event_win.cc b/chromium/base/synchronization/waitable_event_win.cc
index 89ace19961a..597716337be 100644
--- a/chromium/base/synchronization/waitable_event_win.cc
+++ b/chromium/base/synchronization/waitable_event_win.cc
@@ -16,8 +16,12 @@
namespace base {
-WaitableEvent::WaitableEvent(bool manual_reset, bool signaled)
- : handle_(CreateEvent(NULL, manual_reset, signaled, NULL)) {
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+ InitialState initial_state)
+ : handle_(CreateEvent(nullptr,
+ reset_policy == ResetPolicy::MANUAL,
+ initial_state == InitialState::SIGNALED,
+ nullptr)) {
// We're probably going to crash anyways if this is ever NULL, so we might as
// well make our stack reports more informative by crashing here.
CHECK(handle_.IsValid());
@@ -28,8 +32,7 @@ WaitableEvent::WaitableEvent(win::ScopedHandle handle)
CHECK(handle_.IsValid()) << "Tried to create WaitableEvent from NULL handle";
}
-WaitableEvent::~WaitableEvent() {
-}
+WaitableEvent::~WaitableEvent() = default;
void WaitableEvent::Reset() {
ResetEvent(handle_.Get());
diff --git a/chromium/base/sys_byteorder.h b/chromium/base/sys_byteorder.h
index ddb3f5bcda4..8d9066c7022 100644
--- a/chromium/base/sys_byteorder.h
+++ b/chromium/base/sys_byteorder.h
@@ -15,27 +15,35 @@
#include "build/build_config.h"
+#if defined(COMPILER_MSVC)
+#include <stdlib.h>
+#endif
+
namespace base {
// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
inline uint16_t ByteSwap(uint16_t x) {
- return ((x & 0x00ff) << 8) | ((x & 0xff00) >> 8);
+#if defined(COMPILER_MSVC)
+ return _byteswap_ushort(x);
+#else
+ return __builtin_bswap16(x);
+#endif
}
inline uint32_t ByteSwap(uint32_t x) {
- return ((x & 0x000000fful) << 24) | ((x & 0x0000ff00ul) << 8) |
- ((x & 0x00ff0000ul) >> 8) | ((x & 0xff000000ul) >> 24);
+#if defined(COMPILER_MSVC)
+ return _byteswap_ulong(x);
+#else
+ return __builtin_bswap32(x);
+#endif
}
inline uint64_t ByteSwap(uint64_t x) {
- return ((x & 0x00000000000000ffull) << 56) |
- ((x & 0x000000000000ff00ull) << 40) |
- ((x & 0x0000000000ff0000ull) << 24) |
- ((x & 0x00000000ff000000ull) << 8) |
- ((x & 0x000000ff00000000ull) >> 8) |
- ((x & 0x0000ff0000000000ull) >> 24) |
- ((x & 0x00ff000000000000ull) >> 40) |
- ((x & 0xff00000000000000ull) >> 56);
+#if defined(COMPILER_MSVC)
+ return _byteswap_uint64(x);
+#else
+ return __builtin_bswap64(x);
+#endif
}
// Converts the bytes in |x| from host order (endianness) to little endian, and
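The shift-based swaps above are replaced by compiler intrinsics. A standalone sketch, assuming GCC/Clang's __builtin_bswap32 or MSVC's _byteswap_ulong, that checks the intrinsic against the removed shift form (not part of the patch):

// Standalone check: the intrinsic produces the same result as the shift-based
// swap that the patch deletes.
#include <cassert>
#include <cstdint>
#include <cstdlib>  // MSVC: _byteswap_ulong

static uint32_t SwapWithShifts(uint32_t x) {
  return ((x & 0x000000fful) << 24) | ((x & 0x0000ff00ul) << 8) |
         ((x & 0x00ff0000ul) >> 8) | ((x & 0xff000000ul) >> 24);
}

static uint32_t SwapWithIntrinsic(uint32_t x) {
#if defined(_MSC_VER)
  return _byteswap_ulong(x);
#else
  return __builtin_bswap32(x);
#endif
}

int main() {
  const uint32_t value = 0xaabbccdd;
  assert(SwapWithIntrinsic(value) == SwapWithShifts(value));  // 0xddccbbaa
  assert(SwapWithIntrinsic(SwapWithIntrinsic(value)) == value);
  return 0;
}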
diff --git a/chromium/base/sys_byteorder_unittest.cc b/chromium/base/sys_byteorder_unittest.cc
new file mode 100644
index 00000000000..0352c2a97c6
--- /dev/null
+++ b/chromium/base/sys_byteorder_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_byteorder.h"
+
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const uint16_t k16BitTestData = 0xaabb;
+const uint16_t k16BitSwappedTestData = 0xbbaa;
+const uint32_t k32BitTestData = 0xaabbccdd;
+const uint32_t k32BitSwappedTestData = 0xddccbbaa;
+const uint64_t k64BitTestData = 0xaabbccdd44332211;
+const uint64_t k64BitSwappedTestData = 0x11223344ddccbbaa;
+
+} // namespace
+
+TEST(ByteOrderTest, ByteSwap16) {
+ uint16_t swapped = base::ByteSwap(k16BitTestData);
+ EXPECT_EQ(k16BitSwappedTestData, swapped);
+ uint16_t reswapped = base::ByteSwap(swapped);
+ EXPECT_EQ(k16BitTestData, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwap32) {
+ uint32_t swapped = base::ByteSwap(k32BitTestData);
+ EXPECT_EQ(k32BitSwappedTestData, swapped);
+ uint32_t reswapped = base::ByteSwap(swapped);
+ EXPECT_EQ(k32BitTestData, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwap64) {
+ uint64_t swapped = base::ByteSwap(k64BitTestData);
+ EXPECT_EQ(k64BitSwappedTestData, swapped);
+ uint64_t reswapped = base::ByteSwap(swapped);
+ EXPECT_EQ(k64BitTestData, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwapToLE16) {
+ uint16_t le = base::ByteSwapToLE16(k16BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k16BitTestData, le);
+#else
+ EXPECT_EQ(k16BitSwappedTestData, le);
+#endif
+}
+
+TEST(ByteOrderTest, ByteSwapToLE32) {
+ uint32_t le = base::ByteSwapToLE32(k32BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k32BitTestData, le);
+#else
+ EXPECT_EQ(k32BitSwappedTestData, le);
+#endif
+}
+
+TEST(ByteOrderTest, ByteSwapToLE64) {
+ uint64_t le = base::ByteSwapToLE64(k64BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k64BitTestData, le);
+#else
+ EXPECT_EQ(k64BitSwappedTestData, le);
+#endif
+}
+
+TEST(ByteOrderTest, NetToHost16) {
+ uint16_t h = base::NetToHost16(k16BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k16BitSwappedTestData, h);
+#else
+ EXPECT_EQ(k16BitTestData, h);
+#endif
+}
+
+TEST(ByteOrderTest, NetToHost32) {
+ uint32_t h = base::NetToHost32(k32BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k32BitSwappedTestData, h);
+#else
+ EXPECT_EQ(k32BitTestData, h);
+#endif
+}
+
+TEST(ByteOrderTest, NetToHost64) {
+ uint64_t h = base::NetToHost64(k64BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k64BitSwappedTestData, h);
+#else
+ EXPECT_EQ(k64BitTestData, h);
+#endif
+}
+
+TEST(ByteOrderTest, HostToNet16) {
+ uint16_t n = base::HostToNet16(k16BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k16BitSwappedTestData, n);
+#else
+ EXPECT_EQ(k16BitTestData, n);
+#endif
+}
+
+TEST(ByteOrderTest, HostToNet32) {
+ uint32_t n = base::HostToNet32(k32BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k32BitSwappedTestData, n);
+#else
+ EXPECT_EQ(k32BitTestData, n);
+#endif
+}
+
+TEST(ByteOrderTest, HostToNet64) {
+ uint64_t n = base::HostToNet64(k64BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+ EXPECT_EQ(k64BitSwappedTestData, n);
+#else
+ EXPECT_EQ(k64BitTestData, n);
+#endif
+}
diff --git a/chromium/base/sys_info.h b/chromium/base/sys_info.h
index 5686dcbb49d..b10747703d2 100644
--- a/chromium/base/sys_info.h
+++ b/chromium/base/sys_info.h
@@ -50,6 +50,10 @@ class BASE_EXPORT SysInfo {
// or -1 on failure.
static int64_t AmountOfFreeDiskSpace(const FilePath& path);
+ // Return the total disk space in bytes on the volume containing |path|, or -1
+ // on failure.
+ static int64_t AmountOfTotalDiskSpace(const FilePath& path);
+
// Returns system uptime.
static TimeDelta Uptime();
@@ -93,12 +97,6 @@ class BASE_EXPORT SysInfo {
// allocate.
static size_t VMAllocationGranularity();
-#if defined(OS_POSIX) && !defined(OS_MACOSX)
- // Returns the maximum SysV shared memory segment size, or zero if there is no
- // limit.
- static uint64_t MaxSharedMemorySize();
-#endif // defined(OS_POSIX) && !defined(OS_MACOSX)
-
#if defined(OS_CHROMEOS)
typedef std::map<std::string, std::string> LsbReleaseMap;
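Taken together with AmountOfFreeDiskSpace(), the new accessor lets callers report both figures for a volume. A minimal usage sketch built only on the declarations above, with error handling kept deliberately thin:

// Sketch: report free vs. total space for the temp directory. Both accessors
// return -1 on failure.
#include <stdint.h>

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/sys_info.h"

void LogTempDiskSpace() {
  base::FilePath tmp;
  if (!base::GetTempDir(&tmp))
    return;
  const int64_t free_bytes = base::SysInfo::AmountOfFreeDiskSpace(tmp);
  const int64_t total_bytes = base::SysInfo::AmountOfTotalDiskSpace(tmp);
  if (free_bytes >= 0 && total_bytes > 0) {
    LOG(INFO) << "temp volume: " << free_bytes << " of " << total_bytes
              << " bytes free";
  }
}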
diff --git a/chromium/base/sys_info_android.cc b/chromium/base/sys_info_android.cc
index bc0e3fa80da..447c2498de9 100644
--- a/chromium/base/sys_info_android.cc
+++ b/chromium/base/sys_info_android.cc
@@ -61,7 +61,7 @@ namespace {
// Default version of Android to fall back to when actual version numbers
// cannot be acquired. Use the latest Android release with a higher bug fix
// version to avoid unnecessarily comparison errors with the latest release.
-// This should be manually kept up-to-date on each Android release.
+// This should be manually kept up to date on each Android release.
const int kDefaultAndroidMajorVersion = 6;
const int kDefaultAndroidMinorVersion = 0;
const int kDefaultAndroidBugfixVersion = 99;
diff --git a/chromium/base/sys_info_linux.cc b/chromium/base/sys_info_linux.cc
index 300ef2c0c8b..298d245ecf5 100644
--- a/chromium/base/sys_info_linux.cc
+++ b/chromium/base/sys_info_linux.cc
@@ -33,28 +33,9 @@ int64_t AmountOfPhysicalMemory() {
return AmountOfMemory(_SC_PHYS_PAGES);
}
-uint64_t MaxSharedMemorySize() {
- std::string contents;
- base::ReadFileToString(base::FilePath("/proc/sys/kernel/shmmax"), &contents);
- DCHECK(!contents.empty());
- if (!contents.empty() && contents.back() == '\n') {
- contents.erase(contents.length() - 1);
- }
-
- uint64_t limit;
- if (!base::StringToUint64(contents, &limit)) {
- limit = 0;
- }
- DCHECK_GT(limit, 0u);
- return limit;
-}
-
base::LazyInstance<
base::internal::LazySysInfoValue<int64_t, AmountOfPhysicalMemory>>::Leaky
g_lazy_physical_memory = LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<
- base::internal::LazySysInfoValue<uint64_t, MaxSharedMemorySize>>::Leaky
- g_lazy_max_shared_memory = LAZY_INSTANCE_INITIALIZER;
} // namespace
@@ -71,11 +52,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
}
// static
-uint64_t SysInfo::MaxSharedMemorySize() {
- return g_lazy_max_shared_memory.Get().value();
-}
-
-// static
std::string SysInfo::CPUModelName() {
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
const char kCpuModelPrefix[] = "Hardware";
diff --git a/chromium/base/sys_info_posix.cc b/chromium/base/sys_info_posix.cc
index 85ae0391189..5d1c450139a 100644
--- a/chromium/base/sys_info_posix.cc
+++ b/chromium/base/sys_info_posix.cc
@@ -73,6 +73,20 @@ base::LazyInstance<
base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
+bool GetDiskSpaceInfo(const base::FilePath& path,
+ int64_t* available_bytes,
+ int64_t* total_bytes) {
+ struct statvfs stats;
+ if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+ return false;
+
+ if (available_bytes)
+ *available_bytes = static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+ if (total_bytes)
+ *total_bytes = static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
+ return true;
+}
+
} // namespace
namespace base {
@@ -92,10 +106,20 @@ int64_t SysInfo::AmountOfVirtualMemory() {
int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
base::ThreadRestrictions::AssertIOAllowed();
- struct statvfs stats;
- if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+ int64_t available;
+ if (!GetDiskSpaceInfo(path, &available, nullptr))
+ return -1;
+ return available;
+}
+
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+ base::ThreadRestrictions::AssertIOAllowed();
+
+ int64_t total;
+ if (!GetDiskSpaceInfo(path, nullptr, &total))
return -1;
- return static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+ return total;
}
#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
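The new helper derives both figures from a single statvfs() call, with f_frsize as the multiplier for both block counts. A standalone POSIX sketch of the same arithmetic, assuming only <sys/statvfs.h> (not part of the patch):

// Standalone POSIX sketch: free vs. total bytes on the volume containing a
// path, mirroring the GetDiskSpaceInfo() helper added above.
#include <sys/statvfs.h>

#include <cstdint>
#include <cstdio>

int main(int argc, char** argv) {
  const char* path = argc > 1 ? argv[1] : "/tmp";
  struct statvfs stats;
  if (statvfs(path, &stats) != 0) {
    std::perror("statvfs");
    return 1;
  }
  // f_frsize is the fundamental block size; f_bavail counts blocks available
  // to unprivileged callers, f_blocks the total blocks on the filesystem.
  const int64_t available =
      static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
  const int64_t total = static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
  std::printf("%s: %lld of %lld bytes available\n", path,
              static_cast<long long>(available), static_cast<long long>(total));
  return 0;
}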
diff --git a/chromium/base/sys_info_unittest.cc b/chromium/base/sys_info_unittest.cc
index 3f284ba8684..0231df63798 100644
--- a/chromium/base/sys_info_unittest.cc
+++ b/chromium/base/sys_info_unittest.cc
@@ -16,13 +16,6 @@
typedef PlatformTest SysInfoTest;
using base::FilePath;
-#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
-TEST_F(SysInfoTest, MaxSharedMemorySize) {
- // We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GT(base::SysInfo::MaxSharedMemorySize(), 0u);
-}
-#endif
-
TEST_F(SysInfoTest, NumProcs) {
// We aren't actually testing that it's correct, just that it's sane.
EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
@@ -40,7 +33,15 @@ TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GT(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+ EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+ << tmp_path.value();
+}
+
+TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
+ // We aren't actually testing that it's correct, just that it's sane.
+ FilePath tmp_path;
+ ASSERT_TRUE(base::GetTempDir(&tmp_path));
+ EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
<< tmp_path.value();
}
diff --git a/chromium/base/sys_info_win.cc b/chromium/base/sys_info_win.cc
index d0a77700f8f..cb184bae258 100644
--- a/chromium/base/sys_info_win.cc
+++ b/chromium/base/sys_info_win.cc
@@ -30,6 +30,28 @@ int64_t AmountOfMemory(DWORDLONG MEMORYSTATUSEX::*memory_field) {
return rv < 0 ? std::numeric_limits<int64_t>::max() : rv;
}
+bool GetDiskSpaceInfo(const base::FilePath& path,
+ int64_t* available_bytes,
+ int64_t* total_bytes) {
+ ULARGE_INTEGER available;
+ ULARGE_INTEGER total;
+ ULARGE_INTEGER free;
+ if (!GetDiskFreeSpaceExW(path.value().c_str(), &available, &total, &free))
+ return false;
+
+ if (available_bytes) {
+ *available_bytes = static_cast<int64_t>(available.QuadPart);
+ if (*available_bytes < 0)
+ *available_bytes = std::numeric_limits<int64_t>::max();
+ }
+ if (total_bytes) {
+ *total_bytes = static_cast<int64_t>(total.QuadPart);
+ if (*total_bytes < 0)
+ *total_bytes = std::numeric_limits<int64_t>::max();
+ }
+ return true;
+}
+
} // namespace
namespace base {
@@ -58,12 +80,20 @@ int64_t SysInfo::AmountOfVirtualMemory() {
int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
ThreadRestrictions::AssertIOAllowed();
- ULARGE_INTEGER available, total, free;
- if (!GetDiskFreeSpaceExW(path.value().c_str(), &available, &total, &free))
+ int64_t available;
+ if (!GetDiskSpaceInfo(path, &available, nullptr))
return -1;
+ return available;
+}
- int64_t rv = static_cast<int64_t>(available.QuadPart);
- return rv < 0 ? std::numeric_limits<int64_t>::max() : rv;
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+ ThreadRestrictions::AssertIOAllowed();
+
+ int64_t total;
+ if (!GetDiskSpaceInfo(path, nullptr, &total))
+ return -1;
+ return total;
}
std::string SysInfo::OperatingSystemName() {
@@ -74,8 +104,9 @@ std::string SysInfo::OperatingSystemName() {
std::string SysInfo::OperatingSystemVersion() {
win::OSInfo* os_info = win::OSInfo::GetInstance();
win::OSInfo::VersionNumber version_number = os_info->version_number();
- std::string version(StringPrintf("%d.%d", version_number.major,
- version_number.minor));
+ std::string version(StringPrintf("%d.%d.%d", version_number.major,
+ version_number.minor,
+ version_number.build));
win::OSInfo::ServicePack service_pack = os_info->service_pack();
if (service_pack.major != 0) {
version += StringPrintf(" SP%d", service_pack.major);
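The Windows helper performs the analogous query through GetDiskFreeSpaceExW(). A standalone Win32 sketch of that call, assuming only <windows.h> and a Windows build environment (not part of the patch):

// Standalone Win32 sketch: available and total bytes for a directory,
// mirroring the GetDiskSpaceInfo() helper added above.
#include <windows.h>

#include <cstdio>

int main() {
  ULARGE_INTEGER available, total, free_bytes;
  if (!GetDiskFreeSpaceExW(L"C:\\", &available, &total, &free_bytes)) {
    std::fprintf(stderr, "GetDiskFreeSpaceExW failed: %lu\n", GetLastError());
    return 1;
  }
  // QuadPart is an unsigned 64-bit value; the patch clamps negative int64_t
  // casts to INT64_MAX, which only matters for extremely large volumes.
  std::printf("C:\\ %llu of %llu bytes available to this user\n",
              static_cast<unsigned long long>(available.QuadPart),
              static_cast<unsigned long long>(total.QuadPart));
  return 0;
}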
diff --git a/chromium/base/task_runner.h b/chromium/base/task_runner.h
index 6dd82ccaca9..9593835eebb 100644
--- a/chromium/base/task_runner.h
+++ b/chromium/base/task_runner.h
@@ -9,13 +9,10 @@
#include "base/base_export.h"
#include "base/callback_forward.h"
+#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
namespace base {
struct TaskRunnerTraits;
diff --git a/chromium/base/task_scheduler/delayed_task_manager.cc b/chromium/base/task_scheduler/delayed_task_manager.cc
index 6286d5ef4f6..d648b9d4628 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager.cc
@@ -7,7 +7,7 @@
#include <utility>
#include "base/logging.h"
-#include "base/task_scheduler/scheduler_thread_pool.h"
+#include "base/task_scheduler/scheduler_worker_pool.h"
namespace base {
namespace internal {
@@ -15,13 +15,13 @@ namespace internal {
struct DelayedTaskManager::DelayedTask {
DelayedTask(std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread,
- SchedulerThreadPool* thread_pool,
+ SchedulerWorker* worker,
+ SchedulerWorkerPool* worker_pool,
uint64_t index)
: task(std::move(task)),
sequence(std::move(sequence)),
- worker_thread(worker_thread),
- thread_pool(thread_pool),
+ worker(worker),
+ worker_pool(worker_pool),
index(index) {}
DelayedTask(DelayedTask&& other) = default;
@@ -30,12 +30,12 @@ struct DelayedTaskManager::DelayedTask {
DelayedTask& operator=(DelayedTask&& other) = default;
- // |task| will be posted to |thread_pool| with |sequence| and |worker_thread|
+ // |task| will be posted to |worker_pool| with |sequence| and |worker|
// when it becomes ripe for execution.
std::unique_ptr<Task> task;
scoped_refptr<Sequence> sequence;
- SchedulerWorkerThread* worker_thread;
- SchedulerThreadPool* thread_pool;
+ SchedulerWorker* worker;
+ SchedulerWorkerPool* worker_pool;
// Ensures that tasks that have the same |delayed_run_time| are sorted
// according to the order in which they were added to the DelayedTaskManager.
@@ -55,11 +55,11 @@ DelayedTaskManager::~DelayedTaskManager() = default;
void DelayedTaskManager::AddDelayedTask(std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread,
- SchedulerThreadPool* thread_pool) {
+ SchedulerWorker* worker,
+ SchedulerWorkerPool* worker_pool) {
DCHECK(task);
DCHECK(sequence);
- DCHECK(thread_pool);
+ DCHECK(worker_pool);
const TimeTicks new_task_delayed_run_time = task->delayed_run_time;
TimeTicks current_delayed_run_time;
@@ -70,8 +70,8 @@ void DelayedTaskManager::AddDelayedTask(std::unique_ptr<Task> task,
if (!delayed_tasks_.empty())
current_delayed_run_time = delayed_tasks_.top().task->delayed_run_time;
- delayed_tasks_.emplace(std::move(task), std::move(sequence), worker_thread,
- thread_pool, ++delayed_task_index_);
+ delayed_tasks_.emplace(std::move(task), std::move(sequence), worker,
+ worker_pool, ++delayed_task_index_);
}
if (current_delayed_run_time.is_null() ||
@@ -103,9 +103,9 @@ void DelayedTaskManager::PostReadyTasks() {
// Post delayed tasks that are ready for execution.
for (auto& delayed_task : ready_tasks) {
- delayed_task.thread_pool->PostTaskWithSequenceNow(
+ delayed_task.worker_pool->PostTaskWithSequenceNow(
std::move(delayed_task.task), std::move(delayed_task.sequence),
- delayed_task.worker_thread);
+ delayed_task.worker);
}
}
diff --git a/chromium/base/task_scheduler/delayed_task_manager.h b/chromium/base/task_scheduler/delayed_task_manager.h
index f5690cd67c1..d773fe5da94 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.h
+++ b/chromium/base/task_scheduler/delayed_task_manager.h
@@ -23,8 +23,8 @@
namespace base {
namespace internal {
-class SchedulerThreadPool;
-class SchedulerWorkerThread;
+class SchedulerWorker;
+class SchedulerWorkerPool;
// A DelayedTaskManager holds delayed Tasks until they become ripe for
// execution. This class is thread-safe.
@@ -36,18 +36,17 @@ class BASE_EXPORT DelayedTaskManager {
~DelayedTaskManager();
// Adds |task| to a queue of delayed tasks. The task will be posted to
- // |thread_pool| with |sequence| and |worker_thread| the first time that
+ // |worker_pool| with |sequence| and |worker| the first time that
// PostReadyTasks() is called while Now() is passed |task->delayed_run_time|.
- // |worker_thread| is a SchedulerWorkerThread owned by |thread_pool| or
- // nullptr.
+ // |worker| is a SchedulerWorker owned by |worker_pool| or nullptr.
//
- // TODO(robliao): Find a concrete way to manage the memory of |worker_thread|
- // and |thread_pool|. These objects are never deleted in production, but it is
+ // TODO(robliao): Find a concrete way to manage the memory of |worker| and
+ // |worker_pool|. These objects are never deleted in production, but it is
// better not to spread this assumption throughout the scheduler.
void AddDelayedTask(std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread,
- SchedulerThreadPool* thread_pool);
+ SchedulerWorker* worker,
+ SchedulerWorkerPool* worker_pool);
// Posts delayed tasks that are ripe for execution.
void PostReadyTasks();
diff --git a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
index dc9c15772c4..c1c85ef5e8b 100644
--- a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
@@ -12,7 +12,7 @@
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/task_scheduler/scheduler_lock.h"
-#include "base/task_scheduler/scheduler_thread_pool.h"
+#include "base/task_scheduler/scheduler_worker_pool.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
#include "base/time/time.h"
@@ -43,9 +43,9 @@ class TestDelayedTaskManager : public DelayedTaskManager {
DISALLOW_COPY_AND_ASSIGN(TestDelayedTaskManager);
};
-class MockSchedulerThreadPool : public SchedulerThreadPool {
+class MockSchedulerWorkerPool : public SchedulerWorkerPool {
public:
- // SchedulerThreadPool:
+ // SchedulerWorkerPool:
scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
const TaskTraits& traits,
ExecutionMode execution_mode) override {
@@ -60,21 +60,21 @@ class MockSchedulerThreadPool : public SchedulerThreadPool {
bool PostTaskWithSequence(std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) override {
+ SchedulerWorker* worker) override {
NOTREACHED();
return true;
}
void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) override {
- PostTaskWithSequenceNowMock(task.get(), sequence.get(), worker_thread);
+ SchedulerWorker* worker) override {
+ PostTaskWithSequenceNowMock(task.get(), sequence.get(), worker);
}
MOCK_METHOD3(PostTaskWithSequenceNowMock,
void(const Task*,
const Sequence*,
- const SchedulerWorkerThread* worker_thread));
+ const SchedulerWorker* worker));
};
} // namespace
@@ -96,16 +96,16 @@ TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTaskBeforeDelayedRunTime) {
task->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
const Task* task_raw = task.get();
scoped_refptr<Sequence> sequence(new Sequence);
- testing::StrictMock<MockSchedulerThreadPool> thread_pool;
+ testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
// Add |task| to the DelayedTaskManager.
EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
- manager.AddDelayedTask(std::move(task), sequence, nullptr, &thread_pool);
+ manager.AddDelayedTask(std::move(task), sequence, nullptr, &worker_pool);
testing::Mock::VerifyAndClear(&manager);
EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
// Ask the DelayedTaskManager to post tasks that are ripe for execution. Don't
- // expect any call to the mock method of |thread_pool|.
+ // expect any call to the mock method of |worker_pool|.
manager.PostReadyTasks();
// The delayed run time shouldn't have changed.
@@ -122,11 +122,11 @@ TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTasksAtDelayedRunTime) {
task->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
const Task* task_raw = task.get();
scoped_refptr<Sequence> sequence(new Sequence);
- testing::StrictMock<MockSchedulerThreadPool> thread_pool;
+ testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
// Add |task| to the DelayedTaskManager.
EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
- manager.AddDelayedTask(std::move(task), sequence, nullptr, &thread_pool);
+ manager.AddDelayedTask(std::move(task), sequence, nullptr, &worker_pool);
testing::Mock::VerifyAndClear(&manager);
EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
@@ -134,7 +134,7 @@ TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTasksAtDelayedRunTime) {
manager.SetCurrentTime(task_raw->delayed_run_time);
// Ask the DelayedTaskManager to post tasks that are ripe for execution.
- EXPECT_CALL(thread_pool,
+ EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_raw, sequence.get(), nullptr));
manager.PostReadyTasks();
testing::Mock::VerifyAndClear(&manager);
@@ -151,11 +151,11 @@ TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTasksAfterDelayedRunTime) {
task->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
const Task* task_raw = task.get();
scoped_refptr<Sequence> sequence(new Sequence);
- testing::StrictMock<MockSchedulerThreadPool> thread_pool;
+ testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
// Add |task| to the DelayedTaskManager.
EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
- manager.AddDelayedTask(std::move(task), sequence, nullptr, &thread_pool);
+ manager.AddDelayedTask(std::move(task), sequence, nullptr, &worker_pool);
testing::Mock::VerifyAndClear(&manager);
EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
@@ -164,7 +164,7 @@ TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTasksAfterDelayedRunTime) {
TimeDelta::FromSeconds(10));
// Ask the DelayedTaskManager to post tasks that are ripe for execution.
- EXPECT_CALL(thread_pool,
+ EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_raw, sequence.get(), nullptr));
manager.PostReadyTasks();
testing::Mock::VerifyAndClear(&manager);
@@ -177,7 +177,7 @@ TEST(TaskSchedulerDelayedTaskManagerTest, AddAndPostReadyTasks) {
testing::StrictMock<TestDelayedTaskManager> manager;
scoped_refptr<Sequence> sequence(new Sequence);
- testing::StrictMock<MockSchedulerThreadPool> thread_pool;
+ testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
std::unique_ptr<Task> task_a(
new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
@@ -197,20 +197,20 @@ TEST(TaskSchedulerDelayedTaskManagerTest, AddAndPostReadyTasks) {
// Add |task_a| to the DelayedTaskManager. The delayed run time should be
// updated to |task_a|'s delayed run time.
EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
- manager.AddDelayedTask(std::move(task_a), sequence, nullptr, &thread_pool);
+ manager.AddDelayedTask(std::move(task_a), sequence, nullptr, &worker_pool);
testing::Mock::VerifyAndClear(&manager);
EXPECT_EQ(task_a_raw->delayed_run_time, manager.GetDelayedRunTime());
// Add |task_b| to the DelayedTaskManager. The delayed run time shouldn't
// change.
- manager.AddDelayedTask(std::move(task_b), sequence, nullptr, &thread_pool);
+ manager.AddDelayedTask(std::move(task_b), sequence, nullptr, &worker_pool);
testing::Mock::VerifyAndClear(&manager);
EXPECT_EQ(task_a_raw->delayed_run_time, manager.GetDelayedRunTime());
// Add |task_c| to the DelayedTaskManager. The delayed run time should be
// updated to |task_c|'s delayed run time.
EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
- manager.AddDelayedTask(std::move(task_c), sequence, nullptr, &thread_pool);
+ manager.AddDelayedTask(std::move(task_c), sequence, nullptr, &worker_pool);
testing::Mock::VerifyAndClear(&manager);
EXPECT_EQ(task_c_raw->delayed_run_time, manager.GetDelayedRunTime());
@@ -220,10 +220,10 @@ TEST(TaskSchedulerDelayedTaskManagerTest, AddAndPostReadyTasks) {
// Ask the DelayedTaskManager to post tasks that are ripe for execution.
// |task_c_raw| should be posted and the delayed run time should become
// |task_a_raw|'s delayed run time.
- EXPECT_CALL(thread_pool,
+ EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_c_raw, sequence.get(), nullptr));
manager.PostReadyTasks();
- testing::Mock::VerifyAndClear(&thread_pool);
+ testing::Mock::VerifyAndClear(&worker_pool);
EXPECT_EQ(task_a_raw->delayed_run_time, manager.GetDelayedRunTime());
// Fast-forward time to |task_a_raw|'s delayed run time.
@@ -232,12 +232,12 @@ TEST(TaskSchedulerDelayedTaskManagerTest, AddAndPostReadyTasks) {
// Ask the DelayedTaskManager to post tasks that are ripe for execution.
// |task_a_raw| and |task_b_raw| should be posted and the delayed run time
// should become a null TimeTicks.
- EXPECT_CALL(thread_pool,
+ EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_a_raw, sequence.get(), nullptr));
- EXPECT_CALL(thread_pool,
+ EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_b_raw, sequence.get(), nullptr));
manager.PostReadyTasks();
- testing::Mock::VerifyAndClear(&thread_pool);
+ testing::Mock::VerifyAndClear(&worker_pool);
EXPECT_EQ(TimeTicks(), manager.GetDelayedRunTime());
}
diff --git a/chromium/base/task_scheduler/priority_queue.h b/chromium/base/task_scheduler/priority_queue.h
index ac1dbdeb977..b34c1d5c654 100644
--- a/chromium/base/task_scheduler/priority_queue.h
+++ b/chromium/base/task_scheduler/priority_queue.h
@@ -27,11 +27,11 @@ class BASE_EXPORT PriorityQueue {
// PriorityQueue. While a Transaction is alive, it is guaranteed that nothing
// else will access the PriorityQueue.
//
- // A WorkerThread needs to be able to Peek sequences from both its
- // PriorityQueues (single-threaded and shared) and then Pop the sequence with
- // the highest priority. If the Peek and the Pop are done through the same
- // Transaction, it is guaranteed that the PriorityQueue hasn't changed between
- // the 2 operations.
+ // A Worker needs to be able to Peek sequences from both its PriorityQueues
+ // (single-threaded and shared) and then Pop the sequence with the highest
+ // priority. If the Peek and the Pop are done through the same Transaction, it
+ // is guaranteed that the PriorityQueue hasn't changed between the 2
+ // operations.
class BASE_EXPORT Transaction : public NonThreadSafe {
public:
~Transaction();
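The Transaction comment above describes peeking and popping under one lock so the queue cannot change between the two operations. A self-contained sketch of that pattern using invented types (not the real PriorityQueue API):

// Hypothetical illustration of the peek-then-pop-under-one-transaction idea;
// all names below are made up for the sketch.
#include <mutex>
#include <queue>

class GuardedQueue {
 public:
  // RAII transaction: holds the lock for its whole lifetime, so Peek() and
  // Pop() observe and mutate the same queue state.
  class Transaction {
   public:
    explicit Transaction(GuardedQueue* queue)
        : queue_(queue), lock_(queue->mutex_) {}
    bool Empty() const { return queue_->values_.empty(); }
    int Peek() const { return queue_->values_.top(); }
    void Pop() { queue_->values_.pop(); }
    void Push(int value) { queue_->values_.push(value); }

   private:
    GuardedQueue* const queue_;
    std::lock_guard<std::mutex> lock_;
  };

 private:
  std::mutex mutex_;
  std::priority_queue<int> values_;
};

int HighestOrDefault(GuardedQueue* queue, int fallback) {
  GuardedQueue::Transaction transaction(queue);  // Lock held from here...
  if (transaction.Empty())
    return fallback;
  const int highest = transaction.Peek();        // ...so the element seen here
  transaction.Pop();                             // is the one removed here.
  return highest;
}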
diff --git a/chromium/base/task_scheduler/priority_queue_unittest.cc b/chromium/base/task_scheduler/priority_queue_unittest.cc
index cf8d0991dfe..d78790538d4 100644
--- a/chromium/base/task_scheduler/priority_queue_unittest.cc
+++ b/chromium/base/task_scheduler/priority_queue_unittest.cc
@@ -29,7 +29,8 @@ class ThreadBeginningTransaction : public SimpleThread {
explicit ThreadBeginningTransaction(PriorityQueue* priority_queue)
: SimpleThread("ThreadBeginningTransaction"),
priority_queue_(priority_queue),
- transaction_began_(true, false) {}
+ transaction_began_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
// SimpleThread:
void Run() override {
diff --git a/chromium/base/task_scheduler/scheduler_lock_unittest.cc b/chromium/base/task_scheduler/scheduler_lock_unittest.cc
index 6267559d1ec..daa50257f18 100644
--- a/chromium/base/task_scheduler/scheduler_lock_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_lock_unittest.cc
@@ -56,8 +56,11 @@ class BasicLockAcquireAndWaitThread : public SimpleThread {
explicit BasicLockAcquireAndWaitThread(SchedulerLock* lock)
: SimpleThread("BasicLockAcquireAndWaitThread"),
lock_(lock),
- lock_acquire_event_(false, false),
- main_thread_continue_event_(false, false) {}
+ lock_acquire_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ main_thread_continue_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
+ }
void WaitForLockAcquisition() {
lock_acquire_event_.Wait();
diff --git a/chromium/base/task_scheduler/scheduler_service_thread.cc b/chromium/base/task_scheduler/scheduler_service_thread.cc
index 48c99729ae4..562e5c9058a 100644
--- a/chromium/base/task_scheduler/scheduler_service_thread.cc
+++ b/chromium/base/task_scheduler/scheduler_service_thread.cc
@@ -10,7 +10,7 @@
#include "base/memory/ptr_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/delayed_task_manager.h"
-#include "base/task_scheduler/scheduler_worker_thread.h"
+#include "base/task_scheduler/scheduler_worker.h"
#include "base/task_scheduler/sequence.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
@@ -19,16 +19,15 @@ namespace base {
namespace internal {
namespace {
-class ServiceThreadDelegate : public SchedulerWorkerThread::Delegate {
+class ServiceThreadDelegate : public SchedulerWorker::Delegate {
public:
ServiceThreadDelegate(DelayedTaskManager* delayed_task_manager)
: delayed_task_manager_(delayed_task_manager) {}
- // SchedulerWorkerThread::Delegate:
- void OnMainEntry(SchedulerWorkerThread* worker_thread) override {}
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(SchedulerWorker* worker) override {}
- scoped_refptr<Sequence> GetWork(SchedulerWorkerThread* worker_thread)
- override {
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
delayed_task_manager_->PostReadyTasks();
return nullptr;
}
@@ -52,6 +51,10 @@ class ServiceThreadDelegate : public SchedulerWorkerThread::Delegate {
return sleep_time < zero_delta ? zero_delta : sleep_time;
}
+ bool CanDetach(SchedulerWorker* worker) override {
+ return false;
+ }
+
private:
DelayedTaskManager* const delayed_task_manager_;
@@ -65,28 +68,28 @@ SchedulerServiceThread::~SchedulerServiceThread() = default;
// static
std::unique_ptr<SchedulerServiceThread> SchedulerServiceThread::Create(
TaskTracker* task_tracker, DelayedTaskManager* delayed_task_manager) {
- std::unique_ptr<SchedulerWorkerThread> worker_thread =
- SchedulerWorkerThread::Create(
+ std::unique_ptr<SchedulerWorker> worker =
+ SchedulerWorker::Create(
ThreadPriority::NORMAL,
WrapUnique(new ServiceThreadDelegate(delayed_task_manager)),
- task_tracker);
- if (!worker_thread)
+ task_tracker,
+ SchedulerWorker::InitialState::ALIVE);
+ if (!worker)
return nullptr;
- return WrapUnique(new SchedulerServiceThread(std::move(worker_thread)));
+ return WrapUnique(new SchedulerServiceThread(std::move(worker)));
}
void SchedulerServiceThread::WakeUp() {
- worker_thread_->WakeUp();
+ worker_->WakeUp();
}
void SchedulerServiceThread::JoinForTesting() {
- worker_thread_->JoinForTesting();
+ worker_->JoinForTesting();
}
SchedulerServiceThread::SchedulerServiceThread(
- std::unique_ptr<SchedulerWorkerThread> worker_thread)
- : worker_thread_(std::move(worker_thread)) {}
+ std::unique_ptr<SchedulerWorker> worker) : worker_(std::move(worker)) {}
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_service_thread.h b/chromium/base/task_scheduler/scheduler_service_thread.h
index d2cc9eef097..e6c9fd0cb97 100644
--- a/chromium/base/task_scheduler/scheduler_service_thread.h
+++ b/chromium/base/task_scheduler/scheduler_service_thread.h
@@ -14,7 +14,7 @@ namespace base {
namespace internal {
class DelayedTaskManager;
-class SchedulerWorkerThread;
+class SchedulerWorker;
class TaskTracker;
// A thread dedicated to performing Task Scheduler related work.
@@ -23,8 +23,8 @@ class BASE_EXPORT SchedulerServiceThread {
~SchedulerServiceThread();
// Creates a SchedulerServiceThread. |task_tracker| and |delayed_task_manager|
- // are passed through to the underlying SchedulerWorkerThread. Returns a
- // nullptr on failure.
+ // are passed through to the underlying SchedulerWorker. Returns a nullptr on
+ // failure.
static std::unique_ptr<SchedulerServiceThread> Create(
TaskTracker* task_tracker, DelayedTaskManager* delayed_task_manager);
@@ -37,9 +37,9 @@ class BASE_EXPORT SchedulerServiceThread {
void JoinForTesting();
private:
- SchedulerServiceThread(std::unique_ptr<SchedulerWorkerThread> worker_thread);
+ SchedulerServiceThread(std::unique_ptr<SchedulerWorker> worker);
- const std::unique_ptr<SchedulerWorkerThread> worker_thread_;
+ const std::unique_ptr<SchedulerWorker> worker_;
DISALLOW_COPY_AND_ASSIGN(SchedulerServiceThread);
};
diff --git a/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc b/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc
index e40df884228..c2a4162bd64 100644
--- a/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc
@@ -14,7 +14,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/delayed_task_manager.h"
-#include "base/task_scheduler/scheduler_thread_pool_impl.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
#include "base/task_scheduler/task_tracker.h"
@@ -53,19 +53,19 @@ class TaskSchedulerServiceThreadTest : public testing::Test {
TaskSchedulerServiceThreadTest() : delayed_task_manager_(Bind(&DoNothing)) {}
void SetUp() override {
- scheduler_thread_pool_ = SchedulerThreadPoolImpl::Create(
- "TestThreadPoolForSchedulerThread", ThreadPriority::BACKGROUND, 1u,
- SchedulerThreadPoolImpl::IORestriction::DISALLOWED,
+ scheduler_worker_pool_ = SchedulerWorkerPoolImpl::Create(
+ "TestWorkerPoolForSchedulerServiceThread", ThreadPriority::BACKGROUND,
+ 1u, SchedulerWorkerPoolImpl::IORestriction::DISALLOWED,
Bind(&ReEnqueueSequenceCallback), &task_tracker_,
&delayed_task_manager_);
- ASSERT_TRUE(scheduler_thread_pool_);
+ ASSERT_TRUE(scheduler_worker_pool_);
service_thread_ = SchedulerServiceThread::Create(
&task_tracker_, &delayed_task_manager_);
ASSERT_TRUE(service_thread_);
}
void TearDown() override {
- scheduler_thread_pool_->JoinForTesting();
+ scheduler_worker_pool_->JoinForTesting();
service_thread_->JoinForTesting();
}
@@ -77,8 +77,8 @@ class TaskSchedulerServiceThreadTest : public testing::Test {
return delayed_task_manager_;
}
- SchedulerThreadPoolImpl* thread_pool() {
- return scheduler_thread_pool_.get();
+ SchedulerWorkerPoolImpl* worker_pool() {
+ return scheduler_worker_pool_.get();
}
private:
@@ -88,7 +88,7 @@ class TaskSchedulerServiceThreadTest : public testing::Test {
DelayedTaskManager delayed_task_manager_;
TaskTracker task_tracker_;
- std::unique_ptr<SchedulerThreadPoolImpl> scheduler_thread_pool_;
+ std::unique_ptr<SchedulerWorkerPoolImpl> scheduler_worker_pool_;
std::unique_ptr<SchedulerServiceThread> service_thread_;
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerServiceThreadTest);
@@ -98,12 +98,13 @@ class TaskSchedulerServiceThreadTest : public testing::Test {
// Tests that the service thread can handle a single delayed task.
TEST_F(TaskSchedulerServiceThreadTest, RunSingleDelayedTask) {
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
delayed_task_manager().AddDelayedTask(
WrapUnique(new Task(FROM_HERE,
Bind(&WaitableEvent::Signal, Unretained(&event)),
TaskTraits(), TimeDelta::FromMilliseconds(100))),
- make_scoped_refptr(new Sequence), nullptr, thread_pool());
+ make_scoped_refptr(new Sequence), nullptr, worker_pool());
// Waking the service thread shouldn't cause the task to be executed per its
// delay not having expired (racy in theory, see test-fixture meta-comment).
service_thread()->WakeUp();
@@ -122,19 +123,21 @@ TEST_F(TaskSchedulerServiceThreadTest, RunMultipleDelayedTasks) {
const TimeDelta delay1 = TimeDelta::FromMilliseconds(100);
const TimeDelta delay2 = TimeDelta::FromMilliseconds(200);
- WaitableEvent event1(true, false);
+ WaitableEvent event1(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
delayed_task_manager().AddDelayedTask(
WrapUnique(new Task(FROM_HERE,
Bind(&WaitableEvent::Signal, Unretained(&event1)),
TaskTraits(), delay1)),
- make_scoped_refptr(new Sequence), nullptr, thread_pool());
+ make_scoped_refptr(new Sequence), nullptr, worker_pool());
- WaitableEvent event2(true, false);
+ WaitableEvent event2(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
delayed_task_manager().AddDelayedTask(
WrapUnique(new Task(FROM_HERE,
Bind(&WaitableEvent::Signal, Unretained(&event2)),
TaskTraits(), delay2)),
- make_scoped_refptr(new Sequence), nullptr, thread_pool());
+ make_scoped_refptr(new Sequence), nullptr, worker_pool());
// Adding the task shouldn't have caused them to be executed.
EXPECT_FALSE(event1.IsSignaled());
diff --git a/chromium/base/task_scheduler/scheduler_thread_pool.h b/chromium/base/task_scheduler/scheduler_thread_pool.h
deleted file mode 100644
index 774b1113765..00000000000
--- a/chromium/base/task_scheduler/scheduler_thread_pool.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SCHEDULER_THREAD_POOL_H_
-#define BASE_TASK_SCHEDULER_SCHEDULER_THREAD_POOL_H_
-
-#include <memory>
-
-#include "base/base_export.h"
-#include "base/memory/ref_counted.h"
-#include "base/task_runner.h"
-#include "base/task_scheduler/sequence.h"
-#include "base/task_scheduler/task.h"
-#include "base/task_scheduler/task_traits.h"
-
-namespace base {
-namespace internal {
-
-class SchedulerWorkerThread;
-class SequenceSortKey;
-
-// Interface for a thread pool.
-class BASE_EXPORT SchedulerThreadPool {
- public:
- virtual ~SchedulerThreadPool() = default;
-
- // Returns a TaskRunner whose PostTask invocations will result in scheduling
- // Tasks with |traits| and |execution_mode| in this SchedulerThreadPool.
- virtual scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) = 0;
-
- // Inserts |sequence| with |sequence_sort_key| into a queue of Sequences that
- // can be processed by any worker thread owned by this SchedulerThreadPool.
- // Must only be used to put |sequence| back into a queue after running a Task
- // from it. The thread that calls this doesn't have to belong to this
- // SchedulerThreadPool.
- virtual void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
- const SequenceSortKey& sequence_sort_key) = 0;
-
- // Posts |task| to be executed by this SchedulerThreadPool as part of
- // |sequence|. If |worker_thread| is non-null, |task| will be scheduled to run
- // on it specifically (note: |worker_thread| must be owned by this
- // SchedulerThreadPool); otherwise, |task| will be added to the pending shared
- // work. |task| won't be executed before its delayed run time, if any. Returns
- // true if |task| is posted.
- virtual bool PostTaskWithSequence(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) = 0;
-
- // Posts |task| to be executed by this SchedulerThreadPool as part of
- // |sequence|. If |worker_thread| is non-null, |task| will be scheduled to run
- // on it specifically (note: |worker_thread| must be owned by this
- // SchedulerThreadPool); otherwise, |task| will be added to the pending shared
- // work. This must only be called after |task| has gone through
- // PostTaskWithSequence() and after |task|'s delayed run time.
- virtual void PostTaskWithSequenceNow(
- std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) = 0;
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_TASK_SCHEDULER_SCHEDULER_THREAD_POOL_H_
diff --git a/chromium/base/task_scheduler/scheduler_thread_pool_impl.h b/chromium/base/task_scheduler/scheduler_thread_pool_impl.h
deleted file mode 100644
index 1c1f1869e4f..00000000000
--- a/chromium/base/task_scheduler/scheduler_thread_pool_impl.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SCHEDULER_THREAD_POOL_IMPL_H_
-#define BASE_TASK_SCHEDULER_SCHEDULER_THREAD_POOL_IMPL_H_
-
-#include <stddef.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/strings/string_piece.h"
-#include "base/synchronization/condition_variable.h"
-#include "base/task_runner.h"
-#include "base/task_scheduler/priority_queue.h"
-#include "base/task_scheduler/scheduler_lock.h"
-#include "base/task_scheduler/scheduler_thread_pool.h"
-#include "base/task_scheduler/scheduler_worker_thread.h"
-#include "base/task_scheduler/scheduler_worker_thread_stack.h"
-#include "base/task_scheduler/sequence.h"
-#include "base/task_scheduler/task.h"
-#include "base/task_scheduler/task_traits.h"
-#include "base/threading/platform_thread.h"
-
-namespace base {
-namespace internal {
-
-class DelayedTaskManager;
-class TaskTracker;
-
-// A pool of threads that run Tasks. This class is thread-safe.
-class BASE_EXPORT SchedulerThreadPoolImpl : public SchedulerThreadPool {
- public:
- enum class IORestriction {
- ALLOWED,
- DISALLOWED,
- };
-
- // Callback invoked when a Sequence isn't empty after a worker thread pops a
- // Task from it.
- using ReEnqueueSequenceCallback = Callback<void(scoped_refptr<Sequence>)>;
-
- // Destroying a SchedulerThreadPool returned by CreateThreadPool() is not
- // allowed in production; it is always leaked. In tests, it can only be
- // destroyed after JoinForTesting() has returned.
- ~SchedulerThreadPoolImpl() override;
-
- // Creates a SchedulerThreadPool labeled |name| with up to |max_threads|
- // threads of priority |thread_priority|. |io_restriction| indicates whether
- // Tasks on the constructed thread pool are allowed to make I/O calls.
- // |re_enqueue_sequence_callback| will be invoked after a thread of this
- // thread pool tries to run a Task. |task_tracker| is used to handle shutdown
- // behavior of Tasks. |delayed_task_manager| handles Tasks posted with a
- // delay. Returns nullptr on failure to create a thread pool with at least one
- // thread.
- static std::unique_ptr<SchedulerThreadPoolImpl> Create(
- StringPiece name,
- ThreadPriority thread_priority,
- size_t max_threads,
- IORestriction io_restriction,
- const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
- TaskTracker* task_tracker,
- DelayedTaskManager* delayed_task_manager);
-
- // Waits until all threads are idle.
- void WaitForAllWorkerThreadsIdleForTesting();
-
- // Joins all threads of this thread pool. Tasks that are already running are
- // allowed to complete their execution. This can only be called once.
- void JoinForTesting();
-
- // SchedulerThreadPool:
- scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) override;
- void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
- const SequenceSortKey& sequence_sort_key) override;
- bool PostTaskWithSequence(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) override;
- void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) override;
-
- private:
- class SchedulerWorkerThreadDelegateImpl;
-
- SchedulerThreadPoolImpl(StringPiece name,
- IORestriction io_restriction,
- TaskTracker* task_tracker,
- DelayedTaskManager* delayed_task_manager);
-
- bool Initialize(
- ThreadPriority thread_priority,
- size_t max_threads,
- const ReEnqueueSequenceCallback& re_enqueue_sequence_callback);
-
- // Wakes up the last thread from this thread pool to go idle, if any.
- void WakeUpOneThread();
-
- // Adds |worker_thread| to |idle_worker_threads_stack_|.
- void AddToIdleWorkerThreadsStack(SchedulerWorkerThread* worker_thread);
-
- // Removes |worker_thread| from |idle_worker_threads_stack_|.
- void RemoveFromIdleWorkerThreadsStack(SchedulerWorkerThread* worker_thread);
-
- // The name of this thread pool, used to label its worker threads.
- const std::string name_;
-
- // All worker threads owned by this thread pool. Only modified during
- // initialization of the thread pool.
- std::vector<std::unique_ptr<SchedulerWorkerThread>> worker_threads_;
-
- // Synchronizes access to |next_worker_thread_index_|.
- SchedulerLock next_worker_thread_index_lock_;
-
- // Index of the worker thread that will be assigned to the next single-
- // threaded TaskRunner returned by this pool.
- size_t next_worker_thread_index_ = 0;
-
- // PriorityQueue from which all threads of this thread pool get work.
- PriorityQueue shared_priority_queue_;
-
- // Indicates whether Tasks on this thread pool are allowed to make I/O calls.
- const IORestriction io_restriction_;
-
- // Synchronizes access to |idle_worker_threads_stack_| and
- // |idle_worker_threads_stack_cv_for_testing_|. Has |shared_priority_queue_|'s
- // lock as its predecessor so that a thread can be pushed to
- // |idle_worker_threads_stack_| within the scope of a Transaction (more
- // details in GetWork()).
- SchedulerLock idle_worker_threads_stack_lock_;
-
- // Stack of idle worker threads.
- SchedulerWorkerThreadStack idle_worker_threads_stack_;
-
- // Signaled when all worker threads become idle.
- std::unique_ptr<ConditionVariable> idle_worker_threads_stack_cv_for_testing_;
-
- // Signaled once JoinForTesting() has returned.
- WaitableEvent join_for_testing_returned_;
-
-#if DCHECK_IS_ON()
- // Signaled when all threads have been created.
- WaitableEvent threads_created_;
-#endif
-
- TaskTracker* const task_tracker_;
- DelayedTaskManager* const delayed_task_manager_;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerThreadPoolImpl);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_TASK_SCHEDULER_SCHEDULER_THREAD_POOL_IMPL_H_
diff --git a/chromium/base/task_scheduler/scheduler_unique_stack.h b/chromium/base/task_scheduler/scheduler_unique_stack.h
deleted file mode 100644
index 4f9ce14a508..00000000000
--- a/chromium/base/task_scheduler/scheduler_unique_stack.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SCHEDULER_UNIQUE_STACK_H_
-#define BASE_TASK_SCHEDULER_SCHEDULER_UNIQUE_STACK_H_
-
-#include <stddef.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/macros.h"
-
-namespace base {
-namespace internal {
-
-// A stack that supports removal of arbitrary values and doesn't allow multiple
-// insertions of the same value. This class is NOT thread-safe.
-template <typename T>
-class SchedulerUniqueStack {
- public:
- SchedulerUniqueStack();
- ~SchedulerUniqueStack();
-
- // Inserts |val| at the top of the stack. |val| must not already be on the
- // stack.
- void Push(const T& val);
-
- // Removes the top value from the stack and returns it. Cannot be called on an
- // empty stack.
- T Pop();
-
- // Removes |val| from the stack.
- void Remove(const T& val);
-
- // Returns the number of values on the stack.
- size_t Size() const;
-
- // Returns true if the stack is empty.
- bool Empty() const;
-
- private:
- std::vector<T> stack_;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerUniqueStack);
-};
-
-template <typename T>
-SchedulerUniqueStack<T>::SchedulerUniqueStack() = default;
-
-template <typename T>
-SchedulerUniqueStack<T>::~SchedulerUniqueStack() = default;
-
-template <typename T>
-void SchedulerUniqueStack<T>::Push(const T& val) {
- DCHECK(std::find(stack_.begin(), stack_.end(), val) == stack_.end())
- << "Value already on stack";
- stack_.push_back(val);
-}
-
-template <typename T>
-T SchedulerUniqueStack<T>::Pop() {
- DCHECK(!stack_.empty());
- const T val = stack_.back();
- stack_.pop_back();
- return val;
-}
-
-template <typename T>
-void SchedulerUniqueStack<T>::Remove(const T& val) {
- auto it = std::find(stack_.begin(), stack_.end(), val);
- if (it != stack_.end())
- stack_.erase(it);
-}
-
-template <typename T>
-size_t SchedulerUniqueStack<T>::Size() const {
- return stack_.size();
-}
-
-template <typename T>
-bool SchedulerUniqueStack<T>::Empty() const {
- return stack_.empty();
-}
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_TASK_SCHEDULER_SCHEDULER_UNIQUE_STACK_H_
diff --git a/chromium/base/task_scheduler/scheduler_unique_stack_unittest.cc b/chromium/base/task_scheduler/scheduler_unique_stack_unittest.cc
deleted file mode 100644
index 8ef72dd2000..00000000000
--- a/chromium/base/task_scheduler/scheduler_unique_stack_unittest.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scheduler_unique_stack.h"
-
-#include "base/task_scheduler/test_utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace internal {
-
-// Verify that Push() and Pop() add/remove values in FIFO order.
-TEST(TaskSchedulerUniqueStackTest, PushPop) {
- SchedulerUniqueStack<int> stack;
- EXPECT_TRUE(stack.Empty());
- EXPECT_EQ(0U, stack.Size());
-
- stack.Push(1);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(1U, stack.Size());
-
- stack.Push(2);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(2U, stack.Size());
-
- stack.Push(3);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(3U, stack.Size());
-
- EXPECT_EQ(3, stack.Pop());
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(2U, stack.Size());
-
- stack.Push(3);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(3U, stack.Size());
-
- EXPECT_EQ(3, stack.Pop());
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(2U, stack.Size());
-
- EXPECT_EQ(2, stack.Pop());
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(1U, stack.Size());
-
- EXPECT_EQ(1, stack.Pop());
- EXPECT_TRUE(stack.Empty());
- EXPECT_EQ(0U, stack.Size());
-}
-
-// Verify that a value can be removed by Remove().
-TEST(TaskSchedulerUniqueStackTest, Remove) {
- SchedulerUniqueStack<int> stack;
- EXPECT_TRUE(stack.Empty());
- EXPECT_EQ(0U, stack.Size());
-
- stack.Push(1);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(1U, stack.Size());
-
- stack.Push(2);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(2U, stack.Size());
-
- stack.Push(3);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(3U, stack.Size());
-
- stack.Remove(2);
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(2U, stack.Size());
-
- EXPECT_EQ(3, stack.Pop());
- EXPECT_FALSE(stack.Empty());
- EXPECT_EQ(1U, stack.Size());
-
- EXPECT_EQ(1, stack.Pop());
- EXPECT_TRUE(stack.Empty());
- EXPECT_EQ(0U, stack.Size());
-}
-
-// Verify that a value can be pushed again after it has been removed.
-TEST(TaskSchedulerUniqueStackTest, PushAfterRemove) {
- SchedulerUniqueStack<int> stack;
- EXPECT_EQ(0U, stack.Size());
- EXPECT_TRUE(stack.Empty());
-
- stack.Push(5);
- EXPECT_EQ(1U, stack.Size());
- EXPECT_FALSE(stack.Empty());
-
- stack.Remove(5);
- EXPECT_EQ(0U, stack.Size());
- EXPECT_TRUE(stack.Empty());
-
- stack.Push(5);
- EXPECT_EQ(1U, stack.Size());
- EXPECT_FALSE(stack.Empty());
-}
-
-// Verify that Push() DCHECKs when a value is inserted twice.
-TEST(TaskSchedulerUniqueStackTest, PushTwice) {
- SchedulerUniqueStack<int> stack;
- stack.Push(5);
- EXPECT_DCHECK_DEATH({ stack.Push(5); }, "");
-}
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker.cc b/chromium/base/task_scheduler/scheduler_worker.cc
new file mode 100644
index 00000000000..fcbc28382fa
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker.cc
@@ -0,0 +1,222 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/task_scheduler/task_tracker.h"
+
+namespace base {
+namespace internal {
+
+class SchedulerWorker::Thread : public PlatformThread::Delegate {
+ public:
+ ~Thread() override = default;
+
+ static std::unique_ptr<Thread> Create(SchedulerWorker* outer) {
+ std::unique_ptr<Thread> thread(new Thread(outer));
+ thread->Initialize();
+ if (thread->thread_handle_.is_null())
+ return nullptr;
+ return thread;
+ }
+
+ // PlatformThread::Delegate.
+ void ThreadMain() override {
+ // Set if this thread was detached.
+ std::unique_ptr<Thread> detached_thread;
+
+ outer_->delegate_->OnMainEntry(outer_);
+
+ // A SchedulerWorker starts out waiting for work.
+ WaitForWork();
+
+ while (!outer_->task_tracker_->shutdown_completed() &&
+ !outer_->ShouldExitForTesting()) {
+ DCHECK(outer_);
+ // Get the sequence containing the next task to execute.
+ scoped_refptr<Sequence> sequence = outer_->delegate_->GetWork(outer_);
+ if (!sequence) {
+ if (outer_->delegate_->CanDetach(outer_)) {
+ detached_thread = outer_->Detach();
+ if (detached_thread) {
+ DCHECK_EQ(detached_thread.get(), this);
+ PlatformThread::Detach(thread_handle_);
+ outer_ = nullptr;
+ break;
+ }
+ }
+ WaitForWork();
+ continue;
+ }
+
+ outer_->task_tracker_->RunTask(sequence->PeekTask());
+
+ const bool sequence_became_empty = sequence->PopTask();
+
+ // If |sequence| isn't empty immediately after the pop, re-enqueue it to
+ // maintain the invariant that a non-empty Sequence is always referenced
+ // by either a PriorityQueue or a SchedulerWorker. If it is empty
+ // and there are live references to it, it will be enqueued when a Task is
+ // added to it. Otherwise, it will be destroyed at the end of this scope.
+ if (!sequence_became_empty)
+ outer_->delegate_->ReEnqueueSequence(std::move(sequence));
+
+ // Calling WakeUp() guarantees that this SchedulerWorker will run
+ // Tasks from Sequences returned by the GetWork() method of |delegate_|
+ // until it returns nullptr. Resetting |wake_up_event_| here doesn't break
+ // this invariant and avoids a useless loop iteration before going to
+ // sleep if WakeUp() is called while this SchedulerWorker is awake.
+ wake_up_event_.Reset();
+ }
+
+    // If a wake-up is pending and we successfully detached, |outer_| was
+    // somehow able to signal us, which means it probably still thinks we're
+    // alive. This is bad: the pending WakeUp() becomes a no-op and |outer_|
+    // will be stuck forever.
+ DCHECK(!detached_thread || !IsWakeUpPending()) <<
+ "This thread was detached and woken up at the same time.";
+ }
+
+ void Join() { PlatformThread::Join(thread_handle_); }
+
+ void WakeUp() { wake_up_event_.Signal(); }
+
+ bool IsWakeUpPending() { return wake_up_event_.IsSignaled(); }
+
+ private:
+ Thread(SchedulerWorker* outer)
+ : outer_(outer),
+ wake_up_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
+ DCHECK(outer_);
+ }
+
+ void Initialize() {
+ constexpr size_t kDefaultStackSize = 0;
+ PlatformThread::CreateWithPriority(kDefaultStackSize, this,
+ &thread_handle_,
+ outer_->thread_priority_);
+ }
+
+ void WaitForWork() {
+ DCHECK(outer_);
+ const TimeDelta sleep_time = outer_->delegate_->GetSleepTimeout();
+ if (sleep_time.is_max()) {
+ // Calling TimedWait with TimeDelta::Max is not recommended per
+ // http://crbug.com/465948.
+ wake_up_event_.Wait();
+ } else {
+ wake_up_event_.TimedWait(sleep_time);
+ }
+ wake_up_event_.Reset();
+ }
+
+ PlatformThreadHandle thread_handle_;
+
+ SchedulerWorker* outer_;
+
+ // Event signaled to wake up this thread.
+ WaitableEvent wake_up_event_;
+
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+std::unique_ptr<SchedulerWorker> SchedulerWorker::Create(
+ ThreadPriority thread_priority,
+ std::unique_ptr<Delegate> delegate,
+ TaskTracker* task_tracker,
+ InitialState initial_state) {
+ std::unique_ptr<SchedulerWorker> worker(
+ new SchedulerWorker(thread_priority, std::move(delegate), task_tracker));
+ // Creation happens before any other thread can reference this one, so no
+ // synchronization is necessary.
+ if (initial_state == SchedulerWorker::InitialState::ALIVE) {
+ worker->CreateThread();
+ if (!worker->thread_) {
+ return nullptr;
+ }
+ }
+
+ return worker;
+}
+
+SchedulerWorker::~SchedulerWorker() {
+  // It is unexpected for |thread_| to still be alive when SchedulerWorker is
+  // destroyed, since SchedulerWorker owns the delegate needed by |thread_|.
+  // In tests, this generally means JoinForTesting() was not called.
+ DCHECK(!thread_);
+}
+
+void SchedulerWorker::WakeUp() {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ if (!thread_)
+ CreateThreadAssertSynchronized();
+
+ if (thread_)
+ thread_->WakeUp();
+}
+
+void SchedulerWorker::JoinForTesting() {
+ {
+ AutoSchedulerLock auto_lock(should_exit_for_testing_lock_);
+ should_exit_for_testing_ = true;
+ }
+ WakeUp();
+
+  // Normally, holding a lock while joining is dangerous. However, since this
+  // is only for testing, it is safe here: the only scenario that could
+  // interfere is a call to Detach(), which is ruled out by having the
+  // delegate always return false from CanDetach().
+ AutoSchedulerLock auto_lock(thread_lock_);
+ if (thread_)
+ thread_->Join();
+
+ thread_.reset();
+}
+
+bool SchedulerWorker::ThreadAliveForTesting() const {
+ AutoSchedulerLock auto_lock(thread_lock_);
+ return !!thread_;
+}
+
+SchedulerWorker::SchedulerWorker(ThreadPriority thread_priority,
+ std::unique_ptr<Delegate> delegate,
+ TaskTracker* task_tracker)
+ : thread_priority_(thread_priority),
+ delegate_(std::move(delegate)),
+ task_tracker_(task_tracker) {
+ DCHECK(delegate_);
+ DCHECK(task_tracker_);
+}
+
+std::unique_ptr<SchedulerWorker::Thread> SchedulerWorker::Detach() {
+ DCHECK(!ShouldExitForTesting()) << "Worker was already joined";
+ AutoSchedulerLock auto_lock(thread_lock_);
+ // If a wakeup is pending, then a WakeUp() came in while we were deciding to
+ // detach. This means we can't go away anymore since we would break the
+ // guarantee that we call GetWork() after a successful wakeup.
+ return thread_->IsWakeUpPending() ? nullptr : std::move(thread_);
+}
+
+void SchedulerWorker::CreateThread() {
+ thread_ = Thread::Create(this);
+}
+
+void SchedulerWorker::CreateThreadAssertSynchronized() {
+ thread_lock_.AssertAcquired();
+ CreateThread();
+}
+
+bool SchedulerWorker::ShouldExitForTesting() const {
+ AutoSchedulerLock auto_lock(should_exit_for_testing_lock_);
+ return should_exit_for_testing_;
+}
+
+} // namespace internal
+} // namespace base
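The WaitForWork()/WakeUp() pair above parks the worker on a manual-reset WaitableEvent and honors the delegate's sleep timeout, special-casing TimeDelta::Max() because TimedWait() with an effectively infinite delay is discouraged (crbug.com/465948). A self-contained restatement of that pattern, using only the base APIs already used above (the helper name is invented for illustration):

  #include "base/synchronization/waitable_event.h"
  #include "base/time/time.h"

  // Hypothetical helper: block until |wake_up_event| is signaled or
  // |sleep_time| elapses, then re-arm the manual-reset event so the next
  // wait blocks again.
  void ParkUntilWokenOrTimeout(base::WaitableEvent* wake_up_event,
                               base::TimeDelta sleep_time) {
    if (sleep_time.is_max())
      wake_up_event->Wait();  // Sleep indefinitely until Signal().
    else
      wake_up_event->TimedWait(sleep_time);
    wake_up_event->Reset();
  }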
diff --git a/chromium/base/task_scheduler/scheduler_worker.h b/chromium/base/task_scheduler/scheduler_worker.h
new file mode 100644
index 00000000000..71d4cbcc916
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker.h
@@ -0,0 +1,152 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+class TaskTracker;
+
+// A worker that manages a single thread to run Tasks from Sequences returned
+// by a delegate.
+//
+// A SchedulerWorker starts out sleeping. It is woken up by a call to WakeUp().
+// After a wake-up, a SchedulerWorker runs Tasks from Sequences returned by the
+// GetWork() method of its delegate as long as it doesn't return nullptr. It
+// also periodically checks with its TaskTracker whether shutdown has completed
+// and exits when it has.
+//
+// The worker is free to release and reallocate the platform thread with
+// guidance from the delegate.
+//
+// This class is thread-safe.
+class BASE_EXPORT SchedulerWorker {
+ public:
+ // Delegate interface for SchedulerWorker. The methods are always called from
+ // a thread managed by the SchedulerWorker instance.
+ class Delegate {
+ public:
+ virtual ~Delegate() = default;
+
+ // Called by a thread managed by |worker| when it enters its main function.
+ // If a thread is recreated after detachment, this call will occur again.
+ virtual void OnMainEntry(SchedulerWorker* worker) = 0;
+
+ // Called by a thread managed by |worker| to get a Sequence from which to
+ // run a Task.
+ virtual scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) = 0;
+
+ // Called when |sequence| isn't empty after the SchedulerWorker pops a Task
+ // from it. |sequence| is the last Sequence returned by GetWork().
+ virtual void ReEnqueueSequence(scoped_refptr<Sequence> sequence) = 0;
+
+ // Called by a thread to determine how long to sleep before the next call to
+ // GetWork(). GetWork() may be called before this timeout expires if the
+ // worker's WakeUp() method is called.
+ virtual TimeDelta GetSleepTimeout() = 0;
+
+  // Called by a thread to determine whether it is allowed to detach if the
+  // last call to GetWork() returned nullptr.
+ //
+ // It is the responsibility of the delegate to determine if detachment is
+ // safe. If the delegate is responsible for thread-affine work, detachment
+ // is generally not safe.
+ //
+ // When true is returned:
+ // - The next WakeUp() could be more costly due to new thread creation.
+ // - The worker will take this as a signal that it can detach, but it is not
+ // obligated to do so.
+ // This MUST return false if SchedulerWorker::JoinForTesting() is in
+ // progress.
+ virtual bool CanDetach(SchedulerWorker* worker) = 0;
+ };
+
+ enum class InitialState { ALIVE, DETACHED };
+
+ // Creates a SchedulerWorker with priority |thread_priority| that runs Tasks
+ // from Sequences returned by |delegate|. |task_tracker| is used to handle
+  // shutdown behavior of Tasks. If |initial_state| is DETACHED, the thread
+  // will be created upon a WakeUp(). Returns nullptr if creating the
+  // underlying platform thread fails during Create().
+ static std::unique_ptr<SchedulerWorker> Create(
+ ThreadPriority thread_priority,
+ std::unique_ptr<Delegate> delegate,
+ TaskTracker* task_tracker,
+ InitialState initial_state);
+
+ // Destroying a SchedulerWorker in production is not allowed; it is always
+ // leaked. In tests, it can only be destroyed after JoinForTesting() has
+ // returned.
+ ~SchedulerWorker();
+
+ // Wakes up this SchedulerWorker if it wasn't already awake. After this
+ // is called, this SchedulerWorker will run Tasks from Sequences
+ // returned by the GetWork() method of its delegate until it returns nullptr.
+  // WakeUp() may fail if the worker is detached and it fails to create a new
+  // thread. If this happens, there will be no call to GetWork().
+ void WakeUp();
+
+ SchedulerWorker::Delegate* delegate() { return delegate_.get(); }
+
+ // Joins this SchedulerWorker. If a Task is already running, it will be
+ // allowed to complete its execution. This can only be called once.
+ void JoinForTesting();
+
+ // Returns true if the worker is alive.
+ bool ThreadAliveForTesting() const;
+
+ private:
+ class Thread;
+
+ SchedulerWorker(ThreadPriority thread_priority,
+ std::unique_ptr<Delegate> delegate,
+ TaskTracker* task_tracker);
+
+ // Returns the thread instance if the detach was successful so that it can be
+ // freed upon termination of the thread.
+ // If the detach is not possible, returns nullptr.
+ std::unique_ptr<SchedulerWorker::Thread> Detach();
+
+ void CreateThread();
+
+ void CreateThreadAssertSynchronized();
+
+ bool ShouldExitForTesting() const;
+
+  // Synchronizes access to |thread_|.
+ mutable SchedulerLock thread_lock_;
+
+ // The underlying thread for this SchedulerWorker.
+ std::unique_ptr<Thread> thread_;
+
+ const ThreadPriority thread_priority_;
+ const std::unique_ptr<Delegate> delegate_;
+ TaskTracker* const task_tracker_;
+
+ // Synchronizes access to |should_exit_for_testing_|.
+ mutable SchedulerLock should_exit_for_testing_lock_;
+
+ // True once JoinForTesting() has been called.
+ bool should_exit_for_testing_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorker);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
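To make the Delegate contract above concrete, here is a hedged sketch of a do-nothing delegate and a worker created in the DETACHED state. The IdleDelegate/Example names are invented for illustration; WrapUnique() (from base/memory/ptr_util.h) mirrors what the unit tests elsewhere in this change use, and the TaskTracker is assumed to be supplied by the caller and to outlive the worker:

  #include "base/memory/ptr_util.h"
  #include "base/task_scheduler/scheduler_worker.h"
  #include "base/task_scheduler/task_tracker.h"

  namespace base {
  namespace internal {

  // Illustrative delegate: never provides work, never detaches.
  class IdleDelegate : public SchedulerWorker::Delegate {
   public:
    void OnMainEntry(SchedulerWorker* worker) override {}
    scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
      return nullptr;  // No work; the worker goes back to sleep.
    }
    void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {}
    TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
    bool CanDetach(SchedulerWorker* worker) override { return false; }
  };

  void Example(TaskTracker* task_tracker) {
    std::unique_ptr<SchedulerWorker> worker = SchedulerWorker::Create(
        ThreadPriority::NORMAL, WrapUnique(new IdleDelegate), task_tracker,
        SchedulerWorker::InitialState::DETACHED);
    if (worker)
      worker->WakeUp();  // Creates the platform thread lazily, then GetWork().
    // Per the destructor comment above, production workers are leaked rather
    // than destroyed, so ownership is intentionally released here.
    worker.release();
  }

  }  // namespace internal
  }  // namespace base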
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool.h b/chromium/base/task_scheduler/scheduler_worker_pool.h
new file mode 100644
index 00000000000..43dce606c62
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker_pool.h
@@ -0,0 +1,66 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+class SchedulerWorker;
+class SequenceSortKey;
+
+// Interface for a worker pool.
+class BASE_EXPORT SchedulerWorkerPool {
+ public:
+ virtual ~SchedulerWorkerPool() = default;
+
+ // Returns a TaskRunner whose PostTask invocations will result in scheduling
+ // Tasks with |traits| and |execution_mode| in this SchedulerWorkerPool.
+ virtual scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ ExecutionMode execution_mode) = 0;
+
+ // Inserts |sequence| with |sequence_sort_key| into a queue of Sequences that
+ // can be processed by any worker owned by this SchedulerWorkerPool. Must only
+ // be used to put |sequence| back into a queue after running a Task from it.
+ // The thread that calls this doesn't have to belong to this
+ // SchedulerWorkerPool.
+ virtual void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sequence_sort_key) = 0;
+
+ // Posts |task| to be executed by this SchedulerWorkerPool as part of
+ // |sequence|. If |worker| is non-null, |task| will be scheduled to run on it
+ // specifically (note: |worker| must be owned by this SchedulerWorkerPool);
+ // otherwise, |task| will be added to the pending shared work. |task| won't be
+ // executed before its delayed run time, if any. Returns true if |task| is
+ // posted.
+ virtual bool PostTaskWithSequence(std::unique_ptr<Task> task,
+ scoped_refptr<Sequence> sequence,
+ SchedulerWorker* worker) = 0;
+
+ // Posts |task| to be executed by this SchedulerWorkerPool as part of
+ // |sequence|. If |worker| is non-null, |task| will be scheduled to run on it
+ // specifically (note: |worker| must be owned by this SchedulerWorkerPool);
+ // otherwise, |task| will be added to the pending shared work. This must only
+ // be called after |task| has gone through PostTaskWithSequence() and after
+ // |task|'s delayed run time.
+ virtual void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
+ scoped_refptr<Sequence> sequence,
+ SchedulerWorker* worker) = 0;
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
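As a usage sketch of the interface above: a caller obtains a TaskRunner for a given set of traits and execution mode, then posts through it. The pool pointer and the posted function are hypothetical; TaskTraits() and TaskRunner::PostTask() come from the headers this file already includes, and ExecutionMode is assumed to live in namespace base (alongside TaskTraits in task_traits.h):

  #include "base/bind.h"
  #include "base/location.h"
  #include "base/task_scheduler/scheduler_worker_pool.h"

  namespace {
  void DoBackgroundWork() {}
  }  // namespace

  // |worker_pool| is assumed to be a live SchedulerWorkerPool implementation
  // (e.g. the SchedulerWorkerPoolImpl added later in this change).
  void PostExampleTask(base::internal::SchedulerWorkerPool* worker_pool) {
    scoped_refptr<base::TaskRunner> runner =
        worker_pool->CreateTaskRunnerWithTraits(base::TaskTraits(),
                                                base::ExecutionMode::PARALLEL);
    runner->PostTask(FROM_HERE, base::Bind(&DoBackgroundWork));
  }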
diff --git a/chromium/base/task_scheduler/scheduler_thread_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index 3433ce6dd3b..06933eb32db 100644
--- a/chromium/base/task_scheduler/scheduler_thread_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/task_scheduler/scheduler_thread_pool_impl.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
#include <stddef.h>
@@ -27,43 +27,43 @@ namespace internal {
namespace {
-// SchedulerThreadPool that owns the current thread, if any.
-LazyInstance<ThreadLocalPointer<const SchedulerThreadPool>>::Leaky
- tls_current_thread_pool = LAZY_INSTANCE_INITIALIZER;
+// SchedulerWorker that owns the current thread, if any.
+LazyInstance<ThreadLocalPointer<const SchedulerWorker>>::Leaky
+ tls_current_worker = LAZY_INSTANCE_INITIALIZER;
-// SchedulerWorkerThread that owns the current thread, if any.
-LazyInstance<ThreadLocalPointer<const SchedulerWorkerThread>>::Leaky
- tls_current_worker_thread = LAZY_INSTANCE_INITIALIZER;
+// SchedulerWorkerPool that owns the current thread, if any.
+LazyInstance<ThreadLocalPointer<const SchedulerWorkerPool>>::Leaky
+ tls_current_worker_pool = LAZY_INSTANCE_INITIALIZER;
// A task runner that runs tasks with the PARALLEL ExecutionMode.
class SchedulerParallelTaskRunner : public TaskRunner {
public:
// Constructs a SchedulerParallelTaskRunner which can be used to post tasks so
- // long as |thread_pool| is alive.
- // TODO(robliao): Find a concrete way to manage |thread_pool|'s memory.
+ // long as |worker_pool| is alive.
+ // TODO(robliao): Find a concrete way to manage |worker_pool|'s memory.
SchedulerParallelTaskRunner(const TaskTraits& traits,
- SchedulerThreadPool* thread_pool)
- : traits_(traits), thread_pool_(thread_pool) {}
+ SchedulerWorkerPool* worker_pool)
+ : traits_(traits), worker_pool_(worker_pool) {}
// TaskRunner:
bool PostDelayedTask(const tracked_objects::Location& from_here,
const Closure& closure,
TimeDelta delay) override {
// Post the task as part of a one-off single-task Sequence.
- return thread_pool_->PostTaskWithSequence(
+ return worker_pool_->PostTaskWithSequence(
WrapUnique(new Task(from_here, closure, traits_, delay)),
make_scoped_refptr(new Sequence), nullptr);
}
bool RunsTasksOnCurrentThread() const override {
- return tls_current_thread_pool.Get().Get() == thread_pool_;
+ return tls_current_worker_pool.Get().Get() == worker_pool_;
}
private:
~SchedulerParallelTaskRunner() override = default;
const TaskTraits traits_;
- SchedulerThreadPool* const thread_pool_;
+ SchedulerWorkerPool* const worker_pool_;
DISALLOW_COPY_AND_ASSIGN(SchedulerParallelTaskRunner);
};
@@ -72,11 +72,11 @@ class SchedulerParallelTaskRunner : public TaskRunner {
class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
public:
// Constructs a SchedulerSequencedTaskRunner which can be used to post tasks
- // so long as |thread_pool| is alive.
- // TODO(robliao): Find a concrete way to manage |thread_pool|'s memory.
+ // so long as |worker_pool| is alive.
+ // TODO(robliao): Find a concrete way to manage |worker_pool|'s memory.
SchedulerSequencedTaskRunner(const TaskTraits& traits,
- SchedulerThreadPool* thread_pool)
- : traits_(traits), thread_pool_(thread_pool) {}
+ SchedulerWorkerPool* worker_pool)
+ : traits_(traits), worker_pool_(worker_pool) {}
// SequencedTaskRunner:
bool PostDelayedTask(const tracked_objects::Location& from_here,
@@ -86,7 +86,7 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
task->sequenced_task_runner_ref = this;
// Post the task as part of |sequence_|.
- return thread_pool_->PostTaskWithSequence(std::move(task), sequence_,
+ return worker_pool_->PostTaskWithSequence(std::move(task), sequence_,
nullptr);
}
@@ -98,7 +98,7 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
}
bool RunsTasksOnCurrentThread() const override {
- return tls_current_thread_pool.Get().Get() == thread_pool_;
+ return tls_current_worker_pool.Get().Get() == worker_pool_;
}
private:
@@ -108,7 +108,7 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
const scoped_refptr<Sequence> sequence_ = new Sequence;
const TaskTraits traits_;
- SchedulerThreadPool* const thread_pool_;
+ SchedulerWorkerPool* const worker_pool_;
DISALLOW_COPY_AND_ASSIGN(SchedulerSequencedTaskRunner);
};
@@ -117,15 +117,15 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
class SchedulerSingleThreadTaskRunner : public SingleThreadTaskRunner {
public:
// Constructs a SchedulerSingleThreadTaskRunner which can be used to post
- // tasks so long as |thread_pool| and |worker_thread| are alive.
- // TODO(robliao): Find a concrete way to manage the memory of |thread_pool|
- // and |worker_thread|.
+ // tasks so long as |worker_pool| and |worker| are alive.
+ // TODO(robliao): Find a concrete way to manage the memory of |worker_pool|
+ // and |worker|.
SchedulerSingleThreadTaskRunner(const TaskTraits& traits,
- SchedulerThreadPool* thread_pool,
- SchedulerWorkerThread* worker_thread)
+ SchedulerWorkerPool* worker_pool,
+ SchedulerWorker* worker)
: traits_(traits),
- thread_pool_(thread_pool),
- worker_thread_(worker_thread) {}
+ worker_pool_(worker_pool),
+ worker_(worker) {}
// SingleThreadTaskRunner:
bool PostDelayedTask(const tracked_objects::Location& from_here,
@@ -134,9 +134,9 @@ class SchedulerSingleThreadTaskRunner : public SingleThreadTaskRunner {
std::unique_ptr<Task> task(new Task(from_here, closure, traits_, delay));
task->single_thread_task_runner_ref = this;
- // Post the task to be executed by |worker_thread_| as part of |sequence_|.
- return thread_pool_->PostTaskWithSequence(std::move(task), sequence_,
- worker_thread_);
+ // Post the task to be executed by |worker_| as part of |sequence_|.
+ return worker_pool_->PostTaskWithSequence(std::move(task), sequence_,
+ worker_);
}
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
@@ -147,7 +147,7 @@ class SchedulerSingleThreadTaskRunner : public SingleThreadTaskRunner {
}
bool RunsTasksOnCurrentThread() const override {
- return tls_current_worker_thread.Get().Get() == worker_thread_;
+ return tls_current_worker.Get().Get() == worker_;
}
private:
@@ -157,58 +157,57 @@ class SchedulerSingleThreadTaskRunner : public SingleThreadTaskRunner {
const scoped_refptr<Sequence> sequence_ = new Sequence;
const TaskTraits traits_;
- SchedulerThreadPool* const thread_pool_;
- SchedulerWorkerThread* const worker_thread_;
+ SchedulerWorkerPool* const worker_pool_;
+ SchedulerWorker* const worker_;
DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunner);
};
// Only used in DCHECKs.
-bool ContainsWorkerThread(
- const std::vector<std::unique_ptr<SchedulerWorkerThread>>& worker_threads,
- const SchedulerWorkerThread* worker_thread) {
- auto it = std::find_if(
- worker_threads.begin(), worker_threads.end(),
- [worker_thread](const std::unique_ptr<SchedulerWorkerThread>& i) {
- return i.get() == worker_thread;
+bool ContainsWorker(
+ const std::vector<std::unique_ptr<SchedulerWorker>>& workers,
+ const SchedulerWorker* worker) {
+ auto it = std::find_if(workers.begin(), workers.end(),
+ [worker](const std::unique_ptr<SchedulerWorker>& i) {
+ return i.get() == worker;
});
- return it != worker_threads.end();
+ return it != workers.end();
}
} // namespace
-class SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl
- : public SchedulerWorkerThread::Delegate {
+class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
+ : public SchedulerWorker::Delegate {
public:
- // |outer| owns the worker thread for which this delegate is constructed.
+ // |outer| owns the worker for which this delegate is constructed.
// |re_enqueue_sequence_callback| is invoked when ReEnqueueSequence() is
// called with a non-single-threaded Sequence. |shared_priority_queue| is a
- // PriorityQueue whose transactions may overlap with the worker thread's
+ // PriorityQueue whose transactions may overlap with the worker's
// single-threaded PriorityQueue's transactions. |index| will be appended to
- // this thread's name to uniquely identify it.
- SchedulerWorkerThreadDelegateImpl(
- SchedulerThreadPoolImpl* outer,
+ // the pool name to label the underlying worker threads.
+ SchedulerWorkerDelegateImpl(
+ SchedulerWorkerPoolImpl* outer,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
const PriorityQueue* shared_priority_queue,
int index);
- ~SchedulerWorkerThreadDelegateImpl() override;
+ ~SchedulerWorkerDelegateImpl() override;
PriorityQueue* single_threaded_priority_queue() {
return &single_threaded_priority_queue_;
}
- // SchedulerWorkerThread::Delegate:
- void OnMainEntry(SchedulerWorkerThread* worker_thread) override;
- scoped_refptr<Sequence> GetWork(
- SchedulerWorkerThread* worker_thread) override;
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(SchedulerWorker* worker) override;
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override;
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override;
TimeDelta GetSleepTimeout() override;
+ bool CanDetach(SchedulerWorker* worker) override;
private:
- SchedulerThreadPoolImpl* outer_;
+ SchedulerWorkerPoolImpl* outer_;
const ReEnqueueSequenceCallback re_enqueue_sequence_callback_;
- // Single-threaded PriorityQueue for the worker thread.
+ // Single-threaded PriorityQueue for the worker.
PriorityQueue single_threaded_priority_queue_;
// True if the last Sequence returned by GetWork() was extracted from
@@ -217,17 +216,17 @@ class SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl
const int index_;
- DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerThreadDelegateImpl);
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl);
};
-SchedulerThreadPoolImpl::~SchedulerThreadPoolImpl() {
- // SchedulerThreadPool should never be deleted in production unless its
+SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() {
+ // SchedulerWorkerPool should never be deleted in production unless its
// initialization failed.
- DCHECK(join_for_testing_returned_.IsSignaled() || worker_threads_.empty());
+ DCHECK(join_for_testing_returned_.IsSignaled() || workers_.empty());
}
// static
-std::unique_ptr<SchedulerThreadPoolImpl> SchedulerThreadPoolImpl::Create(
+std::unique_ptr<SchedulerWorkerPoolImpl> SchedulerWorkerPoolImpl::Create(
StringPiece name,
ThreadPriority thread_priority,
size_t max_threads,
@@ -235,31 +234,31 @@ std::unique_ptr<SchedulerThreadPoolImpl> SchedulerThreadPoolImpl::Create(
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager) {
- std::unique_ptr<SchedulerThreadPoolImpl> thread_pool(
- new SchedulerThreadPoolImpl(name, io_restriction, task_tracker,
+ std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool(
+ new SchedulerWorkerPoolImpl(name, io_restriction, task_tracker,
delayed_task_manager));
- if (thread_pool->Initialize(thread_priority, max_threads,
+ if (worker_pool->Initialize(thread_priority, max_threads,
re_enqueue_sequence_callback)) {
- return thread_pool;
+ return worker_pool;
}
return nullptr;
}
-void SchedulerThreadPoolImpl::WaitForAllWorkerThreadsIdleForTesting() {
- AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
- while (idle_worker_threads_stack_.Size() < worker_threads_.size())
- idle_worker_threads_stack_cv_for_testing_->Wait();
+void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
+ while (idle_workers_stack_.Size() < workers_.size())
+ idle_workers_stack_cv_for_testing_->Wait();
}
-void SchedulerThreadPoolImpl::JoinForTesting() {
- for (const auto& worker_thread : worker_threads_)
- worker_thread->JoinForTesting();
+void SchedulerWorkerPoolImpl::JoinForTesting() {
+ for (const auto& worker : workers_)
+ worker->JoinForTesting();
DCHECK(!join_for_testing_returned_.IsSignaled());
join_for_testing_returned_.Signal();
}
-scoped_refptr<TaskRunner> SchedulerThreadPoolImpl::CreateTaskRunnerWithTraits(
+scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits(
const TaskTraits& traits,
ExecutionMode execution_mode) {
switch (execution_mode) {
@@ -271,18 +270,17 @@ scoped_refptr<TaskRunner> SchedulerThreadPoolImpl::CreateTaskRunnerWithTraits(
case ExecutionMode::SINGLE_THREADED: {
// TODO(fdoray): Find a way to take load into account when assigning a
- // SchedulerWorkerThread to a SingleThreadTaskRunner. Also, this code
- // assumes that all SchedulerWorkerThreads are alive. Eventually, we might
+ // SchedulerWorker to a SingleThreadTaskRunner. Also, this code
+ // assumes that all SchedulerWorkers are alive. Eventually, we might
// decide to tear down threads that haven't run tasks for a long time.
- size_t worker_thread_index;
+ size_t worker_index;
{
- AutoSchedulerLock auto_lock(next_worker_thread_index_lock_);
- worker_thread_index = next_worker_thread_index_;
- next_worker_thread_index_ =
- (next_worker_thread_index_ + 1) % worker_threads_.size();
+ AutoSchedulerLock auto_lock(next_worker_index_lock_);
+ worker_index = next_worker_index_;
+ next_worker_index_ = (next_worker_index_ + 1) % workers_.size();
}
return make_scoped_refptr(new SchedulerSingleThreadTaskRunner(
- traits, this, worker_threads_[worker_thread_index].get()));
+ traits, this, workers_[worker_index].get()));
}
}
@@ -290,7 +288,7 @@ scoped_refptr<TaskRunner> SchedulerThreadPoolImpl::CreateTaskRunnerWithTraits(
return nullptr;
}
-void SchedulerThreadPoolImpl::ReEnqueueSequence(
+void SchedulerWorkerPoolImpl::ReEnqueueSequence(
scoped_refptr<Sequence> sequence,
const SequenceSortKey& sequence_sort_key) {
shared_priority_queue_.BeginTransaction()->Push(std::move(sequence),
@@ -300,56 +298,52 @@ void SchedulerThreadPoolImpl::ReEnqueueSequence(
// soon try to get another Sequence from which to run a Task. If the thread
// belongs to this pool, it will get that Sequence from
// |shared_priority_queue_|. When that's the case, there is no need to wake up
- // another thread after |sequence| is inserted in |shared_priority_queue_|. If
- // we did wake up another thread, we would waste resources by having more
- // threads trying to get a Sequence from |shared_priority_queue_| than the
+ // another worker after |sequence| is inserted in |shared_priority_queue_|. If
+ // we did wake up another worker, we would waste resources by having more
+ // workers trying to get a Sequence from |shared_priority_queue_| than the
// number of Sequences in it.
- if (tls_current_thread_pool.Get().Get() != this)
- WakeUpOneThread();
+ if (tls_current_worker_pool.Get().Get() != this)
+ WakeUpOneWorker();
}
-bool SchedulerThreadPoolImpl::PostTaskWithSequence(
+bool SchedulerWorkerPoolImpl::PostTaskWithSequence(
std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) {
+ SchedulerWorker* worker) {
DCHECK(task);
DCHECK(sequence);
- DCHECK(!worker_thread ||
- ContainsWorkerThread(worker_threads_, worker_thread));
+ DCHECK(!worker || ContainsWorker(workers_, worker));
if (!task_tracker_->WillPostTask(task.get()))
return false;
if (task->delayed_run_time.is_null()) {
- PostTaskWithSequenceNow(std::move(task), std::move(sequence),
- worker_thread);
+ PostTaskWithSequenceNow(std::move(task), std::move(sequence), worker);
} else {
delayed_task_manager_->AddDelayedTask(std::move(task), std::move(sequence),
- worker_thread, this);
+ worker, this);
}
return true;
}
-void SchedulerThreadPoolImpl::PostTaskWithSequenceNow(
+void SchedulerWorkerPoolImpl::PostTaskWithSequenceNow(
std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
- SchedulerWorkerThread* worker_thread) {
+ SchedulerWorker* worker) {
DCHECK(task);
DCHECK(sequence);
- DCHECK(!worker_thread ||
- ContainsWorkerThread(worker_threads_, worker_thread));
+ DCHECK(!worker || ContainsWorker(workers_, worker));
// Confirm that |task| is ready to run (its delayed run time is either null or
// in the past).
DCHECK_LE(task->delayed_run_time, delayed_task_manager_->Now());
- // Because |worker_thread| belongs to this thread pool, we know that the type
- // of its delegate is SchedulerWorkerThreadDelegateImpl.
+ // Because |worker| belongs to this worker pool, we know that the type
+ // of its delegate is SchedulerWorkerDelegateImpl.
PriorityQueue* const priority_queue =
- worker_thread
- ? static_cast<SchedulerWorkerThreadDelegateImpl*>(
- worker_thread->delegate())
+ worker
+ ? static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate())
->single_threaded_priority_queue()
: &shared_priority_queue_;
DCHECK(priority_queue);
@@ -360,23 +354,23 @@ void SchedulerThreadPoolImpl::PostTaskWithSequenceNow(
// inserted into it. Otherwise, one of these must be true:
// - |sequence| is already in a PriorityQueue (not necessarily
// |shared_priority_queue_|), or,
- // - A worker thread is running a Task from |sequence|. It will insert
- // |sequence| in a PriorityQueue once it's done running the Task.
+ // - A worker is running a Task from |sequence|. It will insert |sequence|
+ // in a PriorityQueue once it's done running the Task.
const auto sequence_sort_key = sequence->GetSortKey();
priority_queue->BeginTransaction()->Push(std::move(sequence),
sequence_sort_key);
- // Wake up a worker thread to process |sequence|.
- if (worker_thread)
- worker_thread->WakeUp();
+ // Wake up a worker to process |sequence|.
+ if (worker)
+ worker->WakeUp();
else
- WakeUpOneThread();
+ WakeUpOneWorker();
}
}
-SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::
- SchedulerWorkerThreadDelegateImpl(
- SchedulerThreadPoolImpl* outer,
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ SchedulerWorkerDelegateImpl(
+ SchedulerWorkerPoolImpl* outer,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
const PriorityQueue* shared_priority_queue,
int index)
@@ -385,34 +379,34 @@ SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::
single_threaded_priority_queue_(shared_priority_queue),
index_(index) {}
-SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::
- ~SchedulerWorkerThreadDelegateImpl() = default;
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ ~SchedulerWorkerDelegateImpl() = default;
-void SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::OnMainEntry(
- SchedulerWorkerThread* worker_thread) {
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
+ SchedulerWorker* worker) {
#if DCHECK_IS_ON()
- // Wait for |outer_->threads_created_| to avoid traversing
- // |outer_->worker_threads_| while it is being filled by Initialize().
- outer_->threads_created_.Wait();
- DCHECK(ContainsWorkerThread(outer_->worker_threads_, worker_thread));
+ // Wait for |outer_->workers_created_| to avoid traversing
+ // |outer_->workers_| while it is being filled by Initialize().
+ outer_->workers_created_.Wait();
+ DCHECK(ContainsWorker(outer_->workers_, worker));
#endif
PlatformThread::SetName(
StringPrintf("%sWorker%d", outer_->name_.c_str(), index_));
- DCHECK(!tls_current_worker_thread.Get().Get());
- DCHECK(!tls_current_thread_pool.Get().Get());
- tls_current_worker_thread.Get().Set(worker_thread);
- tls_current_thread_pool.Get().Set(outer_);
+ DCHECK(!tls_current_worker.Get().Get());
+ DCHECK(!tls_current_worker_pool.Get().Get());
+ tls_current_worker.Get().Set(worker);
+ tls_current_worker_pool.Get().Set(outer_);
ThreadRestrictions::SetIOAllowed(outer_->io_restriction_ ==
IORestriction::ALLOWED);
}
scoped_refptr<Sequence>
-SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::GetWork(
- SchedulerWorkerThread* worker_thread) {
- DCHECK(ContainsWorkerThread(outer_->worker_threads_, worker_thread));
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
+ SchedulerWorker* worker) {
+ DCHECK(ContainsWorker(outer_->workers_, worker));
scoped_refptr<Sequence> sequence;
{
@@ -425,19 +419,19 @@ SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::GetWork(
single_threaded_transaction->IsEmpty()) {
single_threaded_transaction.reset();
- // |shared_transaction| is kept alive while |worker_thread| is added to
- // |idle_worker_threads_stack_| to avoid this race:
+ // |shared_transaction| is kept alive while |worker| is added to
+ // |idle_workers_stack_| to avoid this race:
// 1. This thread creates a Transaction, finds |shared_priority_queue_|
// empty and ends the Transaction.
// 2. Other thread creates a Transaction, inserts a Sequence into
// |shared_priority_queue_| and ends the Transaction. This can't happen
    //     if the Transaction of step 1 is still active because there
// can only be one active Transaction per PriorityQueue at a time.
- // 3. Other thread calls WakeUpOneThread(). No thread is woken up because
- // |idle_worker_threads_stack_| is empty.
- // 4. This thread adds itself to |idle_worker_threads_stack_| and goes to
- // sleep. No thread runs the Sequence inserted in step 2.
- outer_->AddToIdleWorkerThreadsStack(worker_thread);
+ // 3. Other thread calls WakeUpOneWorker(). No thread is woken up because
+ // |idle_workers_stack_| is empty.
+ // 4. This thread adds itself to |idle_workers_stack_| and goes to sleep.
+ // No thread runs the Sequence inserted in step 2.
+ outer_->AddToIdleWorkersStack(worker);
return nullptr;
}
@@ -461,11 +455,11 @@ SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::GetWork(
}
DCHECK(sequence);
- outer_->RemoveFromIdleWorkerThreadsStack(worker_thread);
+ outer_->RemoveFromIdleWorkersStack(worker);
return sequence;
}
-void SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
ReEnqueueSequence(scoped_refptr<Sequence> sequence) {
if (last_sequence_is_single_threaded_) {
// A single-threaded Sequence is always re-enqueued in the single-threaded
@@ -480,24 +474,31 @@ void SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::
}
}
-TimeDelta SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::
+TimeDelta SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
GetSleepTimeout() {
return TimeDelta::Max();
}
-SchedulerThreadPoolImpl::SchedulerThreadPoolImpl(
+bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::CanDetach(
+ SchedulerWorker* worker) {
+ return false;
+}
+
+SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
StringPiece name,
IORestriction io_restriction,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager)
: name_(name.as_string()),
io_restriction_(io_restriction),
- idle_worker_threads_stack_lock_(shared_priority_queue_.container_lock()),
- idle_worker_threads_stack_cv_for_testing_(
- idle_worker_threads_stack_lock_.CreateConditionVariable()),
- join_for_testing_returned_(true, false),
+ idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
+ idle_workers_stack_cv_for_testing_(
+ idle_workers_stack_lock_.CreateConditionVariable()),
+ join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
#if DCHECK_IS_ON()
- threads_created_(true, false),
+ workers_created_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
#endif
task_tracker_(task_tracker),
delayed_task_manager_(delayed_task_manager) {
@@ -505,58 +506,59 @@ SchedulerThreadPoolImpl::SchedulerThreadPoolImpl(
DCHECK(delayed_task_manager_);
}
-bool SchedulerThreadPoolImpl::Initialize(
+bool SchedulerWorkerPoolImpl::Initialize(
ThreadPriority thread_priority,
size_t max_threads,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) {
- AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
- DCHECK(worker_threads_.empty());
+ DCHECK(workers_.empty());
for (size_t i = 0; i < max_threads; ++i) {
- std::unique_ptr<SchedulerWorkerThread> worker_thread =
- SchedulerWorkerThread::Create(
- thread_priority, WrapUnique(new SchedulerWorkerThreadDelegateImpl(
+ std::unique_ptr<SchedulerWorker> worker =
+ SchedulerWorker::Create(
+ thread_priority, WrapUnique(new SchedulerWorkerDelegateImpl(
this, re_enqueue_sequence_callback,
&shared_priority_queue_, static_cast<int>(i))),
- task_tracker_);
- if (!worker_thread)
+ task_tracker_,
+ SchedulerWorker::InitialState::ALIVE);
+ if (!worker)
break;
- idle_worker_threads_stack_.Push(worker_thread.get());
- worker_threads_.push_back(std::move(worker_thread));
+ idle_workers_stack_.Push(worker.get());
+ workers_.push_back(std::move(worker));
}
#if DCHECK_IS_ON()
- threads_created_.Signal();
+ workers_created_.Signal();
#endif
- return !worker_threads_.empty();
+ return !workers_.empty();
}
-void SchedulerThreadPoolImpl::WakeUpOneThread() {
- SchedulerWorkerThread* worker_thread;
+void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
+ SchedulerWorker* worker;
{
- AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
- worker_thread = idle_worker_threads_stack_.Pop();
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
+ worker = idle_workers_stack_.Pop();
}
- if (worker_thread)
- worker_thread->WakeUp();
+ if (worker)
+ worker->WakeUp();
}
-void SchedulerThreadPoolImpl::AddToIdleWorkerThreadsStack(
- SchedulerWorkerThread* worker_thread) {
- AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
- idle_worker_threads_stack_.Push(worker_thread);
- DCHECK_LE(idle_worker_threads_stack_.Size(), worker_threads_.size());
+void SchedulerWorkerPoolImpl::AddToIdleWorkersStack(
+ SchedulerWorker* worker) {
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
+ idle_workers_stack_.Push(worker);
+ DCHECK_LE(idle_workers_stack_.Size(), workers_.size());
- if (idle_worker_threads_stack_.Size() == worker_threads_.size())
- idle_worker_threads_stack_cv_for_testing_->Broadcast();
+ if (idle_workers_stack_.Size() == workers_.size())
+ idle_workers_stack_cv_for_testing_->Broadcast();
}
-void SchedulerThreadPoolImpl::RemoveFromIdleWorkerThreadsStack(
- SchedulerWorkerThread* worker_thread) {
- AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
- idle_worker_threads_stack_.Remove(worker_thread);
+void SchedulerWorkerPoolImpl::RemoveFromIdleWorkersStack(
+ SchedulerWorker* worker) {
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
+ idle_workers_stack_.Remove(worker);
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
new file mode 100644
index 00000000000..935c79a80c5
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -0,0 +1,164 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/priority_queue.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/scheduler_worker_pool.h"
+#include "base/task_scheduler/scheduler_worker_stack.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+class DelayedTaskManager;
+class TaskTracker;
+
+// A pool of workers that run Tasks. This class is thread-safe.
+class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
+ public:
+ enum class IORestriction {
+ ALLOWED,
+ DISALLOWED,
+ };
+
+ // Callback invoked when a Sequence isn't empty after a worker pops a Task
+ // from it.
+ using ReEnqueueSequenceCallback = Callback<void(scoped_refptr<Sequence>)>;
+
+ // Destroying a SchedulerWorkerPoolImpl returned by Create() is not allowed in
+ // production; it is always leaked. In tests, it can only be destroyed after
+ // JoinForTesting() has returned.
+ ~SchedulerWorkerPoolImpl() override;
+
+ // Creates a SchedulerWorkerPoolImpl labeled |name| with up to |max_threads|
+ // threads of priority |thread_priority|. |io_restriction| indicates whether
+ // Tasks on the constructed worker pool are allowed to make I/O calls.
+ // |re_enqueue_sequence_callback| will be invoked after a worker of this
+ // worker pool tries to run a Task. |task_tracker| is used to handle shutdown
+ // behavior of Tasks. |delayed_task_manager| handles Tasks posted with a
+ // delay. Returns nullptr on failure to create a worker pool with at least one
+ // thread.
+ static std::unique_ptr<SchedulerWorkerPoolImpl> Create(
+ StringPiece name,
+ ThreadPriority thread_priority,
+ size_t max_threads,
+ IORestriction io_restriction,
+ const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+
+ // Waits until all workers are idle.
+ void WaitForAllWorkersIdleForTesting();
+
+ // Joins all workers of this worker pool. Tasks that are already running are
+ // allowed to complete their execution. This can only be called once.
+ void JoinForTesting();
+
+ // SchedulerWorkerPool:
+ scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ ExecutionMode execution_mode) override;
+ void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sequence_sort_key) override;
+ bool PostTaskWithSequence(std::unique_ptr<Task> task,
+ scoped_refptr<Sequence> sequence,
+ SchedulerWorker* worker) override;
+ void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
+ scoped_refptr<Sequence> sequence,
+ SchedulerWorker* worker) override;
+
+ private:
+ class SchedulerWorkerDelegateImpl;
+
+ SchedulerWorkerPoolImpl(StringPiece name,
+ IORestriction io_restriction,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+
+ bool Initialize(
+ ThreadPriority thread_priority,
+ size_t max_threads,
+ const ReEnqueueSequenceCallback& re_enqueue_sequence_callback);
+
+ // Wakes up the last worker from this worker pool to go idle, if any.
+ void WakeUpOneWorker();
+
+ // Adds |worker| to |idle_workers_stack_|.
+ void AddToIdleWorkersStack(SchedulerWorker* worker);
+
+ // Removes |worker| from |idle_workers_stack_|.
+ void RemoveFromIdleWorkersStack(SchedulerWorker* worker);
+
+ // The name of this worker pool, used to label its worker threads.
+ const std::string name_;
+
+  // All workers owned by this worker pool. Only modified during
+  // initialization of the worker pool.
+ std::vector<std::unique_ptr<SchedulerWorker>> workers_;
+
+ // Synchronizes access to |next_worker_index_|.
+ SchedulerLock next_worker_index_lock_;
+
+ // Index of the worker that will be assigned to the next single-threaded
+ // TaskRunner returned by this pool.
+ size_t next_worker_index_ = 0;
+
+ // PriorityQueue from which all threads of this worker pool get work.
+ PriorityQueue shared_priority_queue_;
+
+ // Indicates whether Tasks on this worker pool are allowed to make I/O calls.
+ const IORestriction io_restriction_;
+
+ // Synchronizes access to |idle_workers_stack_| and
+ // |idle_workers_stack_cv_for_testing_|. Has |shared_priority_queue_|'s
+ // lock as its predecessor so that a worker can be pushed to
+ // |idle_workers_stack_| within the scope of a Transaction (more
+ // details in GetWork()).
+ SchedulerLock idle_workers_stack_lock_;
+
+ // Stack of idle workers.
+ SchedulerWorkerStack idle_workers_stack_;
+
+ // Signaled when all workers become idle.
+ std::unique_ptr<ConditionVariable> idle_workers_stack_cv_for_testing_;
+
+ // Signaled once JoinForTesting() has returned.
+ WaitableEvent join_for_testing_returned_;
+
+#if DCHECK_IS_ON()
+ // Signaled when all workers have been created.
+ WaitableEvent workers_created_;
+#endif
+
+ TaskTracker* const task_tracker_;
+ DelayedTaskManager* const delayed_task_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPoolImpl);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
diff --git a/chromium/base/task_scheduler/scheduler_thread_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index 8af2c52ffea..7766f72e0d2 100644
--- a/chromium/base/task_scheduler/scheduler_thread_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/task_scheduler/scheduler_thread_pool_impl.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
#include <stddef.h>
@@ -35,11 +35,11 @@ namespace base {
namespace internal {
namespace {
-const size_t kNumThreadsInThreadPool = 4;
+const size_t kNumWorkersInWorkerPool = 4;
const size_t kNumThreadsPostingTasks = 4;
const size_t kNumTasksPostedPerThread = 150;
-using IORestriction = SchedulerThreadPoolImpl::IORestriction;
+using IORestriction = SchedulerWorkerPoolImpl::IORestriction;
class TestDelayedTaskManager : public DelayedTaskManager {
public:
@@ -56,27 +56,27 @@ class TestDelayedTaskManager : public DelayedTaskManager {
DISALLOW_COPY_AND_ASSIGN(TestDelayedTaskManager);
};
-class TaskSchedulerThreadPoolImplTest
+class TaskSchedulerWorkerPoolImplTest
: public testing::TestWithParam<ExecutionMode> {
protected:
- TaskSchedulerThreadPoolImplTest() = default;
+ TaskSchedulerWorkerPoolImplTest() = default;
void SetUp() override {
- thread_pool_ = SchedulerThreadPoolImpl::Create(
- "TestThreadPoolWithFileIO", ThreadPriority::NORMAL,
- kNumThreadsInThreadPool, IORestriction::ALLOWED,
- Bind(&TaskSchedulerThreadPoolImplTest::ReEnqueueSequenceCallback,
+ worker_pool_ = SchedulerWorkerPoolImpl::Create(
+ "TestWorkerPoolWithFileIO", ThreadPriority::NORMAL,
+ kNumWorkersInWorkerPool, IORestriction::ALLOWED,
+ Bind(&TaskSchedulerWorkerPoolImplTest::ReEnqueueSequenceCallback,
Unretained(this)),
&task_tracker_, &delayed_task_manager_);
- ASSERT_TRUE(thread_pool_);
+ ASSERT_TRUE(worker_pool_);
}
void TearDown() override {
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
- thread_pool_->JoinForTesting();
+ worker_pool_->WaitForAllWorkersIdleForTesting();
+ worker_pool_->JoinForTesting();
}
- std::unique_ptr<SchedulerThreadPoolImpl> thread_pool_;
+ std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_;
TaskTracker task_tracker_;
TestDelayedTaskManager delayed_task_manager_;
@@ -87,10 +87,10 @@ class TaskSchedulerThreadPoolImplTest
// TaskScheduler which would first determine which PriorityQueue the
   // sequence must be re-enqueued into.
const SequenceSortKey sort_key(sequence->GetSortKey());
- thread_pool_->ReEnqueueSequence(std::move(sequence), sort_key);
+ worker_pool_->ReEnqueueSequence(std::move(sequence), sort_key);
}
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerThreadPoolImplTest);
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTest);
};
using PostNestedTask = test::TestTaskFactory::PostNestedTask;
@@ -99,26 +99,26 @@ class ThreadPostingTasks : public SimpleThread {
public:
enum class WaitBeforePostTask {
NO_WAIT,
- WAIT_FOR_ALL_THREADS_IDLE,
+ WAIT_FOR_ALL_WORKERS_IDLE,
};
- // Constructs a thread that posts tasks to |thread_pool| through an
+ // Constructs a thread that posts tasks to |worker_pool| through an
// |execution_mode| task runner. If |wait_before_post_task| is
- // WAIT_FOR_ALL_THREADS_IDLE, the thread waits until all worker threads in
- // |thread_pool| are idle before posting a new task. If |post_nested_task| is
+ // WAIT_FOR_ALL_WORKERS_IDLE, the thread waits until all workers in
+ // |worker_pool| are idle before posting a new task. If |post_nested_task| is
// YES, each task posted by this thread posts another task when it runs.
- ThreadPostingTasks(SchedulerThreadPoolImpl* thread_pool,
+ ThreadPostingTasks(SchedulerWorkerPoolImpl* worker_pool,
ExecutionMode execution_mode,
WaitBeforePostTask wait_before_post_task,
PostNestedTask post_nested_task)
: SimpleThread("ThreadPostingTasks"),
- thread_pool_(thread_pool),
+ worker_pool_(worker_pool),
wait_before_post_task_(wait_before_post_task),
post_nested_task_(post_nested_task),
- factory_(thread_pool_->CreateTaskRunnerWithTraits(TaskTraits(),
+ factory_(worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(),
execution_mode),
execution_mode) {
- DCHECK(thread_pool_);
+ DCHECK(worker_pool_);
}
const test::TestTaskFactory* factory() const { return &factory_; }
@@ -129,14 +129,14 @@ class ThreadPostingTasks : public SimpleThread {
for (size_t i = 0; i < kNumTasksPostedPerThread; ++i) {
if (wait_before_post_task_ ==
- WaitBeforePostTask::WAIT_FOR_ALL_THREADS_IDLE) {
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
+ WaitBeforePostTask::WAIT_FOR_ALL_WORKERS_IDLE) {
+ worker_pool_->WaitForAllWorkersIdleForTesting();
}
EXPECT_TRUE(factory_.PostTask(post_nested_task_, Closure()));
}
}
- SchedulerThreadPoolImpl* const thread_pool_;
+ SchedulerWorkerPoolImpl* const worker_pool_;
const scoped_refptr<TaskRunner> task_runner_;
const WaitBeforePostTask wait_before_post_task_;
const PostNestedTask post_nested_task_;
@@ -153,12 +153,12 @@ void ShouldNotRunCallback() {
} // namespace
-TEST_P(TaskSchedulerThreadPoolImplTest, PostTasks) {
+TEST_P(TaskSchedulerWorkerPoolImplTest, PostTasks) {
// Create threads to post tasks.
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
threads_posting_tasks.push_back(WrapUnique(new ThreadPostingTasks(
- thread_pool_.get(), GetParam(), WaitBeforePostTask::NO_WAIT,
+ worker_pool_.get(), GetParam(), WaitBeforePostTask::NO_WAIT,
PostNestedTask::NO)));
threads_posting_tasks.back()->Start();
}
@@ -169,20 +169,20 @@ TEST_P(TaskSchedulerThreadPoolImplTest, PostTasks) {
thread_posting_tasks->factory()->WaitForAllTasksToRun();
}
- // Wait until all worker threads are idle to be sure that no task accesses
+ // Wait until all workers are idle to be sure that no task accesses
// its TestTaskFactory after |thread_posting_tasks| is destroyed.
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
+ worker_pool_->WaitForAllWorkersIdleForTesting();
}
-TEST_P(TaskSchedulerThreadPoolImplTest, PostTasksWaitAllThreadsIdle) {
- // Create threads to post tasks. To verify that worker threads can sleep and
- // be woken up when new tasks are posted, wait for all threads to become idle
- // before posting a new task.
+TEST_P(TaskSchedulerWorkerPoolImplTest, PostTasksWaitAllWorkersIdle) {
+ // Create threads to post tasks. To verify that workers can sleep and be woken
+ // up when new tasks are posted, wait for all workers to become idle before
+ // posting a new task.
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
threads_posting_tasks.push_back(WrapUnique(new ThreadPostingTasks(
- thread_pool_.get(), GetParam(),
- WaitBeforePostTask::WAIT_FOR_ALL_THREADS_IDLE, PostNestedTask::NO)));
+ worker_pool_.get(), GetParam(),
+ WaitBeforePostTask::WAIT_FOR_ALL_WORKERS_IDLE, PostNestedTask::NO)));
threads_posting_tasks.back()->Start();
}
@@ -192,18 +192,18 @@ TEST_P(TaskSchedulerThreadPoolImplTest, PostTasksWaitAllThreadsIdle) {
thread_posting_tasks->factory()->WaitForAllTasksToRun();
}
- // Wait until all worker threads are idle to be sure that no task accesses
- // its TestTaskFactory after |thread_posting_tasks| is destroyed.
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
+ // Wait until all workers are idle to be sure that no task accesses its
+ // TestTaskFactory after |thread_posting_tasks| is destroyed.
+ worker_pool_->WaitForAllWorkersIdleForTesting();
}
-TEST_P(TaskSchedulerThreadPoolImplTest, NestedPostTasks) {
+TEST_P(TaskSchedulerWorkerPoolImplTest, NestedPostTasks) {
// Create threads to post tasks. Each task posted by these threads will post
// another task when it runs.
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
threads_posting_tasks.push_back(WrapUnique(new ThreadPostingTasks(
- thread_pool_.get(), GetParam(), WaitBeforePostTask::NO_WAIT,
+ worker_pool_.get(), GetParam(), WaitBeforePostTask::NO_WAIT,
PostNestedTask::YES)));
threads_posting_tasks.back()->Start();
}
@@ -214,20 +214,21 @@ TEST_P(TaskSchedulerThreadPoolImplTest, NestedPostTasks) {
thread_posting_tasks->factory()->WaitForAllTasksToRun();
}
- // Wait until all worker threads are idle to be sure that no task accesses
- // its TestTaskFactory after |thread_posting_tasks| is destroyed.
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
+ // Wait until all workers are idle to be sure that no task accesses its
+ // TestTaskFactory after |thread_posting_tasks| is destroyed.
+ worker_pool_->WaitForAllWorkersIdleForTesting();
}
-TEST_P(TaskSchedulerThreadPoolImplTest, PostTasksWithOneAvailableThread) {
- // Post blocking tasks to keep all threads busy except one until |event| is
+TEST_P(TaskSchedulerWorkerPoolImplTest, PostTasksWithOneAvailableWorker) {
+ // Post blocking tasks to keep all workers busy except one until |event| is
// signaled. Use different factories so that tasks are added to different
// sequences and can run simultaneously when the execution mode is SEQUENCED.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
std::vector<std::unique_ptr<test::TestTaskFactory>> blocked_task_factories;
- for (size_t i = 0; i < (kNumThreadsInThreadPool - 1); ++i) {
+ for (size_t i = 0; i < (kNumWorkersInWorkerPool - 1); ++i) {
blocked_task_factories.push_back(WrapUnique(new test::TestTaskFactory(
- thread_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
+ worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
GetParam())));
EXPECT_TRUE(blocked_task_factories.back()->PostTask(
PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
@@ -235,9 +236,9 @@ TEST_P(TaskSchedulerThreadPoolImplTest, PostTasksWithOneAvailableThread) {
}
// Post |kNumTasksPostedPerThread| tasks that should all run despite the fact
- // that only one thread in |thread_pool_| isn't busy.
+ // that only one worker in |worker_pool_| isn't busy.
test::TestTaskFactory short_task_factory(
- thread_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
+ worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
GetParam());
for (size_t i = 0; i < kNumTasksPostedPerThread; ++i)
EXPECT_TRUE(short_task_factory.PostTask(PostNestedTask::NO, Closure()));
@@ -246,21 +247,22 @@ TEST_P(TaskSchedulerThreadPoolImplTest, PostTasksWithOneAvailableThread) {
// Release tasks waiting on |event|.
event.Signal();
- // Wait until all worker threads are idle to be sure that no task accesses
+ // Wait until all workers are idle to be sure that no task accesses
// its TestTaskFactory after it is destroyed.
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
+ worker_pool_->WaitForAllWorkersIdleForTesting();
}
-TEST_P(TaskSchedulerThreadPoolImplTest, Saturate) {
- // Verify that it is possible to have |kNumThreadsInThreadPool|
+TEST_P(TaskSchedulerWorkerPoolImplTest, Saturate) {
+ // Verify that it is possible to have |kNumWorkersInWorkerPool|
// tasks/sequences running simultaneously. Use different factories so that the
// blocking tasks are added to different sequences and can run simultaneously
// when the execution mode is SEQUENCED.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
- for (size_t i = 0; i < kNumThreadsInThreadPool; ++i) {
+ for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
factories.push_back(WrapUnique(new test::TestTaskFactory(
- thread_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
+ worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
GetParam())));
EXPECT_TRUE(factories.back()->PostTask(
PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
@@ -270,27 +272,28 @@ TEST_P(TaskSchedulerThreadPoolImplTest, Saturate) {
// Release tasks waiting on |event|.
event.Signal();
- // Wait until all worker threads are idle to be sure that no task accesses
+ // Wait until all workers are idle to be sure that no task accesses
// its TestTaskFactory after it is destroyed.
- thread_pool_->WaitForAllWorkerThreadsIdleForTesting();
+ worker_pool_->WaitForAllWorkersIdleForTesting();
}
// Verify that a Task can't be posted after shutdown.
-TEST_P(TaskSchedulerThreadPoolImplTest, PostTaskAfterShutdown) {
+TEST_P(TaskSchedulerWorkerPoolImplTest, PostTaskAfterShutdown) {
auto task_runner =
- thread_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam());
+ worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam());
task_tracker_.Shutdown();
EXPECT_FALSE(task_runner->PostTask(FROM_HERE, Bind(&ShouldNotRunCallback)));
}
// Verify that a Task posted with a delay is added to the DelayedTaskManager and
// doesn't run before its delay expires.
-TEST_P(TaskSchedulerThreadPoolImplTest, PostDelayedTask) {
+TEST_P(TaskSchedulerWorkerPoolImplTest, PostDelayedTask) {
EXPECT_TRUE(delayed_task_manager_.GetDelayedRunTime().is_null());
// Post a delayed task.
- WaitableEvent task_ran(true, false);
- EXPECT_TRUE(thread_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam())
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ EXPECT_TRUE(worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam())
->PostDelayedTask(FROM_HERE, Bind(&WaitableEvent::Signal,
Unretained(&task_ran)),
TimeDelta::FromSeconds(10)));
@@ -311,13 +314,13 @@ TEST_P(TaskSchedulerThreadPoolImplTest, PostDelayedTask) {
}
INSTANTIATE_TEST_CASE_P(Parallel,
- TaskSchedulerThreadPoolImplTest,
+ TaskSchedulerWorkerPoolImplTest,
::testing::Values(ExecutionMode::PARALLEL));
INSTANTIATE_TEST_CASE_P(Sequenced,
- TaskSchedulerThreadPoolImplTest,
+ TaskSchedulerWorkerPoolImplTest,
::testing::Values(ExecutionMode::SEQUENCED));
INSTANTIATE_TEST_CASE_P(SingleThreaded,
- TaskSchedulerThreadPoolImplTest,
+ TaskSchedulerWorkerPoolImplTest,
::testing::Values(ExecutionMode::SINGLE_THREADED));
namespace {
@@ -344,40 +347,41 @@ void ExpectIORestriction(IORestriction io_restriction, WaitableEvent* event) {
event->Signal();
}
-class TaskSchedulerThreadPoolImplIORestrictionTest
+class TaskSchedulerWorkerPoolImplIORestrictionTest
: public testing::TestWithParam<IORestriction> {
public:
- TaskSchedulerThreadPoolImplIORestrictionTest() = default;
+ TaskSchedulerWorkerPoolImplIORestrictionTest() = default;
private:
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerThreadPoolImplIORestrictionTest);
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplIORestrictionTest);
};
} // namespace
-TEST_P(TaskSchedulerThreadPoolImplIORestrictionTest, IORestriction) {
+TEST_P(TaskSchedulerWorkerPoolImplIORestrictionTest, IORestriction) {
TaskTracker task_tracker;
DelayedTaskManager delayed_task_manager(Bind(&DoNothing));
- auto thread_pool = SchedulerThreadPoolImpl::Create(
- "TestThreadPoolWithParam", ThreadPriority::NORMAL, 1U, GetParam(),
+ auto worker_pool = SchedulerWorkerPoolImpl::Create(
+ "TestWorkerPoolWithParam", ThreadPriority::NORMAL, 1U, GetParam(),
Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
&delayed_task_manager);
- ASSERT_TRUE(thread_pool);
+ ASSERT_TRUE(worker_pool);
- WaitableEvent task_ran(true, false);
- thread_pool->CreateTaskRunnerWithTraits(TaskTraits(), ExecutionMode::PARALLEL)
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ worker_pool->CreateTaskRunnerWithTraits(TaskTraits(), ExecutionMode::PARALLEL)
->PostTask(FROM_HERE, Bind(&ExpectIORestriction, GetParam(), &task_ran));
task_ran.Wait();
- thread_pool->JoinForTesting();
+ worker_pool->JoinForTesting();
}
INSTANTIATE_TEST_CASE_P(IOAllowed,
- TaskSchedulerThreadPoolImplIORestrictionTest,
+ TaskSchedulerWorkerPoolImplIORestrictionTest,
::testing::Values(IORestriction::ALLOWED));
INSTANTIATE_TEST_CASE_P(IODisallowed,
- TaskSchedulerThreadPoolImplIORestrictionTest,
+ TaskSchedulerWorkerPoolImplIORestrictionTest,
::testing::Values(IORestriction::DISALLOWED));
} // namespace internal
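A pattern that recurs throughout the hunks above is the replacement of the old two-bool WaitableEvent constructor with the enum-based form. A minimal sketch of the two equivalent constructions, assuming only the base/synchronization/waitable_event.h API that this patch itself uses:

// Sketch only: the WaitableEvent construction used before and after this
// patch; the enum-based form is the one the updated tests rely on.
#include "base/synchronization/waitable_event.h"

void WaitableEventConstructionSketch() {
  // Old form (removed in this patch): manual-reset, initially not signaled.
  // base::WaitableEvent event(true /* manual_reset */, false /* signaled */);

  // New form used throughout the updated tests.
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);
  event.Signal();
  event.Wait();  // Returns immediately: the manual-reset event is signaled.
}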
diff --git a/chromium/base/task_scheduler/scheduler_worker_stack.cc b/chromium/base/task_scheduler/scheduler_worker_stack.cc
new file mode 100644
index 00000000000..7746373aa66
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker_stack.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_stack.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+SchedulerWorkerStack::SchedulerWorkerStack() = default;
+
+SchedulerWorkerStack::~SchedulerWorkerStack() = default;
+
+void SchedulerWorkerStack::Push(SchedulerWorker* worker) {
+ DCHECK(std::find(stack_.begin(), stack_.end(), worker) == stack_.end())
+ << "SchedulerWorker already on stack";
+ stack_.push_back(worker);
+}
+
+SchedulerWorker* SchedulerWorkerStack::Pop() {
+ if (IsEmpty())
+ return nullptr;
+ SchedulerWorker* const worker = stack_.back();
+ stack_.pop_back();
+ return worker;
+}
+
+void SchedulerWorkerStack::Remove(const SchedulerWorker* worker) {
+ auto it = std::find(stack_.begin(), stack_.end(), worker);
+ if (it != stack_.end())
+ stack_.erase(it);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker_stack.h b/chromium/base/task_scheduler/scheduler_worker_stack.h
new file mode 100644
index 00000000000..1d1748ca105
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker_stack.h
@@ -0,0 +1,56 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace internal {
+
+class SchedulerWorker;
+
+// A stack of SchedulerWorkers. Supports removal of arbitrary
+// SchedulerWorkers. DCHECKs when a SchedulerWorker is inserted
+// multiple times. SchedulerWorkers are not owned by the stack. Push() is
+// amortized O(1). Pop(), Size() and IsEmpty() are O(1). Remove() is O(n). This
+// class is NOT thread-safe.
+class BASE_EXPORT SchedulerWorkerStack {
+ public:
+ SchedulerWorkerStack();
+ ~SchedulerWorkerStack();
+
+ // Inserts |worker| at the top of the stack. |worker| must not already be on
+ // the stack.
+ void Push(SchedulerWorker* worker);
+
+ // Removes the top SchedulerWorker from the stack and returns it.
+ // Returns nullptr if the stack is empty.
+ SchedulerWorker* Pop();
+
+ // Removes |worker| from the stack.
+ void Remove(const SchedulerWorker* worker);
+
+ // Returns the number of SchedulerWorkers on the stack.
+ size_t Size() const { return stack_.size(); }
+
+ // Returns true if the stack is empty.
+ bool IsEmpty() const { return stack_.empty(); }
+
+ private:
+ std::vector<SchedulerWorker*> stack_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerStack);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
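The header above documents the stack's contract (LIFO order, removal of arbitrary workers, DCHECK on double insertion, no ownership). A minimal sketch of how a pool-like owner might use it to track idle workers, assuming the SchedulerWorker objects are owned and joined elsewhere:

// Sketch only: tracking idle workers with SchedulerWorkerStack; the caller
// retains ownership of |worker_a| and |worker_b|.
#include "base/logging.h"
#include "base/task_scheduler/scheduler_worker_stack.h"

namespace base {
namespace internal {

void IdleStackSketch(SchedulerWorker* worker_a, SchedulerWorker* worker_b) {
  SchedulerWorkerStack idle_workers;
  idle_workers.Push(worker_a);  // |worker_a| becomes idle.
  idle_workers.Push(worker_b);  // |worker_b| becomes idle; it is now on top.

  // Wake the most recently idled worker first (LIFO).
  SchedulerWorker* worker = idle_workers.Pop();
  DCHECK_EQ(worker_b, worker);

  // A worker being joined or destroyed is removed wherever it sits.
  idle_workers.Remove(worker_a);
  DCHECK(idle_workers.IsEmpty());
}

}  // namespace internal
}  // namespace base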
diff --git a/chromium/base/task_scheduler/scheduler_worker_thread_stack_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
index 629b7795224..2a77008b646 100644
--- a/chromium/base/task_scheduler/scheduler_worker_thread_stack_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/task_scheduler/scheduler_worker_thread_stack.h"
+#include "base/task_scheduler/scheduler_worker_stack.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
-#include "base/task_scheduler/scheduler_worker_thread.h"
+#include "base/task_scheduler/scheduler_worker.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task_tracker.h"
#include "base/task_scheduler/test_utils.h"
@@ -20,50 +20,52 @@ namespace internal {
namespace {
-class MockSchedulerWorkerThreadDelegate
- : public SchedulerWorkerThread::Delegate {
+class MockSchedulerWorkerDelegate : public SchedulerWorker::Delegate {
public:
- void OnMainEntry(SchedulerWorkerThread* worker_thread) override {}
- scoped_refptr<Sequence> GetWork(
- SchedulerWorkerThread* worker_thread) override {
+ void OnMainEntry(SchedulerWorker* worker) override {}
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
return nullptr;
}
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
ADD_FAILURE() << "This delegate not expect to have sequences to reenqueue.";
}
TimeDelta GetSleepTimeout() override {
- ADD_FAILURE() <<
- "The mock thread is not expected to be woken before it is shutdown";
return TimeDelta::Max();
}
+ bool CanDetach(SchedulerWorker* worker) override {
+ return false;
+ }
};
-class TaskSchedulerWorkerThreadStackTest : public testing::Test {
+class TaskSchedulerWorkerStackTest : public testing::Test {
protected:
void SetUp() override {
- thread_a_ = SchedulerWorkerThread::Create(
+ worker_a_ = SchedulerWorker::Create(
ThreadPriority::NORMAL,
- WrapUnique(new MockSchedulerWorkerThreadDelegate), &task_tracker_);
- ASSERT_TRUE(thread_a_);
- thread_b_ = SchedulerWorkerThread::Create(
+ WrapUnique(new MockSchedulerWorkerDelegate), &task_tracker_,
+ SchedulerWorker::InitialState::ALIVE);
+ ASSERT_TRUE(worker_a_);
+ worker_b_ = SchedulerWorker::Create(
ThreadPriority::NORMAL,
- WrapUnique(new MockSchedulerWorkerThreadDelegate), &task_tracker_);
- ASSERT_TRUE(thread_b_);
- thread_c_ = SchedulerWorkerThread::Create(
+ WrapUnique(new MockSchedulerWorkerDelegate), &task_tracker_,
+ SchedulerWorker::InitialState::ALIVE);
+ ASSERT_TRUE(worker_b_);
+ worker_c_ = SchedulerWorker::Create(
ThreadPriority::NORMAL,
- WrapUnique(new MockSchedulerWorkerThreadDelegate), &task_tracker_);
- ASSERT_TRUE(thread_c_);
+ WrapUnique(new MockSchedulerWorkerDelegate), &task_tracker_,
+ SchedulerWorker::InitialState::ALIVE);
+ ASSERT_TRUE(worker_c_);
}
void TearDown() override {
- thread_a_->JoinForTesting();
- thread_b_->JoinForTesting();
- thread_c_->JoinForTesting();
+ worker_a_->JoinForTesting();
+ worker_b_->JoinForTesting();
+ worker_c_->JoinForTesting();
}
- std::unique_ptr<SchedulerWorkerThread> thread_a_;
- std::unique_ptr<SchedulerWorkerThread> thread_b_;
- std::unique_ptr<SchedulerWorkerThread> thread_c_;
+ std::unique_ptr<SchedulerWorker> worker_a_;
+ std::unique_ptr<SchedulerWorker> worker_b_;
+ std::unique_ptr<SchedulerWorker> worker_c_;
private:
TaskTracker task_tracker_;
@@ -72,99 +74,99 @@ class TaskSchedulerWorkerThreadStackTest : public testing::Test {
} // namespace
// Verify that Push() and Pop() add/remove values in FIFO order.
-TEST_F(TaskSchedulerWorkerThreadStackTest, PushPop) {
- SchedulerWorkerThreadStack stack;
+TEST_F(TaskSchedulerWorkerStackTest, PushPop) {
+ SchedulerWorkerStack stack;
EXPECT_TRUE(stack.IsEmpty());
EXPECT_EQ(0U, stack.Size());
- stack.Push(thread_a_.get());
+ stack.Push(worker_a_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(1U, stack.Size());
- stack.Push(thread_b_.get());
+ stack.Push(worker_b_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(2U, stack.Size());
- stack.Push(thread_c_.get());
+ stack.Push(worker_c_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(3U, stack.Size());
- EXPECT_EQ(thread_c_.get(), stack.Pop());
+ EXPECT_EQ(worker_c_.get(), stack.Pop());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(2U, stack.Size());
- stack.Push(thread_c_.get());
+ stack.Push(worker_c_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(3U, stack.Size());
- EXPECT_EQ(thread_c_.get(), stack.Pop());
+ EXPECT_EQ(worker_c_.get(), stack.Pop());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(2U, stack.Size());
- EXPECT_EQ(thread_b_.get(), stack.Pop());
+ EXPECT_EQ(worker_b_.get(), stack.Pop());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(1U, stack.Size());
- EXPECT_EQ(thread_a_.get(), stack.Pop());
+ EXPECT_EQ(worker_a_.get(), stack.Pop());
EXPECT_TRUE(stack.IsEmpty());
EXPECT_EQ(0U, stack.Size());
}
// Verify that a value can be removed by Remove().
-TEST_F(TaskSchedulerWorkerThreadStackTest, Remove) {
- SchedulerWorkerThreadStack stack;
+TEST_F(TaskSchedulerWorkerStackTest, Remove) {
+ SchedulerWorkerStack stack;
EXPECT_TRUE(stack.IsEmpty());
EXPECT_EQ(0U, stack.Size());
- stack.Push(thread_a_.get());
+ stack.Push(worker_a_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(1U, stack.Size());
- stack.Push(thread_b_.get());
+ stack.Push(worker_b_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(2U, stack.Size());
- stack.Push(thread_c_.get());
+ stack.Push(worker_c_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(3U, stack.Size());
- stack.Remove(thread_b_.get());
+ stack.Remove(worker_b_.get());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(2U, stack.Size());
- EXPECT_EQ(thread_c_.get(), stack.Pop());
+ EXPECT_EQ(worker_c_.get(), stack.Pop());
EXPECT_FALSE(stack.IsEmpty());
EXPECT_EQ(1U, stack.Size());
- EXPECT_EQ(thread_a_.get(), stack.Pop());
+ EXPECT_EQ(worker_a_.get(), stack.Pop());
EXPECT_TRUE(stack.IsEmpty());
EXPECT_EQ(0U, stack.Size());
}
// Verify that a value can be pushed again after it has been removed.
-TEST_F(TaskSchedulerWorkerThreadStackTest, PushAfterRemove) {
- SchedulerWorkerThreadStack stack;
+TEST_F(TaskSchedulerWorkerStackTest, PushAfterRemove) {
+ SchedulerWorkerStack stack;
EXPECT_EQ(0U, stack.Size());
EXPECT_TRUE(stack.IsEmpty());
- stack.Push(thread_a_.get());
+ stack.Push(worker_a_.get());
EXPECT_EQ(1U, stack.Size());
EXPECT_FALSE(stack.IsEmpty());
- stack.Remove(thread_a_.get());
+ stack.Remove(worker_a_.get());
EXPECT_EQ(0U, stack.Size());
EXPECT_TRUE(stack.IsEmpty());
- stack.Push(thread_a_.get());
+ stack.Push(worker_a_.get());
EXPECT_EQ(1U, stack.Size());
EXPECT_FALSE(stack.IsEmpty());
}
// Verify that Push() DCHECKs when a value is inserted twice.
-TEST_F(TaskSchedulerWorkerThreadStackTest, PushTwice) {
- SchedulerWorkerThreadStack stack;
- stack.Push(thread_a_.get());
- EXPECT_DCHECK_DEATH({ stack.Push(thread_a_.get()); }, "");
+TEST_F(TaskSchedulerWorkerStackTest, PushTwice) {
+ SchedulerWorkerStack stack;
+ stack.Push(worker_a_.get());
+ EXPECT_DCHECK_DEATH({ stack.Push(worker_a_.get()); }, "");
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker_thread.cc b/chromium/base/task_scheduler/scheduler_worker_thread.cc
deleted file mode 100644
index ca65a6d1fdc..00000000000
--- a/chromium/base/task_scheduler/scheduler_worker_thread.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scheduler_worker_thread.h"
-
-#include <stddef.h>
-
-#include <utility>
-
-#include "base/logging.h"
-#include "base/task_scheduler/task_tracker.h"
-
-namespace base {
-namespace internal {
-
-std::unique_ptr<SchedulerWorkerThread> SchedulerWorkerThread::Create(
- ThreadPriority thread_priority,
- std::unique_ptr<Delegate> delegate,
- TaskTracker* task_tracker) {
- std::unique_ptr<SchedulerWorkerThread> worker_thread(
- new SchedulerWorkerThread(thread_priority, std::move(delegate),
- task_tracker));
-
- if (worker_thread->thread_handle_.is_null())
- return nullptr;
- return worker_thread;
-}
-
-SchedulerWorkerThread::~SchedulerWorkerThread() {
- DCHECK(ShouldExitForTesting());
-}
-
-void SchedulerWorkerThread::WakeUp() {
- wake_up_event_.Signal();
-}
-
-void SchedulerWorkerThread::JoinForTesting() {
- {
- AutoSchedulerLock auto_lock(should_exit_for_testing_lock_);
- should_exit_for_testing_ = true;
- }
- WakeUp();
- PlatformThread::Join(thread_handle_);
-}
-
-SchedulerWorkerThread::SchedulerWorkerThread(ThreadPriority thread_priority,
- std::unique_ptr<Delegate> delegate,
- TaskTracker* task_tracker)
- : wake_up_event_(false, false),
- delegate_(std::move(delegate)),
- task_tracker_(task_tracker) {
- DCHECK(delegate_);
- DCHECK(task_tracker_);
-
- const size_t kDefaultStackSize = 0;
- PlatformThread::CreateWithPriority(kDefaultStackSize, this, &thread_handle_,
- thread_priority);
-}
-
-void SchedulerWorkerThread::ThreadMain() {
- delegate_->OnMainEntry(this);
-
- // A SchedulerWorkerThread starts out sleeping.
- wake_up_event_.Wait();
-
- while (!task_tracker_->shutdown_completed() && !ShouldExitForTesting()) {
- // Get the sequence containing the next task to execute.
- scoped_refptr<Sequence> sequence = delegate_->GetWork(this);
-
- if (!sequence) {
- TimeDelta sleep_time = delegate_->GetSleepTimeout();
- if (sleep_time.is_max()) {
- // Calling TimedWait with TimeDelta::Max is not recommended per
- // http://crbug.com/465948.
- wake_up_event_.Wait();
- } else {
- wake_up_event_.TimedWait(sleep_time);
- }
- continue;
- }
-
- task_tracker_->RunTask(sequence->PeekTask());
-
- const bool sequence_became_empty = sequence->PopTask();
-
- // If |sequence| isn't empty immediately after the pop, re-enqueue it to
- // maintain the invariant that a non-empty Sequence is always referenced by
- // either a PriorityQueue or a SchedulerWorkerThread. If it is empty and
- // there are live references to it, it will be enqueued when a Task is added
- // to it. Otherwise, it will be destroyed at the end of this scope.
- if (!sequence_became_empty)
- delegate_->ReEnqueueSequence(std::move(sequence));
-
- // Calling WakeUp() guarantees that this SchedulerWorkerThread will run
- // Tasks from Sequences returned by the GetWork() method of |delegate_|
- // until it returns nullptr. Resetting |wake_up_event_| here doesn't break
- // this invariant and avoids a useless loop iteration before going to sleep
- // if WakeUp() is called while this SchedulerWorkerThread is awake.
- wake_up_event_.Reset();
- }
-}
-
-bool SchedulerWorkerThread::ShouldExitForTesting() const {
- AutoSchedulerLock auto_lock(should_exit_for_testing_lock_);
- return should_exit_for_testing_;
-}
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker_thread.h b/chromium/base/task_scheduler/scheduler_worker_thread.h
deleted file mode 100644
index b6f0860da0e..00000000000
--- a/chromium/base/task_scheduler/scheduler_worker_thread.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_THREAD_H_
-#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_THREAD_H_
-
-#include <memory>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/task_scheduler/scheduler_lock.h"
-#include "base/task_scheduler/sequence.h"
-#include "base/threading/platform_thread.h"
-#include "base/time/time.h"
-
-namespace base {
-namespace internal {
-
-class TaskTracker;
-
-// A thread that runs Tasks from Sequences returned by a delegate.
-//
-// A SchedulerWorkerThread starts out sleeping. It is woken up by a call to
-// WakeUp(). After a wake-up, a SchedulerWorkerThread runs Tasks from Sequences
-// returned by the GetWork() method of its delegate as long as it doesn't return
-// nullptr. It also periodically checks with its TaskTracker whether shutdown
-// has completed and exits when it has.
-//
-// This class is thread-safe.
-class BASE_EXPORT SchedulerWorkerThread : public PlatformThread::Delegate {
- public:
- // Delegate interface for SchedulerWorkerThread. The methods are always called
- // from the thread managed by the SchedulerWorkerThread instance.
- class Delegate {
- public:
- virtual ~Delegate() = default;
-
- // Called by |worker_thread| when it enters its main function.
- virtual void OnMainEntry(SchedulerWorkerThread* worker_thread) = 0;
-
- // Called by |worker_thread| to get a Sequence from which to run a Task.
- virtual scoped_refptr<Sequence> GetWork(
- SchedulerWorkerThread* worker_thread) = 0;
-
- // Called when |sequence| isn't empty after the SchedulerWorkerThread pops a
- // Task from it. |sequence| is the last Sequence returned by GetWork().
- virtual void ReEnqueueSequence(scoped_refptr<Sequence> sequence) = 0;
-
- // Called by |worker_thread| to determine how long to sleep before the next
- // call to GetWork(). GetWork() may be called before this timeout expires
- // if the thread's WakeUp() method is called.
- virtual TimeDelta GetSleepTimeout() = 0;
- };
-
- // Creates a SchedulerWorkerThread with priority |thread_priority| that runs
- // Tasks from Sequences returned by |delegate|. |task_tracker| is used to
- // handle shutdown behavior of Tasks. Returns nullptr if creating the
- // underlying platform thread fails.
- static std::unique_ptr<SchedulerWorkerThread> Create(
- ThreadPriority thread_priority,
- std::unique_ptr<Delegate> delegate,
- TaskTracker* task_tracker);
-
- // Destroying a SchedulerWorkerThread in production is not allowed; it is
- // always leaked. In tests, it can only be destroyed after JoinForTesting()
- // has returned.
- ~SchedulerWorkerThread() override;
-
- // Wakes up this SchedulerWorkerThread if it wasn't already awake. After this
- // is called, this SchedulerWorkerThread will run Tasks from Sequences
- // returned by the GetWork() method of its delegate until it returns nullptr.
- void WakeUp();
-
- SchedulerWorkerThread::Delegate* delegate() { return delegate_.get(); }
-
- // Joins this SchedulerWorkerThread. If a Task is already running, it will be
- // allowed to complete its execution. This can only be called once.
- void JoinForTesting();
-
- private:
- SchedulerWorkerThread(ThreadPriority thread_priority,
- std::unique_ptr<Delegate> delegate,
- TaskTracker* task_tracker);
-
- // PlatformThread::Delegate:
- void ThreadMain() override;
-
- bool ShouldExitForTesting() const;
-
- // Platform thread managed by this SchedulerWorkerThread.
- PlatformThreadHandle thread_handle_;
-
- // Event signaled to wake up this SchedulerWorkerThread.
- WaitableEvent wake_up_event_;
-
- const std::unique_ptr<Delegate> delegate_;
- TaskTracker* const task_tracker_;
-
- // Synchronizes access to |should_exit_for_testing_|.
- mutable SchedulerLock should_exit_for_testing_lock_;
-
- // True once JoinForTesting() has been called.
- bool should_exit_for_testing_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerThread);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_THREAD_H_
diff --git a/chromium/base/task_scheduler/scheduler_worker_thread_stack.cc b/chromium/base/task_scheduler/scheduler_worker_thread_stack.cc
deleted file mode 100644
index d74ed525742..00000000000
--- a/chromium/base/task_scheduler/scheduler_worker_thread_stack.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scheduler_worker_thread_stack.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-
-namespace base {
-namespace internal {
-
-SchedulerWorkerThreadStack::SchedulerWorkerThreadStack() = default;
-
-SchedulerWorkerThreadStack::~SchedulerWorkerThreadStack() = default;
-
-void SchedulerWorkerThreadStack::Push(SchedulerWorkerThread* worker_thread) {
- DCHECK(std::find(stack_.begin(), stack_.end(), worker_thread) == stack_.end())
- << "SchedulerWorkerThread already on stack";
- stack_.push_back(worker_thread);
-}
-
-SchedulerWorkerThread* SchedulerWorkerThreadStack::Pop() {
- if (IsEmpty())
- return nullptr;
- SchedulerWorkerThread* const worker_thread = stack_.back();
- stack_.pop_back();
- return worker_thread;
-}
-
-void SchedulerWorkerThreadStack::Remove(
- const SchedulerWorkerThread* worker_thread) {
- auto it = std::find(stack_.begin(), stack_.end(), worker_thread);
- if (it != stack_.end())
- stack_.erase(it);
-}
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker_thread_stack.h b/chromium/base/task_scheduler/scheduler_worker_thread_stack.h
deleted file mode 100644
index f669af1dcd2..00000000000
--- a/chromium/base/task_scheduler/scheduler_worker_thread_stack.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_THREAD_STACK_H_
-#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_THREAD_STACK_H_
-
-#include <stddef.h>
-
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-
-namespace base {
-namespace internal {
-
-class SchedulerWorkerThread;
-
-// A stack of SchedulerWorkerThreads. Supports removal of arbitrary
-// SchedulerWorkerThreads. DCHECKs when a SchedulerWorkerThread is inserted
-// multiple times. SchedulerWorkerThreads are not owned by the stack. Push() is
-// amortized O(1). Pop(), Size() and Empty() are O(1). Remove is O(n). This
-// class is NOT thread-safe.
-class BASE_EXPORT SchedulerWorkerThreadStack {
- public:
- SchedulerWorkerThreadStack();
- ~SchedulerWorkerThreadStack();
-
- // Inserts |worker_thread| at the top of the stack. |worker_thread| must not
- // already be on the stack.
- void Push(SchedulerWorkerThread* worker_thread);
-
- // Removes the top SchedulerWorkerThread from the stack and returns it.
- // Returns nullptr if the stack is empty.
- SchedulerWorkerThread* Pop();
-
- // Removes |worker_thread| from the stack.
- void Remove(const SchedulerWorkerThread* worker_thread);
-
- // Returns the number of SchedulerWorkerThreads on the stack.
- size_t Size() const { return stack_.size(); }
-
- // Returns true if the stack is empty.
- bool IsEmpty() const { return stack_.empty(); }
-
- private:
- std::vector<SchedulerWorkerThread*> stack_;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerThreadStack);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_THREAD_STACK_H_
diff --git a/chromium/base/task_scheduler/scheduler_worker_thread_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
index 9fb1ecc49d7..437bbedfdb5 100644
--- a/chromium/base/task_scheduler/scheduler_worker_thread_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/task_scheduler/scheduler_worker_thread.h"
+#include "base/task_scheduler/scheduler_worker.h"
#include <stddef.h>
@@ -28,25 +28,28 @@ namespace {
const size_t kNumSequencesPerTest = 150;
// The test parameter is the number of Tasks per Sequence returned by GetWork().
-class TaskSchedulerWorkerThreadTest : public testing::TestWithParam<size_t> {
+class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
protected:
- TaskSchedulerWorkerThreadTest()
- : main_entry_called_(true, false),
+ TaskSchedulerWorkerTest()
+ : main_entry_called_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
num_get_work_cv_(lock_.CreateConditionVariable()),
- worker_thread_set_(true, false) {}
+ worker_set_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void SetUp() override {
- worker_thread_ = SchedulerWorkerThread::Create(
+ worker_ = SchedulerWorker::Create(
ThreadPriority::NORMAL,
- WrapUnique(new TestSchedulerWorkerThreadDelegate(this)),
- &task_tracker_);
- ASSERT_TRUE(worker_thread_);
- worker_thread_set_.Signal();
+ WrapUnique(new TestSchedulerWorkerDelegate(this)),
+ &task_tracker_,
+ SchedulerWorker::InitialState::ALIVE);
+ ASSERT_TRUE(worker_);
+ worker_set_.Signal();
main_entry_called_.Wait();
}
void TearDown() override {
- worker_thread_->JoinForTesting();
+ worker_->JoinForTesting();
}
size_t TasksPerSequence() const { return GetParam(); }
@@ -84,19 +87,19 @@ class TaskSchedulerWorkerThreadTest : public testing::TestWithParam<size_t> {
return re_enqueued_sequences_;
}
- std::unique_ptr<SchedulerWorkerThread> worker_thread_;
+ std::unique_ptr<SchedulerWorker> worker_;
private:
- class TestSchedulerWorkerThreadDelegate
- : public SchedulerWorkerThread::Delegate {
+ class TestSchedulerWorkerDelegate
+ : public SchedulerWorker::Delegate {
public:
- TestSchedulerWorkerThreadDelegate(TaskSchedulerWorkerThreadTest* outer)
+ TestSchedulerWorkerDelegate(TaskSchedulerWorkerTest* outer)
: outer_(outer) {}
- // SchedulerWorkerThread::Delegate:
- void OnMainEntry(SchedulerWorkerThread* worker_thread) override {
- outer_->worker_thread_set_.Wait();
- EXPECT_EQ(outer_->worker_thread_.get(), worker_thread);
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(SchedulerWorker* worker) override {
+ outer_->worker_set_.Wait();
+ EXPECT_EQ(outer_->worker_.get(), worker);
// Without synchronization, OnMainEntry() could be called twice without
// generating an error.
@@ -105,9 +108,8 @@ class TaskSchedulerWorkerThreadTest : public testing::TestWithParam<size_t> {
outer_->main_entry_called_.Signal();
}
- scoped_refptr<Sequence> GetWork(
- SchedulerWorkerThread* worker_thread) override {
- EXPECT_EQ(outer_->worker_thread_.get(), worker_thread);
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+ EXPECT_EQ(outer_->worker_.get(), worker);
{
AutoSchedulerLock auto_lock(outer_->lock_);
@@ -129,7 +131,7 @@ class TaskSchedulerWorkerThreadTest : public testing::TestWithParam<size_t> {
scoped_refptr<Sequence> sequence(new Sequence);
for (size_t i = 0; i < outer_->TasksPerSequence(); ++i) {
std::unique_ptr<Task> task(new Task(
- FROM_HERE, Bind(&TaskSchedulerWorkerThreadTest::RunTaskCallback,
+ FROM_HERE, Bind(&TaskSchedulerWorkerTest::RunTaskCallback,
Unretained(outer_)),
TaskTraits(), TimeDelta()));
EXPECT_TRUE(outer_->task_tracker_.WillPostTask(task.get()));
@@ -170,8 +172,12 @@ class TaskSchedulerWorkerThreadTest : public testing::TestWithParam<size_t> {
return TimeDelta::Max();
}
+ bool CanDetach(SchedulerWorker* worker) override {
+ return false;
+ }
+
private:
- TaskSchedulerWorkerThreadTest* outer_;
+ TaskSchedulerWorkerTest* outer_;
};
void RunTaskCallback() {
@@ -210,15 +216,15 @@ class TaskSchedulerWorkerThreadTest : public testing::TestWithParam<size_t> {
// Number of times that RunTaskCallback() has been called.
size_t num_run_tasks_ = 0;
- // Signaled after |worker_thread_| is set.
- WaitableEvent worker_thread_set_;
+ // Signaled after |worker_| is set.
+ WaitableEvent worker_set_;
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerThreadTest);
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerTest);
};
// Verify that when GetWork() continuously returns Sequences, all Tasks in these
-// Sequences run successfully. The test wakes up the SchedulerWorkerThread once.
-TEST_P(TaskSchedulerWorkerThreadTest, ContinuousWork) {
+// Sequences run successfully. The test wakes up the SchedulerWorker once.
+TEST_P(TaskSchedulerWorkerTest, ContinuousWork) {
// Set GetWork() to return |kNumSequencesPerTest| Sequences before starting to
// return nullptr.
SetNumSequencesToCreate(kNumSequencesPerTest);
@@ -228,17 +234,17 @@ TEST_P(TaskSchedulerWorkerThreadTest, ContinuousWork) {
const size_t kExpectedNumGetWork = kNumSequencesPerTest + 1;
SetMaxGetWork(kExpectedNumGetWork);
- // Wake up |worker_thread_| and wait until GetWork() has been invoked the
+ // Wake up |worker_| and wait until GetWork() has been invoked the
// expected amount of times.
- worker_thread_->WakeUp();
+ worker_->WakeUp();
WaitForNumGetWork(kExpectedNumGetWork);
// All tasks should have run.
EXPECT_EQ(kNumSequencesPerTest, NumRunTasks());
// If Sequences returned by GetWork() contain more than one Task, they aren't
- // empty after the worker thread pops Tasks from them and thus should be
- // returned to EnqueueSequence().
+ // empty after the worker pops Tasks from them and thus should be returned to
+ // EnqueueSequence().
if (TasksPerSequence() > 1)
EXPECT_EQ(CreatedSequences(), EnqueuedSequences());
else
@@ -247,8 +253,8 @@ TEST_P(TaskSchedulerWorkerThreadTest, ContinuousWork) {
// Verify that when GetWork() alternates between returning a Sequence and
// returning nullptr, all Tasks in the returned Sequences run successfully. The
-// test wakes up the SchedulerWorkerThread once for each Sequence.
-TEST_P(TaskSchedulerWorkerThreadTest, IntermittentWork) {
+// test wakes up the SchedulerWorker once for each Sequence.
+TEST_P(TaskSchedulerWorkerTest, IntermittentWork) {
for (size_t i = 0; i < kNumSequencesPerTest; ++i) {
// Set GetWork() to return 1 Sequence before starting to return
// nullptr.
@@ -259,17 +265,17 @@ TEST_P(TaskSchedulerWorkerThreadTest, IntermittentWork) {
const size_t expected_num_get_work = 2 * (i + 1);
SetMaxGetWork(expected_num_get_work);
- // Wake up |worker_thread_| and wait until GetWork() has been invoked
+ // Wake up |worker_| and wait until GetWork() has been invoked
// the expected amount of times.
- worker_thread_->WakeUp();
+ worker_->WakeUp();
WaitForNumGetWork(expected_num_get_work);
// The Task should have run
EXPECT_EQ(i + 1, NumRunTasks());
// If Sequences returned by GetWork() contain more than one Task, they
- // aren't empty after the worker thread pops Tasks from them and thus should
- // be returned to EnqueueSequence().
+ // aren't empty after the worker pops Tasks from them and thus should be
+ // returned to EnqueueSequence().
if (TasksPerSequence() > 1)
EXPECT_EQ(CreatedSequences(), EnqueuedSequences());
else
@@ -278,12 +284,143 @@ TEST_P(TaskSchedulerWorkerThreadTest, IntermittentWork) {
}
INSTANTIATE_TEST_CASE_P(OneTaskPerSequence,
- TaskSchedulerWorkerThreadTest,
+ TaskSchedulerWorkerTest,
::testing::Values(1));
INSTANTIATE_TEST_CASE_P(TwoTasksPerSequence,
- TaskSchedulerWorkerThreadTest,
+ TaskSchedulerWorkerTest,
::testing::Values(2));
+namespace {
+
+class ControllableDetachDelegate : public SchedulerWorker::Delegate {
+ public:
+ ControllableDetachDelegate()
+ : work_processed_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ detach_requested_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ ~ControllableDetachDelegate() override = default;
+
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(SchedulerWorker* worker) override {}
+
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker)
+ override {
+ // Sends one item of work to signal |work_processed_|. On subsequent calls,
+ // sends nullptr to indicate there's no more work to be done.
+ if (work_requested_)
+ return nullptr;
+
+ work_requested_ = true;
+ scoped_refptr<Sequence> sequence(new Sequence);
+ std::unique_ptr<Task> task(new Task(
+ FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&work_processed_)),
+ TaskTraits(), TimeDelta()));
+ sequence->PushTask(std::move(task));
+ return sequence;
+ }
+
+ void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+ ADD_FAILURE() <<
+ "GetWork() returns a sequence of one, so there's nothing to reenqueue.";
+ }
+
+ TimeDelta GetSleepTimeout() override {
+ return TimeDelta::Max();
+ }
+
+ bool CanDetach(SchedulerWorker* worker) override {
+ detach_requested_.Signal();
+ return can_detach_;
+ }
+
+ void WaitForWorkToRun() {
+ work_processed_.Wait();
+ }
+
+ void WaitForDetachRequest() {
+ detach_requested_.Wait();
+ }
+
+ void ResetState() {
+ work_requested_ = false;
+ work_processed_.Reset();
+ detach_requested_.Reset();
+ }
+
+ void set_can_detach(bool can_detach) { can_detach_ = can_detach; }
+
+ private:
+ bool work_requested_ = false;
+ bool can_detach_ = false;
+ WaitableEvent work_processed_;
+ WaitableEvent detach_requested_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControllableDetachDelegate);
+};
+
+} // namespace
+
+TEST(TaskSchedulerWorkerTest, WorkerDetaches) {
+ TaskTracker task_tracker;
+ // Will be owned by SchedulerWorker.
+ ControllableDetachDelegate* delegate = new ControllableDetachDelegate;
+ delegate->set_can_detach(true);
+ std::unique_ptr<SchedulerWorker> worker =
+ SchedulerWorker::Create(
+ ThreadPriority::NORMAL, WrapUnique(delegate), &task_tracker,
+ SchedulerWorker::InitialState::ALIVE);
+ worker->WakeUp();
+ delegate->WaitForWorkToRun();
+ delegate->WaitForDetachRequest();
+ // Sleep to give a chance for the detach to happen. A yield is too short.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+ ASSERT_FALSE(worker->ThreadAliveForTesting());
+}
+
+TEST(TaskSchedulerWorkerTest, WorkerDetachesAndWakes) {
+ TaskTracker task_tracker;
+ // Will be owned by SchedulerWorker.
+ ControllableDetachDelegate* delegate = new ControllableDetachDelegate;
+ delegate->set_can_detach(true);
+ std::unique_ptr<SchedulerWorker> worker =
+ SchedulerWorker::Create(
+ ThreadPriority::NORMAL, WrapUnique(delegate), &task_tracker,
+ SchedulerWorker::InitialState::ALIVE);
+ worker->WakeUp();
+ delegate->WaitForWorkToRun();
+ delegate->WaitForDetachRequest();
+ // Sleep to give a chance for the detach to happen. A yield is too short.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+ ASSERT_FALSE(worker->ThreadAliveForTesting());
+
+ delegate->ResetState();
+ delegate->set_can_detach(false);
+ worker->WakeUp();
+ delegate->WaitForWorkToRun();
+ delegate->WaitForDetachRequest();
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+ ASSERT_TRUE(worker->ThreadAliveForTesting());
+ worker->JoinForTesting();
+}
+
+TEST(TaskSchedulerWorkerTest, CreateDetached) {
+ TaskTracker task_tracker;
+ // Will be owned by SchedulerWorker.
+ ControllableDetachDelegate* delegate = new ControllableDetachDelegate;
+ std::unique_ptr<SchedulerWorker> worker =
+ SchedulerWorker::Create(
+ ThreadPriority::NORMAL, WrapUnique(delegate), &task_tracker,
+ SchedulerWorker::InitialState::DETACHED);
+ ASSERT_FALSE(worker->ThreadAliveForTesting());
+ worker->WakeUp();
+ delegate->WaitForWorkToRun();
+ delegate->WaitForDetachRequest();
+ ASSERT_TRUE(worker->ThreadAliveForTesting());
+ worker->JoinForTesting();
+}
+
} // namespace
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/sequence.h b/chromium/base/task_scheduler/sequence.h
index 37cb8d5b9aa..3fa037fa358 100644
--- a/chromium/base/task_scheduler/sequence.h
+++ b/chromium/base/task_scheduler/sequence.h
@@ -26,8 +26,8 @@ namespace internal {
// Note: there is a known refcounted-ownership cycle in the Scheduler
// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
// This is okay so long as the other owners of Sequence (PriorityQueue and
-// SchedulerWorkerThread in alternance and
-// SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::GetWork()
+// SchedulerWorker in alternation and
+// SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork()
// temporarily) keep running it (and taking Tasks from it as a result). A
// dangling reference cycle would only occur should they release their reference
// to it while it's not empty. In other words, it is only correct for them to
diff --git a/chromium/base/task_scheduler/task_scheduler.cc b/chromium/base/task_scheduler/task_scheduler.cc
index d4a5ca371a2..fd72c12ecd6 100644
--- a/chromium/base/task_scheduler/task_scheduler.cc
+++ b/chromium/base/task_scheduler/task_scheduler.cc
@@ -5,7 +5,6 @@
#include "base/task_scheduler/task_scheduler.h"
#include "base/logging.h"
-#include "base/task_scheduler/task_scheduler_impl.h"
namespace base {
@@ -23,11 +22,6 @@ void TaskScheduler::SetInstance(std::unique_ptr<TaskScheduler> task_scheduler) {
}
// static
-void TaskScheduler::InitializeDefaultTaskScheduler() {
- SetInstance(internal::TaskSchedulerImpl::Create());
-}
-
-// static
TaskScheduler* TaskScheduler::GetInstance() {
return g_task_scheduler;
}
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index b8da64b891a..5f851b491cc 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -48,24 +48,17 @@ class BASE_EXPORT TaskScheduler {
// called once.
virtual void Shutdown() = 0;
- // SetInstance() and InitializeDefaultTaskScheduler() register a TaskScheduler
- // to handle tasks posted through the post_task.h API for this process. The
- // registered TaskScheduler will only be deleted when a new TaskScheduler is
- // registered (i.e. otherwise leaked on shutdown). The methods must not be
- // called when TaskRunners created by the previous TaskScheduler are still
- // alive. The methods are not thread-safe; proper synchronization is required
- // to use the post_task.h API after registering a new TaskScheduler.
-
// Registers |task_scheduler| to handle tasks posted through the post_task.h
- // API for this process.
+ // API for this process. The registered TaskScheduler will only be deleted
+ // when a new TaskScheduler is registered (i.e. otherwise leaked on shutdown).
+ // This must not be called when TaskRunners created by the previous
+ // TaskScheduler are still alive. This method is not thread-safe; proper
+ // synchronization is required to use the post_task.h API after registering a
+ // new TaskScheduler.
static void SetInstance(std::unique_ptr<TaskScheduler> task_scheduler);
- // Initializes the default task scheduler for this process.
- static void InitializeDefaultTaskScheduler();
-
- // Retrieve the TaskScheduler set via SetInstance() or
- // InitializeDefaultTaskScheduler(). This should be used very rarely; most
- // users of TaskScheduler should use the post_task.h API.
+ // Retrieve the TaskScheduler set via SetInstance(). This should be used very
+ // rarely; most users of TaskScheduler should use the post_task.h API.
static TaskScheduler* GetInstance();
};
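With InitializeDefaultTaskScheduler() removed, SetInstance() is the single registration point described in the revised comment. A minimal sketch of registering a scheduler for the post_task.h API, assuming a TaskSchedulerImpl already built via the WorkerPoolCreationArgs-based Create() shown in task_scheduler_impl.h below:

// Sketch only: registering a scheduler before anything uses post_task.h.
#include <memory>
#include <utility>

#include "base/logging.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_scheduler_impl.h"

void RegisterSchedulerSketch(
    std::unique_ptr<base::internal::TaskSchedulerImpl> scheduler) {
  // Not thread-safe; must precede any use of the post_task.h API.
  base::TaskScheduler::SetInstance(std::move(scheduler));
  DCHECK(base::TaskScheduler::GetInstance());
}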
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index 03d09069800..2f5b68f6a78 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -10,7 +10,6 @@
#include "base/bind_helpers.h"
#include "base/memory/ptr_util.h"
#include "base/task_scheduler/scheduler_service_thread.h"
-#include "base/task_scheduler/scheduler_thread_pool_impl.h"
#include "base/task_scheduler/sequence_sort_key.h"
#include "base/task_scheduler/task.h"
#include "base/time/time.h"
@@ -19,9 +18,13 @@ namespace base {
namespace internal {
// static
-std::unique_ptr<TaskSchedulerImpl> TaskSchedulerImpl::Create() {
- std::unique_ptr<TaskSchedulerImpl> scheduler(new TaskSchedulerImpl);
- scheduler->Initialize();
+std::unique_ptr<TaskSchedulerImpl> TaskSchedulerImpl::Create(
+ const std::vector<WorkerPoolCreationArgs>& worker_pools,
+ const WorkerPoolIndexForTraitsCallback&
+ worker_pool_index_for_traits_callback) {
+ std::unique_ptr<TaskSchedulerImpl> scheduler(
+ new TaskSchedulerImpl(worker_pool_index_for_traits_callback));
+ scheduler->Initialize(worker_pools);
return scheduler;
}
@@ -36,7 +39,7 @@ void TaskSchedulerImpl::PostTaskWithTraits(
const TaskTraits& traits,
const Closure& task) {
// Post |task| as part of a one-off single-task Sequence.
- GetThreadPoolForTraits(traits)->PostTaskWithSequence(
+ GetWorkerPoolForTraits(traits)->PostTaskWithSequence(
WrapUnique(new Task(from_here, task, traits, TimeDelta())),
make_scoped_refptr(new Sequence), nullptr);
}
@@ -44,7 +47,7 @@ void TaskSchedulerImpl::PostTaskWithTraits(
scoped_refptr<TaskRunner> TaskSchedulerImpl::CreateTaskRunnerWithTraits(
const TaskTraits& traits,
ExecutionMode execution_mode) {
- return GetThreadPoolForTraits(traits)->CreateTaskRunnerWithTraits(
+ return GetWorkerPoolForTraits(traits)->CreateTaskRunnerWithTraits(
traits, execution_mode);
}
@@ -57,79 +60,58 @@ void TaskSchedulerImpl::JoinForTesting() {
#if DCHECK_IS_ON()
DCHECK(!join_for_testing_returned_.IsSignaled());
#endif
- background_thread_pool_->JoinForTesting();
- background_file_io_thread_pool_->JoinForTesting();
- normal_thread_pool_->JoinForTesting();
- normal_file_io_thread_pool_->JoinForTesting();
+ for (const auto& worker_pool : worker_pools_)
+ worker_pool->JoinForTesting();
service_thread_->JoinForTesting();
#if DCHECK_IS_ON()
join_for_testing_returned_.Signal();
#endif
}
-TaskSchedulerImpl::TaskSchedulerImpl()
- : delayed_task_manager_(Bind(&TaskSchedulerImpl::OnDelayedRunTimeUpdated,
- Unretained(this)))
+TaskSchedulerImpl::TaskSchedulerImpl(const WorkerPoolIndexForTraitsCallback&
+ worker_pool_index_for_traits_callback)
+ : delayed_task_manager_(
+ Bind(&TaskSchedulerImpl::OnDelayedRunTimeUpdated, Unretained(this))),
+ worker_pool_index_for_traits_callback_(
+ worker_pool_index_for_traits_callback)
#if DCHECK_IS_ON()
- ,
- join_for_testing_returned_(true, false)
+ ,
+ join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED)
#endif
{
+ DCHECK(!worker_pool_index_for_traits_callback_.is_null());
}
-void TaskSchedulerImpl::Initialize() {
- using IORestriction = SchedulerThreadPoolImpl::IORestriction;
+void TaskSchedulerImpl::Initialize(
+ const std::vector<WorkerPoolCreationArgs>& worker_pools) {
+ DCHECK(!worker_pools.empty());
- const SchedulerThreadPoolImpl::ReEnqueueSequenceCallback
+ const SchedulerWorkerPoolImpl::ReEnqueueSequenceCallback
re_enqueue_sequence_callback =
Bind(&TaskSchedulerImpl::ReEnqueueSequenceCallback, Unretained(this));
- // TODO(fdoray): Derive the number of threads per pool from hardware
- // characteristics rather than using hard-coded constants.
-
- // Passing pointers to objects owned by |this| to
- // SchedulerThreadPoolImpl::Create() is safe because a TaskSchedulerImpl can't
- // be deleted before all its thread pools have been joined.
- background_thread_pool_ = SchedulerThreadPoolImpl::Create(
- "TaskSchedulerBackground", ThreadPriority::BACKGROUND, 1U,
- IORestriction::DISALLOWED, re_enqueue_sequence_callback, &task_tracker_,
- &delayed_task_manager_);
- CHECK(background_thread_pool_);
-
- background_file_io_thread_pool_ = SchedulerThreadPoolImpl::Create(
- "TaskSchedulerBackgroundFileIO", ThreadPriority::BACKGROUND, 1U,
- IORestriction::ALLOWED, re_enqueue_sequence_callback, &task_tracker_,
- &delayed_task_manager_);
- CHECK(background_file_io_thread_pool_);
-
- normal_thread_pool_ = SchedulerThreadPoolImpl::Create(
- "TaskSchedulerForeground", ThreadPriority::NORMAL, 4U,
- IORestriction::DISALLOWED, re_enqueue_sequence_callback, &task_tracker_,
- &delayed_task_manager_);
- CHECK(normal_thread_pool_);
-
- normal_file_io_thread_pool_ = SchedulerThreadPoolImpl::Create(
- "TaskSchedulerForegroundFileIO", ThreadPriority::NORMAL, 12U,
- IORestriction::ALLOWED, re_enqueue_sequence_callback, &task_tracker_,
- &delayed_task_manager_);
- CHECK(normal_file_io_thread_pool_);
+ for (const auto& worker_pool : worker_pools) {
+ // Passing pointers to objects owned by |this| to
+ // SchedulerWorkerPoolImpl::Create() is safe because a TaskSchedulerImpl
+ // can't be deleted before all its worker pools have been joined.
+ worker_pools_.push_back(SchedulerWorkerPoolImpl::Create(
+ worker_pool.name, worker_pool.thread_priority, worker_pool.max_threads,
+ worker_pool.io_restriction, re_enqueue_sequence_callback,
+ &task_tracker_, &delayed_task_manager_));
+ CHECK(worker_pools_.back());
+ }
service_thread_ = SchedulerServiceThread::Create(&task_tracker_,
&delayed_task_manager_);
CHECK(service_thread_);
}
-SchedulerThreadPool* TaskSchedulerImpl::GetThreadPoolForTraits(
+SchedulerWorkerPool* TaskSchedulerImpl::GetWorkerPoolForTraits(
const TaskTraits& traits) {
- if (traits.with_file_io()) {
- if (traits.priority() == TaskPriority::BACKGROUND)
- return background_file_io_thread_pool_.get();
- return normal_file_io_thread_pool_.get();
- }
-
- if (traits.priority() == TaskPriority::BACKGROUND)
- return background_thread_pool_.get();
- return normal_thread_pool_.get();
+ const size_t index = worker_pool_index_for_traits_callback_.Run(traits);
+ DCHECK_LT(index, worker_pools_.size());
+ return worker_pools_[index].get();
}
void TaskSchedulerImpl::ReEnqueueSequenceCallback(
@@ -144,7 +126,7 @@ void TaskSchedulerImpl::ReEnqueueSequenceCallback(
// specific priority.
traits.WithPriority(sort_key.priority());
- GetThreadPoolForTraits(traits)->ReEnqueueSequence(std::move(sequence),
+ GetWorkerPoolForTraits(traits)->ReEnqueueSequence(std::move(sequence),
sort_key);
}
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.h b/chromium/base/task_scheduler/task_scheduler_impl.h
index f9d789ea4c5..02cbe3c6c51 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.h
+++ b/chromium/base/task_scheduler/task_scheduler_impl.h
@@ -5,32 +5,63 @@
#ifndef BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
#define BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
+#include <stddef.h>
+
#include <memory>
+#include <string>
+#include <vector>
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_runner.h"
#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_tracker.h"
#include "base/task_scheduler/task_traits.h"
+#include "base/threading/thread.h"
namespace base {
namespace internal {
class SchedulerServiceThread;
-class SchedulerThreadPoolImpl;
// Default TaskScheduler implementation. This class is thread-safe.
class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
public:
- // Creates and returns an initialized TaskSchedulerImpl. CHECKs on failure to
- // do so (never returns null).
- static std::unique_ptr<TaskSchedulerImpl> Create();
+ struct WorkerPoolCreationArgs {
+ // Name of the pool. Used to label the pool's threads.
+ std::string name;
+
+ // Priority of the pool's threads.
+ ThreadPriority thread_priority;
+
+ // Whether I/O is allowed in the pool.
+ SchedulerWorkerPoolImpl::IORestriction io_restriction;
+
+ // Maximum number of threads in the pool.
+ size_t max_threads;
+ };
+
+ // Returns the index of the worker pool in which a task with |traits| should
+ // run. This should be coded in a future-proof way: new traits should
+ // gracefully map to a default pool.
+ using WorkerPoolIndexForTraitsCallback =
+ Callback<size_t(const TaskTraits& traits)>;
+
+ // Creates and returns an initialized TaskSchedulerImpl. CHECKs on failure.
+ // |worker_pools| describes the worker pools to create.
+ // |worker_pool_index_for_traits_callback| returns the index in |worker_pools|
+ // of the worker pool in which a task with given traits should run.
+ static std::unique_ptr<TaskSchedulerImpl> Create(
+ const std::vector<WorkerPoolCreationArgs>& worker_pools,
+ const WorkerPoolIndexForTraitsCallback&
+ worker_pool_index_for_traits_callback);
// Destroying a TaskSchedulerImpl is not allowed in production; it is always
// leaked. In tests, it can only be destroyed after JoinForTesting() has
@@ -51,15 +82,16 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
void JoinForTesting();
private:
- TaskSchedulerImpl();
+ TaskSchedulerImpl(const WorkerPoolIndexForTraitsCallback&
+ worker_pool_index_for_traits_callback);
- void Initialize();
+ void Initialize(const std::vector<WorkerPoolCreationArgs>& worker_pools);
- // Returns the thread pool that runs Tasks with |traits|.
- SchedulerThreadPool* GetThreadPoolForTraits(const TaskTraits& traits);
+ // Returns the worker pool that runs Tasks with |traits|.
+ SchedulerWorkerPool* GetWorkerPoolForTraits(const TaskTraits& traits);
// Callback invoked when a non-single-thread |sequence| isn't empty after a
- // worker thread pops a Task from it.
+ // worker pops a Task from it.
void ReEnqueueSequenceCallback(scoped_refptr<Sequence> sequence);
// Callback invoked when the delayed run time is changed from the
@@ -68,19 +100,8 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
TaskTracker task_tracker_;
DelayedTaskManager delayed_task_manager_;
-
- // Thread pool for BACKGROUND Tasks without file I/O.
- std::unique_ptr<SchedulerThreadPoolImpl> background_thread_pool_;
-
- // Thread pool for BACKGROUND Tasks with file I/O.
- std::unique_ptr<SchedulerThreadPoolImpl> background_file_io_thread_pool_;
-
- // Thread pool for USER_VISIBLE and USER_BLOCKING Tasks without file I/O.
- std::unique_ptr<SchedulerThreadPoolImpl> normal_thread_pool_;
-
- // Thread pool for USER_VISIBLE and USER_BLOCKING Tasks with file I/O.
- std::unique_ptr<SchedulerThreadPoolImpl> normal_file_io_thread_pool_;
-
+ const WorkerPoolIndexForTraitsCallback worker_pool_index_for_traits_callback_;
+ std::vector<std::unique_ptr<SchedulerWorkerPoolImpl>> worker_pools_;
std::unique_ptr<SchedulerServiceThread> service_thread_;
#if DCHECK_IS_ON()
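
For illustration, a minimal sketch of how a caller could drive the new Create() signature declared above; the pool names, sizes, and two-pool split are hypothetical (the real four-pool test configuration appears in the unittest hunk below), and ExampleIndexForTraits/CreateExampleScheduler are made-up helpers.

#include <stddef.h>
#include <memory>
#include <vector>

#include "base/bind.h"
#include "base/task_scheduler/task_scheduler_impl.h"

namespace {

// Hypothetical mapping: BACKGROUND tasks go to pool 0, everything else to
// pool 1. Implements the WorkerPoolIndexForTraitsCallback contract above.
size_t ExampleIndexForTraits(const base::TaskTraits& traits) {
  return traits.priority() == base::TaskPriority::BACKGROUND ? 0u : 1u;
}

std::unique_ptr<base::internal::TaskSchedulerImpl> CreateExampleScheduler() {
  using base::internal::SchedulerWorkerPoolImpl;
  using base::internal::TaskSchedulerImpl;

  // One WorkerPoolCreationArgs entry per pool, in index order.
  std::vector<TaskSchedulerImpl::WorkerPoolCreationArgs> pools;
  pools.push_back({"ExampleBackground", base::ThreadPriority::BACKGROUND,
                   SchedulerWorkerPoolImpl::IORestriction::DISALLOWED, 2U});
  pools.push_back({"ExampleForeground", base::ThreadPriority::NORMAL,
                   SchedulerWorkerPoolImpl::IORestriction::ALLOWED, 4U});

  return TaskSchedulerImpl::Create(pools, base::Bind(&ExampleIndexForTraits));
}

}  // namespace
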
diff --git a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
index c491a4a1345..c14056ca670 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
@@ -38,23 +38,6 @@ struct TraitsExecutionModePair {
ExecutionMode execution_mode;
};
-class TaskSchedulerImplTest
- : public testing::TestWithParam<TraitsExecutionModePair> {
- protected:
- TaskSchedulerImplTest() = default;
-
- void SetUp() override {
- scheduler_ = TaskSchedulerImpl::Create();
- EXPECT_TRUE(scheduler_);
- }
- void TearDown() override { scheduler_->JoinForTesting(); }
-
- std::unique_ptr<TaskSchedulerImpl> scheduler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerImplTest);
-};
-
#if ENABLE_THREAD_RESTRICTIONS
// Returns whether I/O calls are allowed on the current thread.
bool GetIOAllowed() {
@@ -152,13 +135,73 @@ std::vector<TraitsExecutionModePair> GetTraitsExecutionModePairs() {
return params;
}
+enum WorkerPoolType {
+ BACKGROUND_WORKER_POOL = 0,
+ BACKGROUND_FILE_IO_WORKER_POOL,
+ FOREGROUND_WORKER_POOL,
+ FOREGROUND_FILE_IO_WORKER_POOL,
+};
+
+size_t GetThreadPoolIndexForTraits(const TaskTraits& traits) {
+ if (traits.with_file_io()) {
+ return traits.priority() == TaskPriority::BACKGROUND
+ ? BACKGROUND_FILE_IO_WORKER_POOL
+ : FOREGROUND_FILE_IO_WORKER_POOL;
+ }
+ return traits.priority() == TaskPriority::BACKGROUND ? BACKGROUND_WORKER_POOL
+ : FOREGROUND_WORKER_POOL;
+}
+
+class TaskSchedulerImplTest
+ : public testing::TestWithParam<TraitsExecutionModePair> {
+ protected:
+ TaskSchedulerImplTest() = default;
+
+ void SetUp() override {
+ using IORestriction = SchedulerWorkerPoolImpl::IORestriction;
+
+ std::vector<TaskSchedulerImpl::WorkerPoolCreationArgs> worker_pools;
+
+ ASSERT_EQ(BACKGROUND_WORKER_POOL, worker_pools.size());
+ worker_pools.push_back({"TaskSchedulerBackground",
+ ThreadPriority::BACKGROUND,
+ IORestriction::DISALLOWED, 1U});
+
+ ASSERT_EQ(BACKGROUND_FILE_IO_WORKER_POOL, worker_pools.size());
+ worker_pools.push_back({"TaskSchedulerBackgroundFileIO",
+ ThreadPriority::BACKGROUND, IORestriction::ALLOWED,
+ 3U});
+
+ ASSERT_EQ(FOREGROUND_WORKER_POOL, worker_pools.size());
+ worker_pools.push_back({"TaskSchedulerForeground", ThreadPriority::NORMAL,
+ IORestriction::DISALLOWED, 4U});
+
+ ASSERT_EQ(FOREGROUND_FILE_IO_WORKER_POOL, worker_pools.size());
+ worker_pools.push_back({"TaskSchedulerForegroundFileIO",
+ ThreadPriority::NORMAL, IORestriction::ALLOWED,
+ 12U});
+
+ scheduler_ = TaskSchedulerImpl::Create(worker_pools,
+ Bind(&GetThreadPoolIndexForTraits));
+ ASSERT_TRUE(scheduler_);
+ }
+
+ void TearDown() override { scheduler_->JoinForTesting(); }
+
+ std::unique_ptr<TaskSchedulerImpl> scheduler_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerImplTest);
+};
+
} // namespace
// Verifies that a Task posted via PostTaskWithTraits with parameterized
// TaskTraits runs on a thread with the expected priority and I/O restrictions.
// The ExecutionMode parameter is ignored by this test.
TEST_P(TaskSchedulerImplTest, PostTaskWithTraits) {
- WaitableEvent task_ran(true, false);
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
scheduler_->PostTaskWithTraits(
FROM_HERE, GetParam().traits,
Bind(&VerifyTaskEnvironementAndSignalEvent, GetParam().traits,
@@ -193,13 +236,11 @@ INSTANTIATE_TEST_CASE_P(OneTraitsExecutionModePair,
// TaskTraits and ExecutionModes. Verifies that each Task runs on a thread with
// the expected priority and I/O restrictions and respects the characteristics
// of its ExecutionMode.
-TEST(TaskSchedulerImplTest, MultipleTraitsExecutionModePairs) {
- std::unique_ptr<TaskSchedulerImpl> scheduler = TaskSchedulerImpl::Create();
-
+TEST_F(TaskSchedulerImplTest, MultipleTraitsExecutionModePairs) {
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (const auto& traits_execution_mode_pair : GetTraitsExecutionModePairs()) {
threads_posting_tasks.push_back(WrapUnique(new ThreadPostingTasks(
- scheduler.get(), traits_execution_mode_pair.traits,
+ scheduler_.get(), traits_execution_mode_pair.traits,
traits_execution_mode_pair.execution_mode)));
threads_posting_tasks.back()->Start();
}
@@ -208,11 +249,9 @@ TEST(TaskSchedulerImplTest, MultipleTraitsExecutionModePairs) {
thread->WaitForAllTasksToRun();
thread->Join();
}
-
- scheduler->JoinForTesting();
}
-// TODO(fdoray): Add tests with Sequences that move around thread pools once
+// TODO(fdoray): Add tests with Sequences that move around worker pools once
// child TaskRunners are supported.
} // namespace internal
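
All of the WaitableEvent(bool, bool) call sites converted in this patch follow the same mechanical mapping onto the enum-based constructor; a minimal sketch of that mapping:

#include "base/synchronization/waitable_event.h"

void WaitableEventConstructionExample() {
  // Old form: WaitableEvent(bool manual_reset, bool initially_signaled).
  // manual_reset maps to ResetPolicy, initially_signaled to InitialState.
  base::WaitableEvent manual_reset_event(            // was (true, false)
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  base::WaitableEvent auto_reset_event(              // was (false, false)
      base::WaitableEvent::ResetPolicy::AUTOMATIC,
      base::WaitableEvent::InitialState::NOT_SIGNALED);

  // A MANUAL event stays signaled until Reset(); an AUTOMATIC event resets
  // itself after releasing a single waiter.
  manual_reset_event.Signal();
  auto_reset_event.Signal();
}
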
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index 9e85b5cfc17..4a272cd54a9 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -171,8 +171,8 @@ bool TaskTracker::BeforeRunTask(TaskShutdownBehavior shutdown_behavior) {
// or should be blocking shutdown if it was posted before it did.
DCHECK_NE(shutdown_behavior, TaskShutdownBehavior::BLOCK_SHUTDOWN);
- // A WorkerThread might extract a non BLOCK_SHUTDOWN task from a
- // PriorityQueue after shutdown. It shouldn't be allowed to run it.
+ // A worker might extract a non BLOCK_SHUTDOWN task from a PriorityQueue
+ // after shutdown. It shouldn't be allowed to run it.
return false;
}
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index 8f04f3a02e3..ea924ce8949 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -36,7 +36,8 @@ class ThreadCallingShutdown : public SimpleThread {
explicit ThreadCallingShutdown(TaskTracker* tracker)
: SimpleThread("ThreadCallingShutdown"),
tracker_(tracker),
- has_returned_(true, false) {}
+ has_returned_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
// Returns true once the async call to Shutdown() has returned.
bool has_returned() { return has_returned_.IsSignaled(); }
@@ -163,7 +164,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunBeforeShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
// Create a task that will block until |event| is signaled.
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
std::unique_ptr<Task> blocked_task(
new Task(FROM_HERE, Bind(&WaitableEvent::Wait, Unretained(&event)),
TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta()));
diff --git a/chromium/base/task_scheduler/task_traits.h b/chromium/base/task_scheduler/task_traits.h
index 523fd137b66..0c0d304dcf5 100644
--- a/chromium/base/task_scheduler/task_traits.h
+++ b/chromium/base/task_scheduler/task_traits.h
@@ -41,7 +41,7 @@ enum class TaskPriority {
enum class TaskShutdownBehavior {
// Tasks posted with this mode which have not started executing before
// shutdown is initiated will never run. Tasks with this mode running at
- // shutdown will be ignored (the worker thread will not be joined).
+ // shutdown will be ignored (the worker will not be joined).
//
// This option provides a nice way to post stuff you don't want blocking
// shutdown. For example, you might be doing a slow DNS lookup and if it's
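
As a usage sketch of the shutdown behavior documented above: assuming the enumerator this comment describes is TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN (its name is not visible in this hunk) and with DoSlowDnsLookup as a made-up function, a slow lookup can be posted so it never delays shutdown.

#include "base/bind.h"
#include "base/location.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_traits.h"

void DoSlowDnsLookup();  // Hypothetical work that must not block shutdown.

void PostNonBlockingShutdownTask(base::TaskScheduler* scheduler) {
  scheduler->PostTaskWithTraits(
      FROM_HERE,
      base::TaskTraits().WithShutdownBehavior(
          base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
      base::Bind(&DoSlowDnsLookup));
}
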
diff --git a/chromium/base/template_util.h b/chromium/base/template_util.h
index 74c8e5afdc6..1bfc1ac814a 100644
--- a/chromium/base/template_util.h
+++ b/chromium/base/template_util.h
@@ -6,11 +6,23 @@
#define BASE_TEMPLATE_UTIL_H_
#include <stddef.h>
+#include <iosfwd>
#include <type_traits>
#include <utility>
#include "build/build_config.h"
+// This works around libstdc++ 4.6 missing pieces of <type_traits> while we
+// still need to support it.
+#define CR_GLIBCXX_4_7_0 20120322
+#define CR_GLIBCXX_4_5_4 20120702
+#define CR_GLIBCXX_4_6_4 20121127
+#if defined(__GLIBCXX__) && \
+ (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
+ __GLIBCXX__ == CR_GLIBCXX_4_6_4)
+#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
namespace base {
template <class T> struct is_non_const_reference : std::false_type {};
@@ -57,6 +69,15 @@ struct IsAssignableImpl
template <class Lvalue, class Rvalue>
struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+ decltype(void(std::declval<std::ostream&>()
+ << std::declval<T>()))>
+ : std::true_type {};
+
} // namespace internal
// TODO(crbug.com/554293): Remove this when all platforms have this in the std
@@ -82,6 +103,31 @@ struct is_move_assignable
const typename std::add_rvalue_reference<T>::type> {
};
+// underlying_type produces the integer type backing an enum type.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <typename T>
+struct underlying_type {
+ using type = __underlying_type(T);
+};
+#else
+template <typename T>
+using underlying_type = std::underlying_type<T>;
+#endif
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <class T>
+using is_trivially_destructible = std::has_trivial_destructor<T>;
+#else
+template <class T>
+using is_trivially_destructible = std::is_trivially_destructible<T>;
+#endif
+
} // namespace base
+#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+
#endif // BASE_TEMPLATE_UTIL_H_
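
A short, hedged illustration of the new traits: LogValue is a made-up helper (not part of base) that uses SupportsOstreamOperator to choose between streaming a value and printing a placeholder, and the Flag enum exercises underlying_type the same way the unittest below does.

#include <stdint.h>
#include <ostream>
#include <type_traits>

#include "base/template_util.h"

// Streams |value| if an operator<< exists for it.
template <typename T>
typename std::enable_if<
    base::internal::SupportsOstreamOperator<const T&>::value>::type
LogValue(std::ostream& os, const T& value) {
  os << value;
}

// Fallback for types without operator<<.
template <typename T>
typename std::enable_if<
    !base::internal::SupportsOstreamOperator<const T&>::value>::type
LogValue(std::ostream& os, const T&) {
  os << "<unprintable>";
}

enum class Flag : uint8_t { kOff = 0, kOn = 1 };
static_assert(
    std::is_same<base::underlying_type<Flag>::type, uint8_t>::value,
    "underlying_type should expose the enum's fixed underlying type");
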
diff --git a/chromium/base/template_util_unittest.cc b/chromium/base/template_util_unittest.cc
index 5686d7c752d..921596474b6 100644
--- a/chromium/base/template_util_unittest.cc
+++ b/chromium/base/template_util_unittest.cc
@@ -4,11 +4,26 @@
#include "base/template_util.h"
+#include <string>
+
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace {
+enum SimpleEnum { SIMPLE_ENUM };
+enum EnumWithExplicitType : uint64_t { ENUM_WITH_EXPLICIT_TYPE };
+enum class ScopedEnum { SCOPED_ENUM };
+enum class ScopedEnumWithOperator { SCOPED_ENUM_WITH_OPERATOR };
+std::ostream& operator<<(std::ostream& os, ScopedEnumWithOperator v) {
+ return os;
+}
+struct SimpleStruct {};
+struct StructWithOperator {};
+std::ostream& operator<<(std::ostream& os, const StructWithOperator& v) {
+ return os;
+}
+
// is_non_const_reference<Type>
static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
static_assert(!is_non_const_reference<const int&>::value,
@@ -48,5 +63,67 @@ static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
+// A few standard types that definitely support printing.
+static_assert(internal::SupportsOstreamOperator<int>::value,
+ "ints should be printable");
+static_assert(internal::SupportsOstreamOperator<const char*>::value,
+ "C strings should be printable");
+static_assert(internal::SupportsOstreamOperator<std::string>::value,
+ "std::string should be printable");
+
+// Various kinds of enums operator<< support.
+static_assert(internal::SupportsOstreamOperator<SimpleEnum>::value,
+ "simple enum should be printable by value");
+static_assert(internal::SupportsOstreamOperator<const SimpleEnum&>::value,
+ "simple enum should be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<EnumWithExplicitType>::value,
+ "enum with explicit type should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const EnumWithExplicitType&>::value,
+ "enum with explicit type should be printable by const ref");
+static_assert(!internal::SupportsOstreamOperator<ScopedEnum>::value,
+ "scoped enum should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const ScopedEnum&>::value,
+ "simple enum should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<ScopedEnumWithOperator>::value,
+ "scoped enum with operator<< should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const ScopedEnumWithOperator&>::value,
+ "scoped enum with operator<< should be printable by const ref");
+
+// operator<< support on structs.
+static_assert(!internal::SupportsOstreamOperator<SimpleStruct>::value,
+ "simple struct should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const SimpleStruct&>::value,
+ "simple struct should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<StructWithOperator>::value,
+ "struct with operator<< should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const StructWithOperator&>::value,
+ "struct with operator<< should be printable by const ref");
+
+// underlying type of enums
+static_assert(std::is_integral<underlying_type<SimpleEnum>::type>::value,
+ "simple enum must have some integral type");
+static_assert(
+ std::is_same<underlying_type<EnumWithExplicitType>::type, uint64_t>::value,
+ "explicit type must be detected");
+static_assert(std::is_same<underlying_type<ScopedEnum>::type, int>::value,
+ "scoped enum defaults to int");
+
+struct TriviallyDestructible {
+ int field;
+};
+
+class NonTriviallyDestructible {
+ ~NonTriviallyDestructible() {}
+};
+
+static_assert(is_trivially_destructible<int>::value, "IsTriviallyDestructible");
+static_assert(is_trivially_destructible<TriviallyDestructible>::value,
+ "IsTriviallyDestructible");
+static_assert(!is_trivially_destructible<NonTriviallyDestructible>::value,
+ "IsTriviallyDestructible");
+
} // namespace
} // namespace base
diff --git a/chromium/base/third_party/libevent/README.chromium b/chromium/base/third_party/libevent/README.chromium
index 4bc235d4739..1462e884079 100644
--- a/chromium/base/third_party/libevent/README.chromium
+++ b/chromium/base/third_party/libevent/README.chromium
@@ -35,3 +35,5 @@ static library using GYP.
which is missing in the newlib-based PNaCl toolchain.
7) Stub out signal.c for nacl_helper_nonsfi. socketpair() will be prohibited
by sandbox in nacl_helper_nonsfi.
+8) Remove an unnecessary workaround for OS X 10.4 from kqueue.c. It was causing
+ problems on macOS Sierra.
diff --git a/chromium/base/third_party/libevent/kqueue.c b/chromium/base/third_party/libevent/kqueue.c
index ee740eec1d6..3c2ffd5524c 100644
--- a/chromium/base/third_party/libevent/kqueue.c
+++ b/chromium/base/third_party/libevent/kqueue.c
@@ -140,28 +140,6 @@ kq_init(struct event_base *base)
TAILQ_INIT(&kqueueop->evsigevents[i]);
}
- /* Check for Mac OS X kqueue bug. */
- memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
- kqueueop->changes[0].ident = -1;
- kqueueop->changes[0].filter = EVFILT_READ;
- kqueueop->changes[0].flags = EV_ADD;
- /*
- * If kqueue works, then kevent will succeed, and it will
- * stick an error in events[0]. If kqueue is broken, then
- * kevent will fail.
- */
- if (kevent(kq,
- kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
- kqueueop->events[0].ident != -1 ||
- kqueueop->events[0].flags != EV_ERROR) {
- event_warn("%s: detected broken kqueue; not using.", __func__);
- free(kqueueop->changes);
- free(kqueueop->events);
- free(kqueueop);
- close(kq);
- return (NULL);
- }
-
return (kqueueop);
}
diff --git a/chromium/base/threading/platform_thread.h b/chromium/base/threading/platform_thread.h
index 72da93bf56d..9b217a9c656 100644
--- a/chromium/base/threading/platform_thread.h
+++ b/chromium/base/threading/platform_thread.h
@@ -142,8 +142,8 @@ class BASE_EXPORT PlatformThread {
// Sleeps for the specified duration.
static void Sleep(base::TimeDelta duration);
- // Sets the thread name visible to debuggers/tools. This has no effect
- // otherwise.
+ // Sets the thread name visible to debuggers/tools. This will try to
+ // initialize the context for the current thread unless it's a WorkerThread.
static void SetName(const std::string& name);
// Gets the thread name, if previously set by SetName.
@@ -180,6 +180,10 @@ class BASE_EXPORT PlatformThread {
// |thread_handle|.
static void Join(PlatformThreadHandle thread_handle);
+ // Detaches and releases the thread handle. The thread is no longer joinable
+ // and |thread_handle| is invalidated after this call.
+ static void Detach(PlatformThreadHandle thread_handle);
+
// Toggles the current thread's priority at runtime. A thread may not be able
// to raise its priority back up after lowering it if the process does not
// have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
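
A minimal fire-and-forget sketch using the new Detach(); BackgroundWork and StartFireAndForget are hypothetical, and the delegate must outlive the detached thread (the detach tests below make the same point about lifetime being racy).

#include "base/threading/platform_thread.h"

// Hypothetical delegate; it must outlive the detached thread.
class BackgroundWork : public base::PlatformThread::Delegate {
 public:
  void ThreadMain() override {
    // Do the background work here.
  }
};

void StartFireAndForget(BackgroundWork* work) {
  base::PlatformThreadHandle handle;
  if (base::PlatformThread::Create(0, work, &handle))
    base::PlatformThread::Detach(handle);  // |handle| is invalid afterwards.
}
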
diff --git a/chromium/base/threading/platform_thread_posix.cc b/chromium/base/threading/platform_thread_posix.cc
index d8bcf923a2b..2321b3cd49d 100644
--- a/chromium/base/threading/platform_thread_posix.cc
+++ b/chromium/base/threading/platform_thread_posix.cc
@@ -209,6 +209,11 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), NULL));
}
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+ CHECK_EQ(0, pthread_detach(thread_handle.platform_handle()));
+}
+
// Mac has its own Set/GetCurrentThreadPriority() implementations.
#if !defined(OS_MACOSX)
diff --git a/chromium/base/threading/platform_thread_unittest.cc b/chromium/base/threading/platform_thread_unittest.cc
index 82221e11009..2d99ed87500 100644
--- a/chromium/base/threading/platform_thread_unittest.cc
+++ b/chromium/base/threading/platform_thread_unittest.cc
@@ -21,48 +21,76 @@
namespace base {
-// Trivial tests that thread runs and doesn't crash on create and join ---------
+// Trivial tests that thread runs and doesn't crash on create, join, or detach -
namespace {
class TrivialThread : public PlatformThread::Delegate {
public:
- TrivialThread() : did_run_(false) {}
+ TrivialThread() : run_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
- void ThreadMain() override { did_run_ = true; }
+ void ThreadMain() override { run_event_.Signal(); }
- bool did_run() const { return did_run_; }
+ WaitableEvent& run_event() { return run_event_; }
private:
- bool did_run_;
+ WaitableEvent run_event_;
DISALLOW_COPY_AND_ASSIGN(TrivialThread);
};
} // namespace
-TEST(PlatformThreadTest, Trivial) {
+TEST(PlatformThreadTest, TrivialJoin) {
TrivialThread thread;
PlatformThreadHandle handle;
- ASSERT_FALSE(thread.did_run());
+ ASSERT_FALSE(thread.run_event().IsSignaled());
ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
PlatformThread::Join(handle);
- ASSERT_TRUE(thread.did_run());
+ ASSERT_TRUE(thread.run_event().IsSignaled());
}
-TEST(PlatformThreadTest, TrivialTimesTen) {
+TEST(PlatformThreadTest, TrivialJoinTimesTen) {
TrivialThread thread[10];
PlatformThreadHandle handle[arraysize(thread)];
for (size_t n = 0; n < arraysize(thread); n++)
- ASSERT_FALSE(thread[n].did_run());
+ ASSERT_FALSE(thread[n].run_event().IsSignaled());
for (size_t n = 0; n < arraysize(thread); n++)
ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
for (size_t n = 0; n < arraysize(thread); n++)
PlatformThread::Join(handle[n]);
for (size_t n = 0; n < arraysize(thread); n++)
- ASSERT_TRUE(thread[n].did_run());
+ ASSERT_TRUE(thread[n].run_event().IsSignaled());
+}
+
+// The following detach tests are by nature racy. The run_event approximates the
+// end and termination of the thread, but threads could persist shortly after
+// the test completes.
+TEST(PlatformThreadTest, TrivialDetach) {
+ TrivialThread thread;
+ PlatformThreadHandle handle;
+
+ ASSERT_FALSE(thread.run_event().IsSignaled());
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+ PlatformThread::Detach(handle);
+ thread.run_event().Wait();
+}
+
+TEST(PlatformThreadTest, TrivialDetachTimesTen) {
+ TrivialThread thread[10];
+ PlatformThreadHandle handle[arraysize(thread)];
+
+ for (size_t n = 0; n < arraysize(thread); n++)
+ ASSERT_FALSE(thread[n].run_event().IsSignaled());
+ for (size_t n = 0; n < arraysize(thread); n++) {
+ ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+ PlatformThread::Detach(handle[n]);
+ }
+ for (size_t n = 0; n < arraysize(thread); n++)
+ thread[n].run_event().Wait();
}
// Tests of basic thread functions ---------------------------------------------
@@ -73,8 +101,10 @@ class FunctionTestThread : public PlatformThread::Delegate {
public:
FunctionTestThread()
: thread_id_(kInvalidThreadId),
- termination_ready_(true, false),
- terminate_thread_(true, false),
+ termination_ready_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ terminate_thread_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
done_(false) {}
~FunctionTestThread() override {
EXPECT_TRUE(terminate_thread_.IsSignaled())
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index eae236634d1..6030992d680 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -13,7 +13,6 @@
#include "base/threading/thread_restrictions.h"
#include "base/tracked_objects.h"
#include "base/win/scoped_handle.h"
-#include "base/win/windows_version.h"
namespace base {
@@ -100,10 +99,8 @@ bool CreateThreadInternal(size_t stack_size,
PlatformThreadHandle* out_thread_handle,
ThreadPriority priority) {
unsigned int flags = 0;
- if (stack_size > 0 && base::win::GetVersion() >= base::win::VERSION_XP) {
+ if (stack_size > 0) {
flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
- } else {
- stack_size = 0;
}
ThreadParams* params = new ThreadParams;
@@ -217,15 +214,13 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
// Wait for the thread to exit. It should already have terminated but make
// sure this assumption is valid.
- DWORD result = WaitForSingleObject(thread_handle.platform_handle(), INFINITE);
- if (result != WAIT_OBJECT_0) {
- // Debug info for bug 127931.
- DWORD error = GetLastError();
- debug::Alias(&error);
- debug::Alias(&result);
- CHECK(false);
- }
+ CHECK_EQ(WAIT_OBJECT_0,
+ WaitForSingleObject(thread_handle.platform_handle(), INFINITE));
+ CloseHandle(thread_handle.platform_handle());
+}
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
CloseHandle(thread_handle.platform_handle());
}
diff --git a/chromium/base/threading/sequenced_task_runner_handle.cc b/chromium/base/threading/sequenced_task_runner_handle.cc
index 2c3af3255d5..88b36a8d648 100644
--- a/chromium/base/threading/sequenced_task_runner_handle.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle.cc
@@ -8,7 +8,6 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/sequenced_task_runner.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
diff --git a/chromium/base/threading/sequenced_task_runner_handle.h b/chromium/base/threading/sequenced_task_runner_handle.h
index e6da18d215d..e6dec1e9f88 100644
--- a/chromium/base/threading/sequenced_task_runner_handle.h
+++ b/chromium/base/threading/sequenced_task_runner_handle.h
@@ -8,11 +8,10 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
namespace base {
-class SequencedTaskRunner;
-
class BASE_EXPORT SequencedTaskRunnerHandle {
public:
// Returns a SequencedTaskRunner which guarantees that posted tasks will only
diff --git a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
index 21969745515..381aa51d77a 100644
--- a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
@@ -63,7 +63,8 @@ TEST_F(SequencedTaskRunnerHandleTest, FromSequencedWorkerPoolTask) {
// Wrap the SequencedWorkerPool to avoid leaks due to its asynchronous
// destruction.
SequencedWorkerPoolOwner owner(3, "Test");
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
owner.pool()->PostSequencedWorkerTask(
owner.pool()->GetSequenceToken(), FROM_HERE,
base::Bind(
@@ -77,7 +78,8 @@ TEST_F(SequencedTaskRunnerHandleTest, FromUnsequencedTask) {
// Wrap the SequencedWorkerPool to avoid leaks due to its asynchronous
// destruction.
SequencedWorkerPoolOwner owner(3, "Test");
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
owner.pool()->PostWorkerTask(
FROM_HERE,
base::Bind(
diff --git a/chromium/base/threading/sequenced_worker_pool_unittest.cc b/chromium/base/threading/sequenced_worker_pool_unittest.cc
index 58dc317a398..d73b4f20e09 100644
--- a/chromium/base/threading/sequenced_worker_pool_unittest.cc
+++ b/chromium/base/threading/sequenced_worker_pool_unittest.cc
@@ -966,7 +966,8 @@ TEST_F(SequencedWorkerPoolTest, GetSequencedTaskRunnerForCurrentThread) {
SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread();
EXPECT_FALSE(local_task_runner);
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Closure signal = Bind(&WaitableEvent::Signal, Unretained(&event));
scoped_refptr<SequencedTaskRunner> task_runner_1 =
pool()->GetSequencedTaskRunner(SequencedWorkerPool::GetSequenceToken());
@@ -1025,7 +1026,8 @@ void VerifySequenceOnDestruction(const Closure& callback) {
TEST_F(SequencedWorkerPoolTest, CheckSequenceOnDestruction) {
EnsureAllWorkersCreated();
- WaitableEvent event(false, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Closure signal = Bind(&WaitableEvent::Signal, Unretained(&event));
pool()->PostWorkerTask(FROM_HERE, Bind(&VerifySequenceOnDestruction, signal));
event.Wait();
diff --git a/chromium/base/threading/simple_thread.cc b/chromium/base/threading/simple_thread.cc
index 7059ceab766..6c64a17d6ab 100644
--- a/chromium/base/threading/simple_thread.cc
+++ b/chromium/base/threading/simple_thread.cc
@@ -12,15 +12,24 @@
namespace base {
SimpleThread::SimpleThread(const std::string& name_prefix)
- : name_prefix_(name_prefix), name_(name_prefix),
- thread_(), event_(true, false), tid_(0), joined_(false) {
-}
+ : name_prefix_(name_prefix),
+ name_(name_prefix),
+ thread_(),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ tid_(0),
+ joined_(false) {}
SimpleThread::SimpleThread(const std::string& name_prefix,
const Options& options)
- : name_prefix_(name_prefix), name_(name_prefix), options_(options),
- thread_(), event_(true, false), tid_(0), joined_(false) {
-}
+ : name_prefix_(name_prefix),
+ name_(name_prefix),
+ options_(options),
+ thread_(),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ tid_(0),
+ joined_(false) {}
SimpleThread::~SimpleThread() {
DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
@@ -93,8 +102,8 @@ DelegateSimpleThreadPool::DelegateSimpleThreadPool(
int num_threads)
: name_prefix_(name_prefix),
num_threads_(num_threads),
- dry_(true, false) {
-}
+ dry_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
DelegateSimpleThreadPool::~DelegateSimpleThreadPool() {
DCHECK(threads_.empty());
diff --git a/chromium/base/threading/simple_thread_unittest.cc b/chromium/base/threading/simple_thread_unittest.cc
index 7229d362f14..14dd4591f18 100644
--- a/chromium/base/threading/simple_thread_unittest.cc
+++ b/chromium/base/threading/simple_thread_unittest.cc
@@ -95,7 +95,8 @@ TEST(SimpleThreadTest, CreateAndJoin) {
TEST(SimpleThreadTest, WaitForEvent) {
// Create a thread, and wait for it to signal us.
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitEventRunner runner(&event);
DelegateSimpleThread thread(&runner, "event_waiter");
@@ -108,7 +109,8 @@ TEST(SimpleThreadTest, WaitForEvent) {
}
TEST(SimpleThreadTest, NamedWithOptions) {
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WaitEventRunner runner(&event);
SimpleThread::Options options;
@@ -152,7 +154,8 @@ TEST(SimpleThreadTest, ThreadPool) {
// We can reuse our pool. Verify that all 10 threads can actually run in
// parallel, so this test will only pass if there are actually 10 threads.
AtomicSequenceNumber seq2;
- WaitableEvent event(true, false);
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
// Changing 9 to 10, for example, would cause us JoinAll() to never return.
VerifyPoolRunner verifier(&seq2, 9, &event);
pool.Start();
diff --git a/chromium/base/threading/thread.cc b/chromium/base/threading/thread.cc
index 58631301dda..11aaea1d7f8 100644
--- a/chromium/base/threading/thread.cc
+++ b/chromium/base/threading/thread.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/location.h"
+#include "base/run_loop.h"
#include "base/synchronization/waitable_event.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_id_name_manager.h"
@@ -66,11 +67,13 @@ Thread::Thread(const std::string& name)
running_(false),
thread_(0),
id_(kInvalidThreadId),
- id_event_(true, false),
+ id_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
message_loop_(nullptr),
message_loop_timer_slack_(TIMER_SLACK_NONE),
name_(name),
- start_event_(true, false) {
+ start_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
}
Thread::~Thread() {
@@ -199,7 +202,7 @@ bool Thread::IsRunning() const {
}
void Thread::Run(MessageLoop* message_loop) {
- message_loop->Run();
+ RunLoop().Run();
}
void Thread::SetThreadWasQuitProperly(bool flag) {
@@ -229,7 +232,6 @@ void Thread::ThreadMain() {
DCHECK(message_loop_);
std::unique_ptr<MessageLoop> message_loop(message_loop_);
message_loop_->BindToCurrentThread();
- message_loop_->set_thread_name(name_);
message_loop_->SetTimerSlack(message_loop_timer_slack_);
#if defined(OS_WIN)
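
This hunk (and the timer test hunks later in the patch) replaces MessageLoop::current()->Run() with RunLoop().Run(). A minimal sketch of the converted pattern, with PumpUntilQuit as a made-up example:

#include "base/location.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"

void PumpUntilQuit() {
  base::MessageLoop message_loop;  // Binds a task runner to this thread.
  base::RunLoop run_loop;
  // Quit once the posted closure runs; previously tests relied on
  // MessageLoop::current()->Run() plus an explicit Quit elsewhere.
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                run_loop.QuitClosure());
  run_loop.Run();
}
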
diff --git a/chromium/base/threading/thread_local_unittest.cc b/chromium/base/threading/thread_local_unittest.cc
index e94c1db1c8d..cdc1ca6f567 100644
--- a/chromium/base/threading/thread_local_unittest.cc
+++ b/chromium/base/threading/thread_local_unittest.cc
@@ -82,7 +82,8 @@ TEST(ThreadLocalTest, Pointer) {
static char* const kBogusPointer = reinterpret_cast<char*>(0x1234);
char* tls_val;
- base::WaitableEvent done(true, false);
+ base::WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
GetThreadLocal getter(&tlp, &done);
getter.set_ptr(&tls_val);
diff --git a/chromium/base/threading/thread_perftest.cc b/chromium/base/threading/thread_perftest.cc
index 5958f1aa6de..1df13883169 100644
--- a/chromium/base/threading/thread_perftest.cc
+++ b/chromium/base/threading/thread_perftest.cc
@@ -35,7 +35,8 @@ const int kNumRuns = 100000;
class ThreadPerfTest : public testing::Test {
public:
ThreadPerfTest()
- : done_(false, false) {
+ : done_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
// Disable the task profiler as it adds significant cost!
CommandLine::Init(0, NULL);
CommandLine::ForCurrentProcess()->AppendSwitchASCII(
@@ -59,7 +60,8 @@ class ThreadPerfTest : public testing::Test {
}
base::ThreadTicks ThreadNow(base::Thread* thread) {
- base::WaitableEvent done(false, false);
+ base::WaitableEvent done(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
base::ThreadTicks ticks;
thread->task_runner()->PostTask(
FROM_HERE, base::Bind(&ThreadPerfTest::TimeOnThread,
@@ -178,8 +180,11 @@ template <typename WaitableEventType>
class EventPerfTest : public ThreadPerfTest {
public:
void Init() override {
- for (size_t i = 0; i < threads_.size(); i++)
- events_.push_back(new WaitableEventType(false, false));
+ for (size_t i = 0; i < threads_.size(); i++) {
+ events_.push_back(
+ new WaitableEventType(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED));
+ }
}
void Reset() override { events_.clear(); }
@@ -226,10 +231,11 @@ TEST_F(WaitableEventPerfTest, EventPingPong) {
// Build a minimal event using ConditionVariable.
class ConditionVariableEvent {
public:
- ConditionVariableEvent(bool manual_reset, bool initially_signaled)
+ ConditionVariableEvent(WaitableEvent::ResetPolicy reset_policy,
+ WaitableEvent::InitialState initial_state)
: cond_(&lock_), signaled_(false) {
- DCHECK(!manual_reset);
- DCHECK(!initially_signaled);
+ DCHECK_EQ(WaitableEvent::ResetPolicy::AUTOMATIC, reset_policy);
+ DCHECK_EQ(WaitableEvent::InitialState::NOT_SIGNALED, initial_state);
}
void Signal() {
@@ -265,9 +271,10 @@ TEST_F(ConditionVariablePerfTest, EventPingPong) {
// way to force a context switch, we should use that instead.
class PthreadEvent {
public:
- PthreadEvent(bool manual_reset, bool initially_signaled) {
- DCHECK(!manual_reset);
- DCHECK(!initially_signaled);
+ PthreadEvent(WaitableEvent::ResetPolicy reset_policy,
+ WaitableEvent::InitialState initial_state) {
+ DCHECK_EQ(WaitableEvent::ResetPolicy::AUTOMATIC, reset_policy);
+ DCHECK_EQ(WaitableEvent::InitialState::NOT_SIGNALED, initial_state);
pthread_mutex_init(&mutex_, 0);
pthread_cond_init(&cond_, 0);
signaled_ = false;
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 92b7bd5b2f6..90d8433192c 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -44,7 +44,7 @@ class ScopedAllowWaitForAndroidLayoutTests;
class ScopedAllowWaitForDebugURL;
class SoftwareOutputDeviceMus;
class TextInputClientMac;
-class RasterWorkerPool;
+class CategorizedWorkerPool;
} // namespace content
namespace dbus {
class Bus;
@@ -53,9 +53,6 @@ namespace disk_cache {
class BackendImpl;
class InFlightIO;
}
-namespace gles2 {
-class CommandBufferClientImpl;
-}
namespace gpu {
class GpuChannelHost;
}
@@ -63,8 +60,10 @@ namespace mojo {
namespace common {
class MessagePumpMojo;
}
+class SyncCallRestrictions;
}
namespace mus {
+class CommandBufferClientImpl;
class CommandBufferLocal;
class GpuState;
}
@@ -201,7 +200,7 @@ class BASE_EXPORT ThreadRestrictions {
friend class ::ScopedAllowWaitForLegacyWebViewApi;
friend class cc::CompletionEvent;
friend class cc::SingleThreadTaskGraphRunner;
- friend class content::RasterWorkerPool;
+ friend class content::CategorizedWorkerPool;
friend class remoting::AutoThread;
friend class ui::WindowResizeHelperMac;
friend class MessagePumpDefault;
@@ -211,8 +210,9 @@ class BASE_EXPORT ThreadRestrictions {
friend class ThreadTestHelper;
friend class PlatformThread;
friend class android::JavaHandlerThread;
- friend class gles2::CommandBufferClientImpl;
friend class mojo::common::MessagePumpMojo;
+ friend class mojo::SyncCallRestrictions;
+ friend class mus::CommandBufferClientImpl;
friend class mus::CommandBufferLocal;
friend class mus::GpuState;
diff --git a/chromium/base/threading/thread_task_runner_handle.cc b/chromium/base/threading/thread_task_runner_handle.cc
index 1b7c13a750c..190e18ffc68 100644
--- a/chromium/base/threading/thread_task_runner_handle.cc
+++ b/chromium/base/threading/thread_task_runner_handle.cc
@@ -8,7 +8,6 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/single_thread_task_runner.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_local.h"
diff --git a/chromium/base/threading/thread_task_runner_handle.h b/chromium/base/threading/thread_task_runner_handle.h
index 72ce49e1bd0..c8e58935f09 100644
--- a/chromium/base/threading/thread_task_runner_handle.h
+++ b/chromium/base/threading/thread_task_runner_handle.h
@@ -8,11 +8,10 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
namespace base {
-class SingleThreadTaskRunner;
-
// ThreadTaskRunnerHandle stores a reference to a thread's TaskRunner
// in thread-local storage. Callers can then retrieve the TaskRunner
// for the current thread by calling ThreadTaskRunnerHandle::Get().
diff --git a/chromium/base/threading/thread_unittest.cc b/chromium/base/threading/thread_unittest.cc
index 7bf83f81a45..b01f52fdb3a 100644
--- a/chromium/base/threading/thread_unittest.cc
+++ b/chromium/base/threading/thread_unittest.cc
@@ -214,7 +214,8 @@ TEST_F(ThreadTest, ThreadId) {
b.Start();
// Post a task that calls GetThreadId() on the created thread.
- base::WaitableEvent event(false, false);
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
base::PlatformThreadId id_from_new_thread;
a.task_runner()->PostTask(
FROM_HERE, base::Bind(ReturnThreadId, &a, &id_from_new_thread, &event));
diff --git a/chromium/base/threading/worker_pool_posix_unittest.cc b/chromium/base/threading/worker_pool_posix_unittest.cc
index 99a93696070..6cefeed34e5 100644
--- a/chromium/base/threading/worker_pool_posix_unittest.cc
+++ b/chromium/base/threading/worker_pool_posix_unittest.cc
@@ -96,7 +96,8 @@ class PosixDynamicThreadPoolTest : public testing::Test {
counter_(0),
num_waiting_to_start_(0),
num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
- start_(true, false) {}
+ start_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void SetUp() override {
peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
diff --git a/chromium/base/threading/worker_pool_unittest.cc b/chromium/base/threading/worker_pool_unittest.cc
index 27af50be678..ef4bed136e9 100644
--- a/chromium/base/threading/worker_pool_unittest.cc
+++ b/chromium/base/threading/worker_pool_unittest.cc
@@ -26,7 +26,10 @@ namespace {
class PostTaskAndReplyTester
: public base::RefCountedThreadSafe<PostTaskAndReplyTester> {
public:
- PostTaskAndReplyTester() : finished_(false), test_event_(false, false) {}
+ PostTaskAndReplyTester()
+ : finished_(false),
+ test_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
void RunTest() {
ASSERT_TRUE(thread_checker_.CalledOnValidThread());
@@ -69,8 +72,10 @@ class PostTaskAndReplyTester
} // namespace
TEST_F(WorkerPoolTest, PostTask) {
- WaitableEvent test_event(false, false);
- WaitableEvent long_test_event(false, false);
+ WaitableEvent test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent long_test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
WorkerPool::PostTask(FROM_HERE,
base::Bind(&WaitableEvent::Signal,
diff --git a/chromium/base/time/OWNERS b/chromium/base/time/OWNERS
new file mode 100644
index 00000000000..02bdb39030f
--- /dev/null
+++ b/chromium/base/time/OWNERS
@@ -0,0 +1 @@
+miu@chromium.org
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index 76ffeb74411..3670f557589 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -136,11 +136,6 @@ std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
// Time -----------------------------------------------------------------------
// static
-Time Time::Max() {
- return Time(std::numeric_limits<int64_t>::max());
-}
-
-// static
Time Time::FromTimeT(time_t tt) {
if (tt == 0)
return Time(); // Preserve 0 so we can tell it doesn't exist.
@@ -263,6 +258,14 @@ bool Time::FromStringInternal(const char* time_string,
return true;
}
+// static
+bool Time::ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs) {
+ return lhs.year == rhs.year && lhs.month == rhs.month &&
+ lhs.day_of_month == rhs.day_of_month && lhs.hour == rhs.hour &&
+ lhs.minute == rhs.minute && lhs.second == rhs.second &&
+ lhs.millisecond == rhs.millisecond;
+}
+
std::ostream& operator<<(std::ostream& os, Time time) {
Time::Exploded exploded;
time.UTCExplode(&exploded);
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index 399ec826ce3..efece969b02 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -56,6 +56,7 @@
#include <limits>
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/numerics/safe_math.h"
#include "build/build_config.h"
@@ -311,6 +312,12 @@ class TimeBase {
// Returns true if this object represents the maximum time.
bool is_max() const { return us_ == std::numeric_limits<int64_t>::max(); }
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static TimeClass Max() {
+ return TimeClass(std::numeric_limits<int64_t>::max());
+ }
+
// For serializing only. Use FromInternalValue() to reconstitute. Please don't
// use this and do arithmetic on it, as it is more error prone than using the
// provided operators.
@@ -438,10 +445,6 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// times are increasing, or that two calls to Now() won't be the same.
static Time Now();
- // Returns the maximum time, which should be greater than any reasonable time
- // with which we might compare it.
- static Time Max();
-
// Returns the current time. Same as Now() except that this function always
// uses system time so that there are no discrepancies between the returned
// time and system time even on virtual environments including our test bot.
@@ -519,11 +522,29 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// Converts an exploded structure representing either the local time or UTC
// into a Time class.
+ // TODO(maksims): Get rid of these in favor of the methods below when
+ // all the callers stop using these ones.
static Time FromUTCExploded(const Exploded& exploded) {
- return FromExploded(false, exploded);
+ base::Time time;
+ ignore_result(FromUTCExploded(exploded, &time));
+ return time;
}
static Time FromLocalExploded(const Exploded& exploded) {
- return FromExploded(true, exploded);
+ base::Time time;
+ ignore_result(FromLocalExploded(exploded, &time));
+ return time;
+ }
+
+ // Converts an exploded structure representing either the local time or UTC
+ // into a Time class. Returns false on a failure when, for example, a day of
+ // month is set to 31 on a 28-30 day month.
+ static bool FromUTCExploded(const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT {
+ return FromExploded(false, exploded, time);
+ }
+ static bool FromLocalExploded(const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT {
+ return FromExploded(true, exploded, time);
}
// Converts a string representation of time to a Time object.
@@ -564,8 +585,12 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
void Explode(bool is_local, Exploded* exploded) const;
// Unexplodes a given time assuming the source is either local time
- // |is_local = true| or UTC |is_local = false|.
- static Time FromExploded(bool is_local, const Exploded& exploded);
+ // |is_local = true| or UTC |is_local = false|. Function returns false on
+ // failure and sets |time| to Time(0). Otherwise returns true and sets |time|
+ // to non-exploded time.
+ static bool FromExploded(bool is_local,
+ const Exploded& exploded,
+ Time* time) WARN_UNUSED_RESULT;
// Converts a string representation of time to a Time object.
// An example of a time string which is converted is as below:-
@@ -577,6 +602,9 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
static bool FromStringInternal(const char* time_string,
bool is_local,
Time* parsed_time);
+
+ // Comparison does not consider |day_of_week| when doing the operation.
+ static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
};
// static
@@ -639,7 +667,13 @@ constexpr TimeDelta TimeDelta::FromDouble(double value) {
// static
constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
int64_t positive_value) {
- return (DCHECK(positive_value > 0),
+ return (
+#if !defined(_PREFAST_) || !defined(OS_WIN)
+ // Avoid internal compiler errors in /analyze builds with VS 2015
+ // update 3.
+ // https://connect.microsoft.com/VisualStudio/feedback/details/2870865
+ DCHECK(positive_value > 0),
+#endif
value > std::numeric_limits<int64_t>::max() / positive_value
? Max()
: value < -std::numeric_limits<int64_t>::max() / positive_value
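
A short sketch of the new validating conversion declared above; the date is an arbitrary invalid example in the spirit of the unittest added later in this patch.

#include "base/time/time.h"

bool ConvertExampleDate(base::Time* out) {
  base::Time::Exploded exploded = {};
  exploded.year = 2016;
  exploded.month = 2;
  exploded.day_of_month = 31;  // Invalid: February has no 31st.
  exploded.hour = 12;

  // New API: returns false and sets |*out| to Time(0) instead of silently
  // producing a date in the next month as the old FromUTCExploded() could.
  return base::Time::FromUTCExploded(exploded, out);
}
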
diff --git a/chromium/base/time/time_mac.cc b/chromium/base/time/time_mac.cc
index c23c4917e75..373ec3a3bc9 100644
--- a/chromium/base/time/time_mac.cc
+++ b/chromium/base/time/time_mac.cc
@@ -34,7 +34,7 @@ int64_t ComputeCurrentTicks() {
struct timeval boottime;
int mib[2] = {CTL_KERN, KERN_BOOTTIME};
size_t size = sizeof(boottime);
- int kr = sysctl(mib, arraysize(mib), &boottime, &size, NULL, 0);
+ int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
DCHECK_EQ(KERN_SUCCESS, kr);
base::TimeDelta time_difference = base::Time::Now() -
(base::Time::FromTimeT(boottime.tv_sec) +
@@ -168,7 +168,7 @@ Time Time::NowFromSystemTime() {
}
// static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
is_local
? CFTimeZoneCopySystem()
@@ -184,8 +184,28 @@ Time Time::FromExploded(bool is_local, const Exploded& exploded) {
exploded.day_of_month, exploded.hour, exploded.minute, exploded.second,
exploded.millisecond);
CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
- return Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
- kWindowsEpochDeltaMicroseconds);
+
+ base::Time converted_time =
+ Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
+ kWindowsEpochDeltaMicroseconds);
+
+ // If |exploded.day_of_month| is set to 31 on a 28-30 day month, the
+ // conversion will produce the first day of the next month. Thus round-trip
+ // the time and compare the initial |exploded| with the |to_exploded| result.
+ base::Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
}
void Time::Explode(bool is_local, Exploded* exploded) const {
diff --git a/chromium/base/time/time_posix.cc b/chromium/base/time/time_posix.cc
index 32614bc086d..495e249f006 100644
--- a/chromium/base/time/time_posix.cc
+++ b/chromium/base/time/time_posix.cc
@@ -211,7 +211,7 @@ void Time::Explode(bool is_local, Exploded* exploded) const {
}
// static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
struct tm timestruct;
timestruct.tm_sec = exploded.second;
timestruct.tm_min = exploded.minute;
@@ -301,8 +301,26 @@ Time Time::FromExploded(bool is_local, const Exploded& exploded) {
}
// Adjust from Unix (1970) to Windows (1601) epoch.
- return Time((milliseconds * kMicrosecondsPerMillisecond) +
- kWindowsEpochDeltaMicroseconds);
+ base::Time converted_time =
+ Time((milliseconds * kMicrosecondsPerMillisecond) +
+ kWindowsEpochDeltaMicroseconds);
+
+ // If |exploded.day_of_month| is set to 31 on a 28-30 day month, the
+ // conversion will produce the first day of the next month. Thus round-trip
+ // the time and compare the initial |exploded| with the |to_exploded| result.
+ base::Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
}
// TimeTicks ------------------------------------------------------------------
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index 25c6ca59430..4f47d56522b 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -21,6 +21,52 @@ namespace base {
namespace {
+TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
+ // FromUTCExploded must set |time| to Time(0) and return false if the day is
+ // set to 31 on a 28-30 day month. Verify that |exploded| values such as the
+ // 31st of February and the 31st of April yield Time(0).
+
+ const struct DateTestData {
+ Time::Exploded explode;
+ bool is_valid;
+ } kDateTestData[] = {
+ // 31st of February
+ {{2016, 2, 0, 31, 12, 30, 0, 0}, true},
+ // 31st of April
+ {{2016, 4, 0, 31, 8, 43, 0, 0}, true},
+ // Negative month
+ {{2016, -5, 0, 2, 4, 10, 0, 0}, false},
+ // Negative date of month
+ {{2016, 6, 0, -15, 2, 50, 0, 0}, false},
+ // Negative hours
+ {{2016, 7, 0, 10, -11, 29, 0, 0}, false},
+ // Negative minutes
+ {{2016, 3, 0, 14, 10, -29, 0, 0}, false},
+ // Negative seconds
+ {{2016, 10, 0, 25, 7, 47, -30, 0}, false},
+ // Negative milliseconds
+ {{2016, 10, 0, 25, 7, 47, 20, -500}, false},
+ // Hours are too large
+ {{2016, 7, 0, 10, 26, 29, 0, 0}, false},
+ // Minutes are too large
+ {{2016, 3, 0, 14, 10, 78, 0, 0}, false},
+ // Seconds are too large
+ {{2016, 10, 0, 25, 7, 47, 234, 0}, false},
+ // Milliseconds are too large
+ {{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+ };
+
+ for (const auto& test : kDateTestData) {
+ EXPECT_EQ(test.explode.HasValidValues(), test.is_valid);
+
+ base::Time result;
+ EXPECT_FALSE(base::Time::FromUTCExploded(test.explode, &result));
+ EXPECT_TRUE(result.is_null());
+ EXPECT_FALSE(base::Time::FromLocalExploded(test.explode, &result));
+ EXPECT_TRUE(result.is_null());
+ }
+}
+
// Specialized test fixture allowing time strings without timezones to be
// tested by comparing them to a known time in the local zone.
// See also pr_time_unittests.cc
@@ -80,7 +126,8 @@ TEST_F(TimeTest, TimeT) {
EXPECT_EQ(tms.tm_sec, exploded.second);
// Convert exploded back to the time struct.
- Time our_time_2 = Time::FromLocalExploded(exploded);
+ Time our_time_2;
+ EXPECT_TRUE(Time::FromLocalExploded(exploded, &our_time_2));
EXPECT_TRUE(our_time_1 == our_time_2);
time_t now_t_2 = our_time_2.ToTimeT();
@@ -119,7 +166,8 @@ TEST_F(TimeTest, FromExplodedWithMilliseconds) {
Time::Exploded exploded1 = {0};
now.UTCExplode(&exploded1);
exploded1.millisecond = 500;
- Time time = Time::FromUTCExploded(exploded1);
+ Time time;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded1, &time));
Time::Exploded exploded2 = {0};
time.UTCExplode(&exploded2);
EXPECT_EQ(exploded1.millisecond, exploded2.millisecond);
@@ -137,7 +185,8 @@ TEST_F(TimeTest, LocalExplode) {
Time::Exploded exploded;
a.LocalExplode(&exploded);
- Time b = Time::FromLocalExploded(exploded);
+ Time b;
+ EXPECT_TRUE(Time::FromLocalExploded(exploded, &b));
// The exploded structure doesn't have microseconds, and on Mac & Linux, the
// internal OS conversion uses seconds, which will cause truncation. So we
@@ -150,7 +199,8 @@ TEST_F(TimeTest, UTCExplode) {
Time::Exploded exploded;
a.UTCExplode(&exploded);
- Time b = Time::FromUTCExploded(exploded);
+ Time b;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &b));
EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
}
@@ -565,7 +615,8 @@ TEST_F(TimeTest, FromLocalExplodedCrashOnAndroid) {
static char buffer[] = "TZ=America/Santiago";
putenv(buffer);
tzset();
- Time t = Time::FromLocalExploded(midnight);
+ Time t;
+ EXPECT_TRUE(Time::FromLocalExploded(midnight, &t));
EXPECT_EQ(1381633200, t.ToTimeT());
}
#endif // OS_ANDROID
@@ -787,7 +838,8 @@ TEST(TimeDelta, WindowsEpoch) {
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
- Time t = Time::FromUTCExploded(exploded);
+ Time t;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &t));
// Unix 1970 epoch.
EXPECT_EQ(INT64_C(11644473600000000), t.ToInternalValue());
diff --git a/chromium/base/time/time_win.cc b/chromium/base/time/time_win.cc
index ac3197a0c70..8708eb21c90 100644
--- a/chromium/base/time/time_win.cc
+++ b/chromium/base/time/time_win.cc
@@ -235,7 +235,7 @@ bool Time::IsHighResolutionTimerInUse() {
}
// static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
// Create the system struct representing our exploded time. It will either be
// in local time or UTC.
SYSTEMTIME st;
@@ -253,17 +253,19 @@ Time Time::FromExploded(bool is_local, const Exploded& exploded) {
// Ensure that it's in UTC.
if (is_local) {
SYSTEMTIME utc_st;
- success = TzSpecificLocalTimeToSystemTime(NULL, &st, &utc_st) &&
+ success = TzSpecificLocalTimeToSystemTime(nullptr, &st, &utc_st) &&
SystemTimeToFileTime(&utc_st, &ft);
} else {
success = !!SystemTimeToFileTime(&st, &ft);
}
if (!success) {
- NOTREACHED() << "Unable to convert time";
- return Time(0);
+ *time = Time(0);
+ return false;
}
- return Time(FileTimeToMicroseconds(ft));
+
+ *time = Time(FileTimeToMicroseconds(ft));
+ return true;
}
void Time::Explode(bool is_local, Exploded* exploded) const {
@@ -288,7 +290,7 @@ void Time::Explode(bool is_local, Exploded* exploded) const {
// daylight saving time, it will take daylight saving time into account,
// even if the time you are converting is in standard time.
success = FileTimeToSystemTime(&utc_ft, &utc_st) &&
- SystemTimeToTzSpecificLocalTime(NULL, &utc_st, &st);
+ SystemTimeToTzSpecificLocalTime(nullptr, &utc_st, &st);
} else {
success = !!FileTimeToSystemTime(&utc_ft, &st);
}
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index e56efac6e39..6fcd25b93a3 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -10,7 +10,9 @@
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -115,7 +117,7 @@ void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
OneShotTimerTester f(&did_run);
f.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(did_run);
}
@@ -127,7 +129,7 @@ void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
// This should run before the timer expires.
- base::MessageLoop::current()->DeleteSoon(FROM_HERE, a);
+ base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
@@ -136,7 +138,7 @@ void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
OneShotTimerTester b(&did_run_b);
b.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_FALSE(did_run_a);
EXPECT_TRUE(did_run_b);
@@ -150,7 +152,7 @@ void RunTest_OneShotSelfDeletingTimer(
OneShotSelfDeletingTimerTester f(&did_run);
f.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(did_run);
}
@@ -163,7 +165,7 @@ void RunTest_RepeatingTimer(base::MessageLoop::Type message_loop_type,
RepeatingTimerTester f(&did_run, delay);
f.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(did_run);
}
@@ -176,7 +178,7 @@ void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
// This should run before the timer expires.
- base::MessageLoop::current()->DeleteSoon(FROM_HERE, a);
+ base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
@@ -185,7 +187,7 @@ void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
RepeatingTimerTester b(&did_run_b, delay);
b.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_FALSE(did_run_a);
EXPECT_TRUE(did_run_b);
@@ -215,7 +217,7 @@ void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
bool did_run = false;
OneShotTimerTester tester(&did_run);
tester.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
ASSERT_FALSE(target.signaled());
}
@@ -231,7 +233,7 @@ void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
bool did_run = false;
OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
tester.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
ASSERT_TRUE(target.signaled());
}
@@ -270,7 +272,7 @@ void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
bool did_run = false;
OneShotTimerTester tester(&did_run, 300);
tester.Start();
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
ASSERT_TRUE(target.signaled());
}
@@ -513,7 +515,7 @@ TEST(TimerTest, ContinuationStopStart) {
timer.Stop();
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
base::Bind(&SetCallbackHappened2));
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_FALSE(g_callback_happened1);
EXPECT_TRUE(g_callback_happened2);
}
@@ -529,7 +531,7 @@ TEST(TimerTest, ContinuationReset) {
timer.Reset();
// Since Reset happened before task ran, the user_task must not be cleared:
ASSERT_FALSE(timer.user_task().is_null());
- base::MessageLoop::current()->Run();
+ base::RunLoop().Run();
EXPECT_TRUE(g_callback_happened1);
}
}
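
The timer tests above replace MessageLoop::current()->Run() and MessageLoop::current()->DeleteSoon() with base::RunLoop and base::ThreadTaskRunnerHandle. A minimal sketch of the same pattern outside the test fixture, using only the base APIs named in these hunks (DoWork is an illustrative placeholder):

#include "base/bind.h"
#include "base/location.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"

namespace {
void DoWork() {
  // A timer callback or test body would run here.
}
}  // namespace

void PumpPostedWork() {
  base::MessageLoop loop;  // Binds a task runner to the current thread.

  // Post through the thread's task runner instead of MessageLoop methods.
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                base::Bind(&DoWork));

  // Drive the loop with RunLoop rather than MessageLoop::current()->Run().
  base::RunLoop().RunUntilIdle();
}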
diff --git a/chromium/base/trace_event/etw_manifest/BUILD.gn b/chromium/base/trace_event/etw_manifest/BUILD.gn
index 1e16672825e..19c4ecfdc40 100644
--- a/chromium/base/trace_event/etw_manifest/BUILD.gn
+++ b/chromium/base/trace_event/etw_manifest/BUILD.gn
@@ -18,8 +18,12 @@ message_compiler("chrome_events_win") {
user_mode_logging = true
- # TOOD(brucedawson) bug 569989: Enable ETW manifest and compile and link it
- # into the proper places. Enabling as-is may add the resources to too many
- # targets. See the bug for more information.
+ # The only code generated from chrome_events_win.man is a header file that
+ # is included by trace_event_etw_export_win.cc, so there is no need to
+ # compile any generated code. The other thing which compile_generated_code
+ # controls in this context is linking in the .res file generated from the
+ # manifest. However this is only needed for ETW provider registration which
+ # is done by UIforETW (https://github.com/google/UIforETW) and therefore the
+ # manifest resource can be skipped in Chrome.
compile_generated_code = false
}
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context.cc b/chromium/base/trace_event/heap_profiler_allocation_context.cc
index 374d5043d19..0f330a817ed 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context.cc
@@ -31,12 +31,23 @@ bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
}
+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+ return !(lhs == rhs);
+}
+
AllocationContext::AllocationContext(): type_name(nullptr) {}
+AllocationContext::AllocationContext(const Backtrace& backtrace,
+ const char* type_name)
+ : backtrace(backtrace), type_name(type_name) {}
+
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
}
+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+ return !(lhs == rhs);
+}
} // namespace trace_event
} // namespace base
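
With the new (Backtrace, type_name) constructor and the operator!= overloads added above, contexts can be built and compared by value. A small sketch; the frame name and type names are illustrative:

#include "base/trace_event/heap_profiler_allocation_context.h"

using base::trace_event::AllocationContext;
using base::trace_event::Backtrace;
using base::trace_event::StackFrame;

bool ContextsDiffer() {
  Backtrace bt = {};
  bt.frames[0] = StackFrame::FromTraceEventName("Example");
  bt.frame_count = 1;

  AllocationContext a(bt, "int");
  AllocationContext b(bt, "double");

  // operator== / operator!= compare both the backtrace and the type name.
  return a != b;  // true: same backtrace, different type names
}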
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context.h b/chromium/base/trace_event/heap_profiler_allocation_context.h
index 3566dd08f5d..24e2dec73f1 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_context.h
@@ -71,18 +71,20 @@ struct BASE_EXPORT Backtrace {
// If the stack is higher than what can be stored here, the bottom frames
// (the ones closer to main()) are stored. Depth of 12 is enough for most
// pseudo traces (see above), but not for native traces, where we need more.
- enum { kMaxFrameCount = 24 };
+ enum { kMaxFrameCount = 48 };
StackFrame frames[kMaxFrameCount];
size_t frame_count;
};
bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
+bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
// The |AllocationContext| is context metadata that is kept for every allocation
// when heap profiling is enabled. To simplify memory management for book-
// keeping, this struct has a fixed size.
struct BASE_EXPORT AllocationContext {
AllocationContext();
+ AllocationContext(const Backtrace& backtrace, const char* type_name);
Backtrace backtrace;
@@ -95,6 +97,8 @@ struct BASE_EXPORT AllocationContext {
bool BASE_EXPORT operator==(const AllocationContext& lhs,
const AllocationContext& rhs);
+bool BASE_EXPORT operator!=(const AllocationContext& lhs,
+ const AllocationContext& rhs);
// Struct to store the size and count of the allocations.
struct AllocationMetrics {
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index fac4a8a7b43..31f311a918e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -168,8 +168,8 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
- auto backtrace = std::begin(ctx.backtrace.frames);
- auto backtrace_end = std::end(ctx.backtrace.frames);
+ auto* backtrace = std::begin(ctx.backtrace.frames);
+ auto* backtrace_end = std::end(ctx.backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 07d5f253dd4..3064a6a7117 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,8 +34,8 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
- auto actual = std::begin(ctx.backtrace.frames);
- auto actual_bottom = actual + ctx.backtrace.frame_count;
+ auto* actual = std::begin(ctx.backtrace.frames);
+ auto* actual_bottom = actual + ctx.backtrace.frame_count;
auto expected = std::begin(expected_backtrace);
auto expected_bottom = std::end(expected_backtrace);
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.cc b/chromium/base/trace_event/heap_profiler_allocation_register.cc
index a0fc4be282d..2c2cd378bbd 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.cc
@@ -4,116 +4,20 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
+#include <algorithm>
+
#include "base/trace_event/trace_event_memory_overhead.h"
namespace base {
namespace trace_event {
-AllocationRegister::AllocationRegister()
- : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {}
-
-AllocationRegister::AllocationRegister(uint32_t num_cells)
- // Reserve enough address space to store |num_cells_| entries if necessary,
- // with a guard page after it to crash the program when attempting to store
- // more entries.
- : num_cells_(num_cells),
- cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ *
- sizeof(Cell)))),
- buckets_(static_cast<CellIndex*>(
- AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
-
- // The free list is empty. The first unused cell is cell 1, because index
- // 0 is used as list terminator.
- free_list_(0),
- next_unused_cell_(1) {}
-
-AllocationRegister::~AllocationRegister() {
- FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
- FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell));
-}
-
-void AllocationRegister::Insert(void* address,
- size_t size,
- AllocationContext context) {
- DCHECK(address != nullptr);
- if (size == 0)
- return;
-
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not yet present, so insert it.
- if (*idx_ptr == 0) {
- *idx_ptr = GetFreeCell();
-
- // The address stored in a cell is const as long as it is exposed (via the
- // iterators or |Get|), but because cells are re-used, a const cast is
- // required to set it on insert and remove.
- void* const& allocation_address = cells_[*idx_ptr].allocation.address;
- const_cast<void*&>(allocation_address) = address;
- cells_[*idx_ptr].next = 0;
- }
-
- cells_[*idx_ptr].allocation.size = size;
- cells_[*idx_ptr].allocation.context = context;
-}
-
-void AllocationRegister::Remove(void* address) {
- // Get a pointer to the index of the cell that stores |address|. The index can
- // be an element of |buckets_| or the |next| member of a cell.
- CellIndex* idx_ptr = Lookup(address);
- CellIndex freed_idx = *idx_ptr;
-
- // If the index is 0, the address was not there in the first place.
- if (freed_idx == 0)
- return;
-
- // The cell at the index is now free, remove it from the linked list for
- // |Hash(address)|.
- Cell* freed_cell = &cells_[freed_idx];
- *idx_ptr = freed_cell->next;
-
- // Put the free cell at the front of the free list.
- freed_cell->next = free_list_;
- free_list_ = freed_idx;
-
- // Reset the address, so that on iteration the free cell is ignored.
- const_cast<void*&>(freed_cell->allocation.address) = nullptr;
-}
-
-AllocationRegister::Allocation* AllocationRegister::Get(void* address) {
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not present in the table.
- return *idx_ptr == 0 ? nullptr : &cells_[*idx_ptr].allocation;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::begin() const {
- // Initialize the iterator's index to 0. Cell 0 never stores an entry.
- ConstIterator iterator(*this, 0);
- // Incrementing will advance the iterator to the first used cell.
- ++iterator;
- return iterator;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::end() const {
- // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
- // so index |next_unused_cell_| is an iterator past the last element, in line
- // with the STL iterator conventions.
- return ConstIterator(*this, next_unused_cell_);
-}
-
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register,
- CellIndex index)
- : register_(alloc_register), index_(index) {}
+ const AllocationRegister& alloc_register, AllocationIndex index)
+ : register_(alloc_register),
+ index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
- // Find the next cell with a non-null address until all cells that could
- // possibly be used have been iterated. A null address indicates a free cell.
- do {
- index_++;
- } while (index_ < register_.next_unused_cell_ &&
- register_.cells_[index_].allocation.address == nullptr);
+ index_ = register_.allocations_.Next(index_ + 1);
}
bool AllocationRegister::ConstIterator::operator!=(
@@ -121,53 +25,38 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
-operator*() const {
- return register_.cells_[index_].allocation;
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
+ return register_.GetAllocation(index_);
}
-AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
- // The list head is in |buckets_| at the hash offset.
- CellIndex* idx_ptr = &buckets_[Hash(address)];
+size_t AllocationRegister::BacktraceHasher::operator () (
+ const Backtrace& backtrace) const {
+ const size_t kSampleLength = 10;
- // Chase down the list until the cell that holds |address| is found,
- // or until the list ends.
- while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
- idx_ptr = &cells_[*idx_ptr].next;
+ uintptr_t total_value = 0;
- return idx_ptr;
-}
-
-AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
- // First try to re-use a cell from the freelist.
- if (free_list_) {
- CellIndex idx = free_list_;
- free_list_ = cells_[idx].next;
- return idx;
+ size_t head_end = std::min(backtrace.frame_count, kSampleLength);
+ for (size_t i = 0; i != head_end; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
- // Otherwise pick the next cell that has not been touched before.
- CellIndex idx = next_unused_cell_;
- next_unused_cell_++;
-
- // If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
- // the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
+ size_t tail_start = backtrace.frame_count -
+ std::min(backtrace.frame_count - head_end, kSampleLength);
+ for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+ }
- DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+ total_value += backtrace.frame_count;
- return idx;
+ // These magic constants give best results in terms of average collisions
+ // per backtrace. They were found by replaying real backtraces from Linux
+ // and Android against different hash functions.
+ return (total_value * 131101) >> 14;
}
-// static
-uint32_t AllocationRegister::Hash(void* address) {
+size_t AllocationRegister::AddressHasher::operator () (
+ const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
@@ -178,22 +67,114 @@ uint32_t AllocationRegister::Hash(void* address) {
const uintptr_t a = 131101;
const uintptr_t shift = 14;
const uintptr_t h = (key * a) >> shift;
- return static_cast<uint32_t>(h) & kNumBucketsMask;
+ return h;
+}
+
+AllocationRegister::AllocationRegister()
+ : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
+
+AllocationRegister::AllocationRegister(size_t allocation_capacity,
+ size_t backtrace_capacity)
+ : allocations_(allocation_capacity),
+ backtraces_(backtrace_capacity) {}
+
+AllocationRegister::~AllocationRegister() {
+}
+
+void AllocationRegister::Insert(const void* address,
+ size_t size,
+ const AllocationContext& context) {
+ DCHECK(address != nullptr);
+ if (size == 0) {
+ return;
+ }
+
+ AllocationInfo info = {
+ size,
+ context.type_name,
+ InsertBacktrace(context.backtrace)
+ };
+
+ // Try to insert the allocation.
+ auto index_and_flag = allocations_.Insert(address, info);
+ if (!index_and_flag.second) {
+ // |address| is already there - overwrite the allocation info.
+ auto& old_info = allocations_.Get(index_and_flag.first).second;
+ RemoveBacktrace(old_info.backtrace_index);
+ old_info = info;
+ }
+}
+
+void AllocationRegister::Remove(const void* address) {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return;
+ }
+
+ const AllocationInfo& info = allocations_.Get(index).second;
+ RemoveBacktrace(info.backtrace_index);
+ allocations_.Remove(index);
+}
+
+bool AllocationRegister::Get(const void* address,
+ Allocation* out_allocation) const {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return false;
+ }
+
+ if (out_allocation) {
+ *out_allocation = GetAllocation(index);
+ }
+ return true;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+ return ConstIterator(*this, allocations_.Next(0));
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+ return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
- // Estimate memory overhead by counting all of the cells that have ever been
- // touched. Don't report mmapped memory as allocated, because it has not been
- // allocated by malloc.
size_t allocated = sizeof(AllocationRegister);
size_t resident = sizeof(AllocationRegister)
- // Include size of touched cells (size of |*cells_|).
- + sizeof(Cell) * next_unused_cell_
- // Size of |*buckets_|.
- + sizeof(CellIndex) * kNumBuckets;
+ + allocations_.EstimateUsedMemory()
+ + backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
+AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
+ const Backtrace& backtrace) {
+ auto index = backtraces_.Insert(backtrace, 0).first;
+ auto& backtrace_and_count = backtraces_.Get(index);
+ backtrace_and_count.second++;
+ return index;
+}
+
+void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
+ auto& backtrace_and_count = backtraces_.Get(index);
+ if (--backtrace_and_count.second == 0) {
+ // Backtrace is not referenced anymore - remove it.
+ backtraces_.Remove(index);
+ }
+}
+
+AllocationRegister::Allocation AllocationRegister::GetAllocation(
+ AllocationMap::KVIndex index) const {
+ const auto& address_and_info = allocations_.Get(index);
+ const auto& backtrace_and_count = backtraces_.Get(
+ address_and_info.second.backtrace_index);
+ return {
+ address_and_info.first,
+ address_and_info.second.size,
+ AllocationContext(
+ backtrace_and_count.first,
+ address_and_info.second.type_name)
+ };
+}
+
} // namespace trace_event
} // namespace base
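
The rewritten register above stores allocations and backtraces in two fixed-size hash maps and reference-counts each backtrace. A short usage sketch against the API visible in this patch; addresses and sizes are made up:

#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_register.h"

using base::trace_event::AllocationContext;
using base::trace_event::AllocationRegister;

size_t TrackAndSum() {
  AllocationRegister reg;         // Default capacities; reserves address space only.
  AllocationContext ctx;
  ctx.backtrace.frame_count = 0;  // Ensure a well-defined (empty) backtrace.

  reg.Insert(reinterpret_cast<void*>(0x1000), 32, ctx);
  reg.Insert(reinterpret_cast<void*>(0x2000), 64, ctx);

  AllocationRegister::Allocation found;
  if (reg.Get(reinterpret_cast<void*>(0x1000), &found)) {
    // found.size == 32; found.context is rebuilt from the stored backtrace
    // index and type name, so it compares equal to |ctx|.
  }

  reg.Remove(reinterpret_cast<void*>(0x2000));

  size_t total = 0;
  for (const auto& alloc : reg)   // Iterates live allocations, in no order.
    total += alloc.size;
  return total;                   // 32
}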
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.h b/chromium/base/trace_event/heap_profiler_allocation_register.h
index 976f2f50a9c..86e2721c56e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.h
@@ -8,77 +8,288 @@
#include <stddef.h>
#include <stdint.h>
+#include <utility>
+
+#include "base/bits.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
namespace base {
namespace trace_event {
+class AllocationRegisterTest;
+
+namespace internal {
+
+// Allocates a region of virtual address space of |size| rounded up to the
+// system page size. The memory is zeroed by the system. A guard page is
+// added after the end.
+void* AllocateGuardedVirtualMemory(size_t size);
+
+// Frees a region of virtual address space allocated by a call to
+// |AllocateVirtualMemory|.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
+
+// Hash map that mmaps memory only once in the constructor. Its API is
+// similar to std::unordered_map, only index (KVIndex) is used to address
+// entries.
+template <size_t NumBuckets, class Key, class Value, class KeyHasher>
+class FixedHashMap {
+ // To keep things simple we don't call destructors.
+ static_assert(is_trivially_destructible<Key>::value &&
+ is_trivially_destructible<Value>::value,
+ "Key and Value shouldn't have destructors");
+ public:
+ using KVPair = std::pair<const Key, Value>;
+
+ // For implementation simplicity API uses integer index instead
+ // of iterators. Most operations (except FindValidIndex) on KVIndex
+ // are O(1).
+ using KVIndex = size_t;
+ static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+
+ // Capacity controls how many items this hash map can hold, and largely
+ // affects memory footprint.
+ FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
+
+ ~FixedHashMap() {
+ FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+ FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
+ }
+
+ std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
+ Cell** p_cell = Lookup(key);
+ Cell* cell = *p_cell;
+ if (cell) {
+ return {static_cast<KVIndex>(cell - cells_), false}; // not inserted
+ }
+
+ // Get a free cell and link it.
+ *p_cell = cell = GetFreeCell();
+ cell->p_prev = p_cell;
+ cell->next = nullptr;
+
+ // Initialize key/value pair. Since key is 'const Key' this is the
+ // only way to initialize it.
+ new (&cell->kv) KVPair(key, value);
+
+ return {static_cast<KVIndex>(cell - cells_), true}; // inserted
+ }
+
+ void Remove(KVIndex index) {
+ DCHECK_LT(index, next_unused_cell_);
+
+ Cell* cell = &cells_[index];
+
+ // Unlink the cell.
+ *cell->p_prev = cell->next;
+ if (cell->next) {
+ cell->next->p_prev = cell->p_prev;
+ }
+ cell->p_prev = nullptr; // mark as free
+
+ // Add it to the free list.
+ cell->next = free_list_;
+ free_list_ = cell;
+ }
+
+ KVIndex Find(const Key& key) const {
+ Cell* cell = *Lookup(key);
+ return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
+ }
+
+ KVPair& Get(KVIndex index) {
+ return cells_[index].kv;
+ }
+
+ const KVPair& Get(KVIndex index) const {
+ return cells_[index].kv;
+ }
+
+ // Finds next index that has a KVPair associated with it. Search starts
+ // with the specified index. Returns kInvalidKVIndex if nothing was found.
+ // To find the first valid index, call this function with 0. Continue
+ // calling with the last_index + 1 until kInvalidKVIndex is returned.
+ KVIndex Next(KVIndex index) const {
+ for (;index < next_unused_cell_; ++index) {
+ if (cells_[index].p_prev) {
+ return index;
+ }
+ }
+ return kInvalidKVIndex;
+ }
+
+ // Estimates number of bytes used in allocated memory regions.
+ size_t EstimateUsedMemory() const {
+ size_t page_size = base::GetPageSize();
+ // |next_unused_cell_| is the first cell that wasn't touched, i.e.
+ // it's the number of touched cells.
+ return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
+ bits::Align(sizeof(Bucket) * NumBuckets, page_size);
+ }
+
+ private:
+ friend base::trace_event::AllocationRegisterTest;
+
+ struct Cell {
+ KVPair kv;
+ Cell* next;
+
+ // Conceptually this is |prev| in a doubly linked list. However, buckets
+ // also participate in the bucket's cell list - they point to the list's
+ // head and also need to be linked / unlinked properly. To treat these two
+ // cases uniformly, instead of |prev| we're storing "pointer to a Cell*
+ // that points to this Cell" kind of thing. So |p_prev| points to a bucket
+ // for the first cell in a list, and points to |next| of the previous cell
+ // for any other cell. With that Lookup() is the only function that handles
+ // buckets / cells differently.
+ // If |p_prev| is nullptr, the cell is in the free list.
+ Cell** p_prev;
+ };
+
+ using Bucket = Cell*;
+
+ // Returns a pointer to the cell that contains or should contain the entry
+ // for |key|. The pointer may point at an element of |buckets_| or at the
+ // |next| member of an element of |cells_|.
+ Cell** Lookup(const Key& key) const {
+ // The list head is in |buckets_| at the hash offset.
+ Cell** p_cell = &buckets_[Hash(key)];
+
+ // Chase down the list until the cell that holds |key| is found,
+ // or until the list ends.
+ while (*p_cell && (*p_cell)->kv.first != key) {
+ p_cell = &(*p_cell)->next;
+ }
+
+ return p_cell;
+ }
+
+ // Returns a cell that is not being used to store an entry (either by
+ // recycling from the free list or by taking a fresh cell).
+ Cell* GetFreeCell() {
+ // First try to re-use a cell from the free list.
+ if (free_list_) {
+ Cell* cell = free_list_;
+ free_list_ = cell->next;
+ return cell;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ size_t idx = next_unused_cell_;
+ next_unused_cell_++;
+
+ // If the hash table has too little capacity (when too little address space
+ // was reserved for |cells_|), |next_unused_cell_| can be an index outside
+ // of the allocated storage. A guard page is allocated there to crash the
+ // program in that case. There are alternative solutions:
+ // - Deal with it, increase capacity by reallocating |cells_|.
+ // - Refuse to insert and let the caller deal with it.
+ // Because free cells are re-used before accessing fresh cells with a higher
+ // index, and because reserving address space without touching it is cheap,
+ // the simplest solution is to just allocate a humongous chunk of address
+ // space.
+
+ DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+ return &cells_[idx];
+ }
+
+ // Returns a value in the range [0, NumBuckets - 1] (inclusive).
+ size_t Hash(const Key& key) const {
+ if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
+ // NumBuckets is a power of 2.
+ return KeyHasher()(key) & (NumBuckets - 1);
+ } else {
+ return KeyHasher()(key) % NumBuckets;
+ }
+ }
+
+ // Number of cells.
+ size_t const num_cells_;
+
+ // The array of cells. This array is backed by mmapped memory. Lower indices
+ // are accessed first, higher indices are accessed only when the |free_list_|
+ // is empty. This is to minimize the amount of resident memory used.
+ Cell* const cells_;
+
+ // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
+ // contain the pointer to the linked list of cells for |Hash(key)|.
+ // This array is backed by mmapped memory.
+ mutable Bucket* buckets_;
+
+ // The head of the free list.
+ Cell* free_list_;
+
+ // The index of the first element of |cells_| that has not been used before.
+ // If the free list is empty and a new cell is needed, the cell at this index
+ // is used. This is the high water mark for the number of entries stored.
+ size_t next_unused_cell_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
+};
+
+} // namespace internal
+
class TraceEventMemoryOverhead;
// The allocation register keeps track of all allocations that have not been
-// freed. It is a memory map-backed hash table that stores size and context
-// indexed by address. The hash table is tailored specifically for this use
-// case. The common case is that an entry is inserted and removed after a
-// while, lookup without modifying the table is not an intended use case. The
-// hash table is implemented as an array of linked lists. The size of this
-// array is fixed, but it does not limit the amount of entries that can be
-// stored.
-//
-// Replaying a recording of Chrome's allocations and frees against this hash
-// table takes about 15% of the time that it takes to replay them against
-// |std::map|.
+// freed. Internally it has two hashtables: one for Backtraces and one for
+// actual allocations. Sizes of both hashtables are fixed, and this class
+// allocates (mmaps) only in its constructor.
class BASE_EXPORT AllocationRegister {
public:
- // The data stored in the hash table;
- // contains the details about an allocation.
+ // Details about an allocation.
struct Allocation {
- void* const address;
+ const void* address;
size_t size;
AllocationContext context;
};
- // An iterator that iterates entries in the hash table efficiently, but in no
- // particular order. It can do this by iterating the cells and ignoring the
- // linked lists altogether. Instead of checking whether a cell is in the free
- // list to see if it should be skipped, a null address is used to indicate
- // that a cell is free.
+ // An iterator that iterates entries in no particular order.
class BASE_EXPORT ConstIterator {
public:
void operator++();
bool operator!=(const ConstIterator& other) const;
- const Allocation& operator*() const;
+ Allocation operator*() const;
private:
friend class AllocationRegister;
- using CellIndex = uint32_t;
+ using AllocationIndex = size_t;
- ConstIterator(const AllocationRegister& alloc_register, CellIndex index);
+ ConstIterator(const AllocationRegister& alloc_register,
+ AllocationIndex index);
const AllocationRegister& register_;
- CellIndex index_;
+ AllocationIndex index_;
};
AllocationRegister();
- explicit AllocationRegister(uint32_t num_cells);
+ AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);
~AllocationRegister();
// Inserts allocation details into the table. If the address was present
- // already, its details are updated. |address| must not be null. (This is
- // because null is used to mark free cells, to allow efficient iteration of
- // the hash table.)
- void Insert(void* address, size_t size, AllocationContext context);
+ // already, its details are updated. |address| must not be null.
+ void Insert(const void* address,
+ size_t size,
+ const AllocationContext& context);
// Removes the address from the table if it is present. It is ok to call this
// with a null pointer.
- void Remove(void* address);
+ void Remove(const void* address);
- // Returns a pointer to the allocation at the address, or null if there is no
- // allocation at that address. This can be used to change the allocation
- // context after insertion, for example to change the type name.
- Allocation* Get(void* address);
+ // Finds allocation for the address and fills |out_allocation|.
+ bool Get(const void* address, Allocation* out_allocation) const;
ConstIterator begin() const;
ConstIterator end() const;
@@ -87,85 +298,54 @@ class BASE_EXPORT AllocationRegister {
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
private:
- friend class AllocationRegisterTest;
- using CellIndex = uint32_t;
-
- // A cell can store allocation details (size and context) by address. Cells
- // are part of a linked list via the |next| member. This list is either the
- // list for a particular hash, or the free list. All cells are contiguous in
- // memory in one big array. Therefore, on 64-bit systems, space can be saved
- // by storing 32-bit indices instead of pointers as links. Index 0 is used as
- // the list terminator.
- struct Cell {
- CellIndex next;
- Allocation allocation;
+ friend AllocationRegisterTest;
+
+ // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
+ // hashing and should be changed together with AddressHasher.
+ static const size_t kAllocationBuckets = 1 << 18;
+ static const size_t kAllocationCapacity = 1500000;
+
+ // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
+ // needing to tweak BacktraceHasher implementation.
+ static const size_t kBacktraceBuckets = 1 << 15;
+ static const size_t kBacktraceCapacity = kBacktraceBuckets;
+
+ struct BacktraceHasher {
+ size_t operator () (const Backtrace& backtrace) const;
};
- // The number of buckets, 2^17, approximately 130 000, has been tuned for
- // Chrome's typical number of outstanding allocations. (This number varies
- // between processes. Most processes have a sustained load of ~30k unfreed
- // allocations, but some processes have peeks around 100k-400k allocations.)
- // Because of the size of the table, it is likely that every |buckets_|
- // access and every |cells_| access will incur a cache miss. Microbenchmarks
- // suggest that it is worthwile to use more memory for the table to avoid
- // chasing down the linked list, until the size is 2^18. The number of buckets
- // is a power of two so modular indexing can be done with bitwise and.
- static const uint32_t kNumBuckets = 0x20000;
- static const uint32_t kNumBucketsMask = kNumBuckets - 1;
-
- // Reserve address space to store at most this number of entries. High
- // capacity does not imply high memory usage due to the access pattern. The
- // only constraint on the number of cells is that on 32-bit systems address
- // space is scarce (i.e. reserving 2GiB of address space for the entries is
- // not an option). A value of ~3M entries is large enough to handle spikes in
- // the number of allocations, and modest enough to require no more than a few
- // dozens of MiB of address space.
- static const uint32_t kNumCellsPerBucket = 10;
-
- // Returns a value in the range [0, kNumBuckets - 1] (inclusive).
- static uint32_t Hash(void* address);
-
- // Allocates a region of virtual address space of |size| rounded up to the
- // system page size. The memory is zeroed by the system. A guard page is
- // added after the end.
- static void* AllocateVirtualMemory(size_t size);
-
- // Frees a region of virtual address space allocated by a call to
- // |AllocateVirtualMemory|.
- static void FreeVirtualMemory(void* address, size_t allocated_size);
-
- // Returns a pointer to the variable that contains or should contain the
- // index of the cell that stores the entry for |address|. The pointer may
- // point at an element of |buckets_| or at the |next| member of an element of
- // |cells_|. If the value pointed at is 0, |address| is not in the table.
- CellIndex* Lookup(void* address);
-
- // Takes a cell that is not being used to store an entry (either by recycling
- // from the free list or by taking a fresh cell) and returns its index.
- CellIndex GetFreeCell();
-
- // The maximum number of cells which can be allocated.
- uint32_t const num_cells_;
+ using BacktraceMap = internal::FixedHashMap<
+ kBacktraceBuckets,
+ Backtrace,
+ size_t, // Number of references to the backtrace (the key). Incremented
+ // when an allocation that references the backtrace is inserted,
+ // and decremented when the allocation is removed. When the
+ // number drops to zero, the backtrace is removed from the map.
+ BacktraceHasher>;
- // The array of cells. This array is backed by mmapped memory. Lower indices
- // are accessed first, higher indices are only accessed when required. In
- // this way, even if a huge amount of address space has been mmapped, only
- // the cells that are actually used will be backed by physical memory.
- Cell* const cells_;
+ struct AllocationInfo {
+ size_t size;
+ const char* type_name;
+ BacktraceMap::KVIndex backtrace_index;
+ };
- // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain
- // the index of the head of the linked list for |Hash(address)|. A value of 0
- // indicates an empty list. This array is backed by mmapped memory.
- CellIndex* const buckets_;
+ struct AddressHasher {
+ size_t operator () (const void* address) const;
+ };
- // The head of the free list. This is the index of the cell. A value of 0
- // means that the free list is empty.
- CellIndex free_list_;
+ using AllocationMap = internal::FixedHashMap<
+ kAllocationBuckets,
+ const void*,
+ AllocationInfo,
+ AddressHasher>;
- // The index of the first element of |cells_| that has not been used before.
- // If the free list is empty and a new cell is needed, the cell at this index
- // is used. This is the high water mark for the number of entries stored.
- CellIndex next_unused_cell_;
+ BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
+ void RemoveBacktrace(BacktraceMap::KVIndex index);
+
+ Allocation GetAllocation(AllocationMap::KVIndex) const;
+
+ AllocationMap allocations_;
+ BacktraceMap backtraces_;
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
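
The |p_prev| member of FixedHashMap::Cell documented above uses the classic "pointer to the pointer that points at this node" trick so bucket heads and interior cells are unlinked by the same code path. A standalone sketch of that idea; Node and the function names are illustrative, not the FixedHashMap internals:

#include <cassert>

// Minimal singly linked list where each node also records the address of the
// pointer that points at it (the list head or the previous node's |next|).
struct Node {
  int value;
  Node* next;
  Node** p_prev;  // &head for the first node, &prev->next otherwise.
};

void PushFront(Node** head, Node* node) {
  node->next = *head;
  if (node->next)
    node->next->p_prev = &node->next;
  node->p_prev = head;
  *head = node;
}

void Unlink(Node* node) {
  *node->p_prev = node->next;  // Same code for head and interior nodes.
  if (node->next)
    node->next->p_prev = node->p_prev;
  node->p_prev = nullptr;      // Mark as detached, like the free list does.
}

int main() {
  Node a{1, nullptr, nullptr}, b{2, nullptr, nullptr};
  Node* head = nullptr;
  PushFront(&head, &a);
  PushFront(&head, &b);  // list: b -> a
  Unlink(&b);            // unlink the head without special-casing it
  assert(head == &a && a.next == nullptr);
  return 0;
}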
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc b/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc
index c38d7e69182..94eeb4df88a 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_posix.cc
@@ -18,6 +18,7 @@
namespace base {
namespace trace_event {
+namespace internal {
namespace {
size_t GetGuardSize() {
@@ -25,8 +26,7 @@ size_t GetGuardSize() {
}
}
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
+void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
@@ -48,12 +48,11 @@ void* AllocationRegister::AllocateVirtualMemory(size_t size) {
return addr;
}
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
munmap(address, size);
}
+} // namespace internal
} // namespace trace_event
} // namespace base
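
AllocateGuardedVirtualMemory and FreeGuardedVirtualMemory reserve page-aligned memory followed by an inaccessible guard page, so a FixedHashMap that outgrows its capacity faults instead of silently corrupting memory. A hedged sketch of the general technique with raw POSIX calls, not the exact Chromium implementation:

#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>

namespace {
size_t RoundUpToPage(size_t size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  return (size + page - 1) & ~(page - 1);  // Assumes page size is a power of two.
}
}  // namespace

// Maps |size| bytes (rounded up to a whole page) plus one trailing guard page.
// Any access past the requested region hits PROT_NONE memory and faults.
void* AllocateWithGuardPage(size_t size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size = RoundUpToPage(size);
  void* addr = mmap(nullptr, size + page, PROT_READ | PROT_WRITE,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (addr == MAP_FAILED)
    return nullptr;
  mprotect(static_cast<char*>(addr) + size, page, PROT_NONE);
  return addr;
}

void FreeWithGuardPage(void* addr, size_t size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  munmap(addr, RoundUpToPage(size) + page);
}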
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
index b356aa7853b..7eee61aa35e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
@@ -16,20 +16,21 @@ namespace trace_event {
class AllocationRegisterTest : public testing::Test {
public:
- static const uint32_t kNumBuckets = AllocationRegister::kNumBuckets;
+ // Use a lower number of backtrace cells for unittests to avoid reserving
+ // a virtual region which is too big.
+ static const size_t kAllocationBuckets =
+ AllocationRegister::kAllocationBuckets + 100;
+ static const size_t kAllocationCapacity = kAllocationBuckets;
+ static const size_t kBacktraceCapacity = 10;
// Returns the number of cells that the |AllocationRegister| can store per
// system page.
- size_t GetNumCellsPerPage() {
- return GetPageSize() / sizeof(AllocationRegister::Cell);
+ size_t GetAllocationCapacityPerPage() {
+ return GetPageSize() / sizeof(AllocationRegister::AllocationMap::Cell);
}
- uint32_t GetHighWaterMark(const AllocationRegister& reg) {
- return reg.next_unused_cell_;
- }
-
- uint32_t GetNumCells(const AllocationRegister& reg) {
- return reg.num_cells_;
+ size_t GetHighWaterMark(const AllocationRegister& reg) {
+ return reg.allocations_.next_unused_cell_;
}
};
@@ -56,7 +57,7 @@ size_t SumAllSizes(const AllocationRegister& reg) {
}
TEST_F(AllocationRegisterTest, InsertRemove) {
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
// Zero-sized allocations should be discarded.
@@ -90,7 +91,7 @@ TEST_F(AllocationRegisterTest, InsertRemove) {
}
TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
reg.Insert(reinterpret_cast<void*>(1), 1, ctx);
@@ -103,9 +104,7 @@ TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
}
TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
- // TODO(ruuda): Although double insert happens in practice, it should not.
- // Find out the cause and ban double insert if possible.
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
StackFrame frame1 = StackFrame::FromTraceEventName("Foo");
StackFrame frame2 = StackFrame::FromTraceEventName("Bar");
@@ -139,12 +138,12 @@ TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
// register still behaves correctly.
TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
size_t expected_sum = 0;
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
// By inserting 100 more entries than the number of buckets, there will be at
- // least 100 collisions.
- for (uintptr_t i = 1; i <= kNumBuckets + 100; i++) {
+ // least 100 collisions (100 = kAllocationCapacity - kAllocationBuckets).
+ for (uintptr_t i = 1; i <= kAllocationCapacity; i++) {
size_t size = i % 31;
expected_sum += size;
reg.Insert(reinterpret_cast<void*>(i), size, ctx);
@@ -156,7 +155,7 @@ TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
EXPECT_EQ(expected_sum, SumAllSizes(reg));
- for (uintptr_t i = 1; i <= kNumBuckets + 100; i++) {
+ for (uintptr_t i = 1; i <= kAllocationCapacity; i++) {
size_t size = i % 31;
expected_sum -= size;
reg.Remove(reinterpret_cast<void*>(i));
@@ -176,7 +175,7 @@ TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
// free list is utilised properly.
TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
size_t expected_sum = 0;
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
uintptr_t generator = 3;
@@ -216,74 +215,52 @@ TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
using Allocation = AllocationRegister::Allocation;
- const char kStdString[] = "std::string";
- AllocationRegister reg;
+ AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
AllocationContext ctx;
reg.Insert(reinterpret_cast<void*>(17), 1, ctx);
reg.Insert(reinterpret_cast<void*>(19), 2, ctx);
reg.Insert(reinterpret_cast<void*>(23), 3, ctx);
+ Allocation a;
+
// Looking up addresses that were not inserted should return null.
// A null pointer lookup is a valid thing to do.
- EXPECT_EQ(nullptr, reg.Get(nullptr));
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(13)));
-
- Allocation* a17 = reg.Get(reinterpret_cast<void*>(17));
- Allocation* a19 = reg.Get(reinterpret_cast<void*>(19));
- Allocation* a23 = reg.Get(reinterpret_cast<void*>(23));
+ EXPECT_FALSE(reg.Get(nullptr, &a));
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(13), &a));
- EXPECT_NE(nullptr, a17);
- EXPECT_NE(nullptr, a19);
- EXPECT_NE(nullptr, a23);
-
- a17->size = 100;
- a19->context.type_name = kStdString;
+ EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(17), &a));
+ EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(19), &a));
+ EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(23), &a));
reg.Remove(reinterpret_cast<void*>(23));
// Lookup should not find any garbage after removal.
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(23)));
-
- // Mutating allocations should have modified the allocations in the register.
- for (const Allocation& allocation : reg) {
- if (allocation.address == reinterpret_cast<void*>(17))
- EXPECT_EQ(100u, allocation.size);
- if (allocation.address == reinterpret_cast<void*>(19))
- EXPECT_EQ(kStdString, allocation.context.type_name);
- }
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(23), &a));
reg.Remove(reinterpret_cast<void*>(17));
reg.Remove(reinterpret_cast<void*>(19));
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(17)));
- EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(19)));
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(17), &a));
+ EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(19), &a));
}
// Check that the process aborts due to hitting the guard page when inserting
// too many elements.
#if GTEST_HAS_DEATH_TEST
TEST_F(AllocationRegisterTest, OverflowDeathTest) {
- // Use a smaller register to prevent OOM errors on low-end devices.
- AllocationRegister reg(static_cast<uint32_t>(GetNumCellsPerPage()));
+ const size_t allocation_capacity = GetAllocationCapacityPerPage();
+ AllocationRegister reg(allocation_capacity, kBacktraceCapacity);
AllocationContext ctx;
- uintptr_t i;
+ size_t i;
- // Fill up all of the memory allocated for the register. |GetNumCells(reg)|
- // minus 1 elements are inserted, because cell 0 is unused, so this should
- // fill up the available cells exactly.
- for (i = 1; i < GetNumCells(reg); i++) {
- reg.Insert(reinterpret_cast<void*>(i), 1, ctx);
+ // Fill up all of the memory allocated for the register's allocation map.
+ for (i = 0; i < allocation_capacity; i++) {
+ reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx);
}
- // Adding just one extra element might still work because the allocated memory
- // is rounded up to the page size. Adding a page full of elements should cause
- // overflow.
- const size_t cells_per_page = GetNumCellsPerPage();
-
- ASSERT_DEATH(for (size_t j = 0; j < cells_per_page; j++) {
- reg.Insert(reinterpret_cast<void*>(i + j), 1, ctx);
- }, "");
+ // Adding just one extra element should cause overflow.
+ ASSERT_DEATH(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx), "");
}
#endif
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_win.cc b/chromium/base/trace_event/heap_profiler_allocation_register_win.cc
index bc0afbf3401..39cbb180728 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_win.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_win.cc
@@ -13,6 +13,7 @@
namespace base {
namespace trace_event {
+namespace internal {
namespace {
size_t GetGuardSize() {
@@ -20,8 +21,7 @@ size_t GetGuardSize() {
}
}
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
+void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
@@ -50,14 +50,13 @@ void* AllocationRegister::AllocateVirtualMemory(size_t size) {
return addr;
}
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
// For |VirtualFree|, the size passed with |MEM_RELEASE| must be 0. Windows
// automatically frees the entire region that was reserved by the
// |VirtualAlloc| with flag |MEM_RESERVE|.
VirtualFree(address, 0, MEM_RELEASE);
}
+} // namespace internal
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
index 6b31bd6e9f5..44e43875441 100644
--- a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
+++ b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -12,8 +12,9 @@ namespace trace_event {
TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
auto jhdp = JavaHeapDumpProvider::GetInstance();
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, dump_args));
jhdp->OnMemoryDump(dump_args, pmd.get());
}
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index cf69859404e..3b1a933bce5 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -229,7 +229,7 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
// This is the case of GetInstanceForCurrentThread() being called for the
// first time, which causes a new() inside the tracker which re-enters the
// heap profiler, in which case we just want to early out.
- auto tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+ auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
AllocationContext context = tracker->GetContextSnapshot();
diff --git a/chromium/base/trace_event/memory_allocator_dump.cc b/chromium/base/trace_event/memory_allocator_dump.cc
index f9b5799c05e..7583763889e 100644
--- a/chromium/base/trace_event/memory_allocator_dump.cc
+++ b/chromium/base/trace_event/memory_allocator_dump.cc
@@ -80,6 +80,13 @@ void MemoryAllocatorDump::AddScalarF(const char* name,
void MemoryAllocatorDump::AddString(const char* name,
const char* units,
const std::string& value) {
+ // String attributes are disabled in background mode.
+ if (process_memory_dump_->dump_args().level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND) {
+ NOTREACHED();
+ return;
+ }
+
attributes_->BeginDictionary(name);
attributes_->SetString("type", kTypeString);
attributes_->SetString("units", units);
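
The guard added above turns AddString into a NOTREACHED() for background-mode dumps, so providers should emit only scalar attributes there. A hedged sketch of a provider honouring that; the dump name and attribute values are illustrative:

#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"

using base::trace_event::MemoryAllocatorDump;
using base::trace_event::MemoryDumpArgs;
using base::trace_event::MemoryDumpLevelOfDetail;
using base::trace_event::ProcessMemoryDump;

void DumpMyCache(const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("my_cache");  // hypothetical name
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, 4096);
  // String attributes are only legal outside background mode.
  if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND)
    dump->AddString("policy", "string", "LRU");
}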
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index 359f081154e..1bf9715917d 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -129,8 +129,8 @@ TEST(MemoryAllocatorDumpTest, GuidGeneration) {
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
FakeMemoryAllocatorDumpProvider fmadp;
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
fmadp.OnMemoryDump(dump_args, &pmd);
@@ -176,7 +176,8 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
FakeMemoryAllocatorDumpProvider fmadp;
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
pmd.CreateAllocatorDump("foo_allocator");
pmd.CreateAllocatorDump("bar_allocator/heap");
ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
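
Both tests above reflect the new ProcessMemoryDump constructor that takes MemoryDumpArgs alongside the session state. A minimal sketch of constructing a dump and handing it to a provider, following the calls visible in these hunks (the provider instance is illustrative):

#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"

using base::trace_event::MemoryDumpArgs;
using base::trace_event::MemoryDumpLevelOfDetail;
using base::trace_event::MemoryDumpProvider;
using base::trace_event::MemoryDumpSessionState;
using base::trace_event::ProcessMemoryDump;

bool DumpWith(MemoryDumpProvider* provider) {
  // The level of detail now travels with the dump itself, so e.g. string
  // attributes can be rejected for BACKGROUND dumps (see the
  // memory_allocator_dump.cc hunk above).
  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
  ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
  return provider->OnMemoryDump(dump_args, &pmd);
}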
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index b14d265f19e..eed070a7829 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -23,6 +23,7 @@
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
@@ -46,27 +47,8 @@ const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
-uint32_t g_periodic_dumps_count = 0;
-uint32_t g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;
-void RequestPeriodicGlobalDump() {
- MemoryDumpLevelOfDetail level_of_detail;
- if (g_heavy_dumps_rate == 0) {
- level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- } else {
- level_of_detail = g_periodic_dumps_count == 0
- ? MemoryDumpLevelOfDetail::DETAILED
- : MemoryDumpLevelOfDetail::LIGHT;
-
- if (++g_periodic_dumps_count == g_heavy_dumps_rate)
- g_periodic_dumps_count = 0;
- }
-
- MemoryDumpManager::GetInstance()->RequestGlobalDump(
- MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -116,6 +98,9 @@ const char* const MemoryDumpManager::kTraceCategory =
TRACE_DISABLED_BY_DEFAULT("memory-infra");
// static
+const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";
+
+// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
// static
@@ -272,8 +257,10 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
if (dumper_registrations_ignored_for_testing_)
return;
+ bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
- new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options);
+ new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+ whitelisted_for_background_mode);
{
AutoLock lock(lock_);
@@ -351,8 +338,13 @@ void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
const MemoryDumpCallback& callback) {
- // Bail out immediately if tracing is not enabled at all.
- if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
+ // Bail out immediately if tracing is not enabled at all or if the dump mode
+ // is not allowed.
+ if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
+ !IsDumpModeAllowed(level_of_detail)) {
+ VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
+ << " tracing category is not enabled or the requested dump mode is "
+ "not allowed by trace config.";
if (!callback.is_null())
callback.Run(0u /* guid */, false /* success */);
return;
@@ -396,15 +388,33 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
TRACE_ID_MANGLE(args.dump_guid));
+ // If argument filter is enabled then only background mode dumps should be
+ // allowed. In case the trace config passed for background tracing session
+ // missed the allowed modes argument, it crashes here instead of creating
+ // unexpected dumps.
+ if (TraceLog::GetInstance()
+ ->GetCurrentTraceConfig()
+ .IsArgumentFilterEnabled()) {
+ CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+ }
+
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
{
AutoLock lock(lock_);
+
// |dump_thread_| can be nullptr if tracing was disabled before reaching
// here. SetupNextMemoryDump() is robust enough to tolerate it and will
// NACK the dump.
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_, session_state_, callback,
dump_thread_ ? dump_thread_->task_runner() : nullptr));
+
+ // Safety check to prevent reaching here without calling RequestGlobalDump,
+ // with disallowed modes. If |session_state_| is null then tracing is
+ // disabled.
+ CHECK(!session_state_ ||
+ session_state_->memory_dump_config().allowed_dump_modes.count(
+ args.level_of_detail));
}
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -438,6 +448,14 @@ void MemoryDumpManager::SetupNextMemoryDump(
// Anyway either tracing is stopped or this was the last hop, create a trace
// event, add it to the trace and finalize process dump invoking the callback.
if (!pmd_async_state->dump_thread_task_runner.get()) {
+ if (pmd_async_state->pending_dump_providers.empty()) {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before finalizing the dump";
+ } else {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before dumping "
+ << pmd_async_state->pending_dump_providers.back().get()->name;
+ }
pmd_async_state->dump_successful = false;
pmd_async_state->pending_dump_providers.clear();
}
@@ -449,6 +467,15 @@ void MemoryDumpManager::SetupNextMemoryDump(
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
+ // If we are in background tracing, we should invoke only the whitelisted
+ // providers. Ignore other providers and continue.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND &&
+ !mdpinfo->whitelisted_for_background_mode) {
+ pmd_async_state->pending_dump_providers.pop_back();
+ return SetupNextMemoryDump(std::move(pmd_async_state));
+ }
+
// If the dump provider did not specify a task runner affinity, dump on
// |dump_thread_| which is already checked above for presence.
SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
@@ -547,9 +574,10 @@ void MemoryDumpManager::InvokeOnMemoryDump(
// process), non-zero when the coordinator process creates dumps on behalf
// of child processes (see crbug.com/461788).
ProcessId target_pid = mdpinfo->options.target_pid;
- ProcessMemoryDump* pmd =
- pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+ ProcessMemoryDump* pmd =
+ pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
+ args);
bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
mdpinfo->consecutive_failures =
dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
@@ -602,8 +630,11 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
bool tracing_still_enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
- if (!tracing_still_enabled)
+ if (!tracing_still_enabled) {
pmd_async_state->dump_successful = false;
+ VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
+ << " the dump was completed";
+ }
if (!pmd_async_state->callback.is_null()) {
pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
@@ -632,78 +663,57 @@ void MemoryDumpManager::OnTraceLogEnabled() {
return;
}
- AutoLock lock(lock_);
-
- DCHECK(delegate_); // At this point we must have a delegate.
- session_state_ = new MemoryDumpSessionState;
-
+ const TraceConfig trace_config =
+ TraceLog::GetInstance()->GetCurrentTraceConfig();
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
if (heap_profiling_enabled_) {
// If heap profiling is enabled, the stack frame deduplicator and type name
    // deduplicator will be in use. Add metadata events to write the frames
// and type IDs.
- session_state_->SetStackFrameDeduplicator(
+ session_state->SetStackFrameDeduplicator(
WrapUnique(new StackFrameDeduplicator));
- session_state_->SetTypeNameDeduplicator(
+ session_state->SetTypeNameDeduplicator(
WrapUnique(new TypeNameDeduplicator));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
"stackFrames",
- WrapUnique(
- new SessionStateConvertableProxy<StackFrameDeduplicator>(
- session_state_,
- &MemoryDumpSessionState::stack_frame_deduplicator)));
+ WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
TRACE_EVENT_API_ADD_METADATA_EVENT(
TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
"typeNames",
WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
- session_state_, &MemoryDumpSessionState::type_name_deduplicator)));
+ session_state, &MemoryDumpSessionState::type_name_deduplicator)));
}
- DCHECK(!dump_thread_);
- dump_thread_ = std::move(dump_thread);
- subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+ {
+ AutoLock lock(lock_);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- return;
- }
+ DCHECK(delegate_); // At this point we must have a delegate.
+ session_state_ = session_state;
- // Enable periodic dumps. At the moment the periodic support is limited to at
- // most one low-detail periodic dump and at most one high-detail periodic
- // dump. If both are specified the high-detail period must be an integer
- // multiple of the low-level one.
- g_periodic_dumps_count = 0;
- const TraceConfig trace_config =
- TraceLog::GetInstance()->GetCurrentTraceConfig();
- session_state_->SetMemoryDumpConfig(trace_config.memory_dump_config());
- const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list =
- trace_config.memory_dump_config().triggers;
- if (triggers_list.empty())
- return;
+ DCHECK(!dump_thread_);
+ dump_thread_ = std::move(dump_thread);
- uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
- uint32_t heavy_dump_period_ms = 0;
- DCHECK_LE(triggers_list.size(), 2u);
- for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK(config.periodic_interval_ms);
- if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
- heavy_dump_period_ms = config.periodic_interval_ms;
- min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
+ subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+
+ // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+ // when running memory benchmarks until telemetry uses TraceConfig to
+ // enable/disable periodic dumps. See crbug.com/529184 .
+ if (!is_coordinator_ ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ "enable-memory-benchmarking")) {
+ return;
+ }
}
- DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
- g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
- periodic_dump_timer_.Start(FROM_HERE,
- TimeDelta::FromMilliseconds(min_timer_period_ms),
- base::Bind(&RequestPeriodicGlobalDump));
+ // Enable periodic dumps if necessary.
+ periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
}
void MemoryDumpManager::OnTraceLogDisabled() {
@@ -725,6 +735,14 @@ void MemoryDumpManager::OnTraceLogDisabled() {
dump_thread->Stop();
}
+bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
+ AutoLock lock(lock_);
+ if (!session_state_)
+ return false;
+ return session_state_->memory_dump_config().allowed_dump_modes.count(
+ dump_mode) != 0;
+}
+
uint64_t MemoryDumpManager::GetTracingProcessId() const {
return delegate_->GetTracingProcessId();
}
@@ -733,13 +751,15 @@ MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options)
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
: dump_provider(dump_provider),
name(name),
task_runner(std::move(task_runner)),
options(options),
consecutive_failures(0),
- disabled(false) {}
+ disabled(false),
+ whitelisted_for_background_mode(whitelisted_for_background_mode) {}
MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
@@ -765,7 +785,7 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
session_state(std::move(session_state)),
callback(callback),
dump_successful(true),
- callback_task_runner(MessageLoop::current()->task_runner()),
+ callback_task_runner(ThreadTaskRunnerHandle::Get()),
dump_thread_task_runner(std::move(dump_thread_task_runner)) {
pending_dump_providers.reserve(dump_providers.size());
pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
@@ -775,15 +795,89 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}
ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
- GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
+ GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
+ const MemoryDumpArgs& dump_args) {
auto iter = process_dumps.find(pid);
if (iter == process_dumps.end()) {
std::unique_ptr<ProcessMemoryDump> new_pmd(
- new ProcessMemoryDump(session_state));
+ new ProcessMemoryDump(session_state, dump_args));
iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
}
return iter->second.get();
}
+MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
+ Stop();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
+ const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
+ if (triggers_list.empty())
+ return;
+
+ // At the moment the periodic support is limited to at most one periodic
+  // trigger per dump mode. All intervals must be an integer multiple of the
+ // smallest interval specified.
+ periodic_dumps_count_ = 0;
+ uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
+ uint32_t light_dump_period_ms = 0;
+ uint32_t heavy_dump_period_ms = 0;
+ DCHECK_LE(triggers_list.size(), 3u);
+ auto* mdm = MemoryDumpManager::GetInstance();
+ for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
+ DCHECK_NE(0u, config.periodic_interval_ms);
+ switch (config.level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
+ break;
+ case MemoryDumpLevelOfDetail::LIGHT:
+ DCHECK_EQ(0u, light_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
+ light_dump_period_ms = config.periodic_interval_ms;
+ break;
+ case MemoryDumpLevelOfDetail::DETAILED:
+ DCHECK_EQ(0u, heavy_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
+ heavy_dump_period_ms = config.periodic_interval_ms;
+ break;
+ }
+ min_timer_period_ms =
+ std::min(min_timer_period_ms, config.periodic_interval_ms);
+ }
+
+ DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
+ light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
+ DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+ heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
+
+ timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
+ base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
+ base::Unretained(this)));
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
+ if (IsRunning()) {
+ timer_.Stop();
+ }
+}
+
+bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
+ return timer_.IsRunning();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
+ MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+ if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+ ++periodic_dumps_count_;
+
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
} // namespace trace_event
} // namespace base
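
To make the rate arithmetic in PeriodicGlobalDumpTimer::Start() concrete, here is a standalone sketch, not part of the patch; the 250 ms and 1000 ms intervals are hypothetical trigger values. The timer always fires at the smallest configured interval, and the per-mode rates record how many ticks separate dumps of that detail level, exactly as RequestPeriodicGlobalDump() consumes them.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical triggers: LIGHT every 250 ms, DETAILED every 1000 ms.
  const uint32_t light_period_ms = 250;
  const uint32_t heavy_period_ms = 1000;
  // The timer fires at the smallest configured interval.
  const uint32_t min_period_ms = std::min(light_period_ms, heavy_period_ms);
  // light_rate == 1 and heavy_rate == 4: every tick requests at least a LIGHT
  // dump, and every fourth tick (starting with the first) is upgraded to
  // DETAILED, mirroring RequestPeriodicGlobalDump().
  printf("period=%ums light_rate=%u heavy_rate=%u\n", min_period_ms,
         light_period_ms / min_period_ms, heavy_period_ms / min_period_ms);
  return 0;
}
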
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 817768afedd..06b772c6e4b 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -40,6 +40,7 @@ class MemoryDumpSessionState;
class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
public:
static const char* const kTraceCategory;
+ static const char* const kLogPrefix;
// This value is returned as the tracing id of the child processes by
// GetTracingProcessId() when tracing is not enabled.
@@ -115,10 +116,14 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
+ // Returns true if the dump mode is allowed for current tracing session.
+ bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+
// Returns the MemoryDumpSessionState object, which is shared by all the
// ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
// session lifetime.
- const scoped_refptr<MemoryDumpSessionState>& session_state() const {
+ const scoped_refptr<MemoryDumpSessionState>& session_state_for_testing()
+ const {
return session_state_;
}
@@ -176,7 +181,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options);
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
MemoryDumpProvider* const dump_provider;
@@ -200,6 +206,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// Flagged either by the auto-disable logic or during unregistration.
bool disabled;
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
private:
friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
~MemoryDumpProviderInfo();
@@ -221,7 +230,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
~ProcessMemoryDumpAsyncState();
// Gets or creates the memory dump container for the given target process.
- ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(ProcessId pid);
+ ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(
+ ProcessId pid,
+ const MemoryDumpArgs& dump_args);
// A map of ProcessId -> ProcessMemoryDump, one for each target process
// being dumped from the current process. Typically each process dumps only
@@ -262,6 +273,31 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
};
+  // Sets up a periodic memory dump timer to start global dump requests based
+  // on the dump triggers from the trace config.
+ class BASE_EXPORT PeriodicGlobalDumpTimer {
+ public:
+ PeriodicGlobalDumpTimer();
+ ~PeriodicGlobalDumpTimer();
+
+ void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
+ triggers_list);
+ void Stop();
+
+ bool IsRunning();
+
+ private:
+ // Periodically called by the timer.
+ void RequestPeriodicGlobalDump();
+
+ RepeatingTimer timer_;
+ uint32_t periodic_dumps_count_;
+ uint32_t light_dump_rate_;
+ uint32_t heavy_dump_rate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
+ };
+
static const int kMaxConsecutiveFailuresCount;
static const char* const kSystemAllocatorPoolName;
@@ -325,7 +361,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
subtle::AtomicWord memory_tracing_enabled_;
// For time-triggered periodic dumps.
- RepeatingTimer periodic_dump_timer_;
+ PeriodicGlobalDumpTimer periodic_dump_timer_;
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index c1295efac65..d14093cbcc3 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -23,6 +23,7 @@
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
@@ -48,16 +49,24 @@ MATCHER(IsLightDump, "") {
return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
}
+MATCHER(IsBackgroundDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
+}
+
namespace {
+const char* kMDPName = "TestDumpProvider";
+const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
+
void RegisterDumpProvider(
MemoryDumpProvider* mdp,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options) {
+ const MemoryDumpProvider::Options& options,
+ const char* name = kMDPName) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- const char* kMDPName = "TestDumpProvider";
- mdm->RegisterDumpProvider(mdp, kMDPName, std::move(task_runner), options);
+ mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
mdm->set_dumper_registrations_ignored_for_testing(true);
}
@@ -71,7 +80,6 @@ void RegisterDumpProviderWithSequencedTaskRunner(
const MemoryDumpProvider::Options& options) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- const char* kMDPName = "TestDumpProvider";
mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
options);
mdm->set_dumper_registrations_ignored_for_testing(true);
@@ -218,7 +226,7 @@ class MemoryDumpManagerTest : public testing::Test {
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
run_loop.Run();
}
@@ -331,7 +339,8 @@ TEST_F(MemoryDumpManagerTest, SharedSessionState) {
RegisterDumpProvider(&mdp2);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
- const MemoryDumpSessionState* session_state = mdm_->session_state().get();
+ const MemoryDumpSessionState* session_state =
+ mdm_->session_state_for_testing().get();
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(2)
@@ -464,11 +473,11 @@ TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
// invoked a number of times equal to its index.
for (uint32_t i = kNumInitialThreads; i > 0; --i) {
threads.push_back(WrapUnique(new Thread("test thread")));
- auto thread = threads.back().get();
+ auto* thread = threads.back().get();
thread->Start();
scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
- auto mdp = mdps.back().get();
+ auto* mdp = mdps.back().get();
RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
EXPECT_CALL(*mdp, OnMemoryDump(_, _))
.Times(i)
@@ -895,7 +904,9 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
- base::WaitableEvent tracing_disabled_event(false, false);
+ base::WaitableEvent tracing_disabled_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
InitializeMemoryDumpManager(false /* is_coordinator */);
// Register a bound dump provider.
@@ -932,7 +943,7 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED, callback);
DisableTracing();
@@ -945,7 +956,9 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
// Tests against race conditions that can happen if tracing is disabled before
// the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
- base::WaitableEvent tracing_disabled_event(false, false);
+ base::WaitableEvent tracing_disabled_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
InitializeMemoryDumpManager(false /* is_coordinator */);
std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
@@ -1099,5 +1112,60 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+ std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
+ RegisterDumpProvider(mdp1.get());
+ std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
+ RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
+ kWhitelistedMDPName);
+
+ EXPECT_CALL(*mdp1, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(*mdp2, OnMemoryDump(_, _)).Times(1).WillOnce(Return(true));
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::BACKGROUND);
+ DisableTracing();
+}
+
+TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
+ InitializeMemoryDumpManager(true /* is_coordinator */);
+
+ RunLoop run_loop;
+ auto quit_closure = run_loop.QuitClosure();
+
+ testing::InSequence sequence;
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+ .Times(5);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+ .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ }));
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */));
+
+ // Only background mode dumps should be allowed with the trace config.
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::LIGHT);
+ EXPECT_FALSE(last_callback_success_);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_FALSE(last_callback_success_);
+
+ ASSERT_TRUE(IsPeriodicDumpingEnabled());
+ run_loop.Run();
+ DisableTracing();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/memory_dump_provider.h b/chromium/base/trace_event/memory_dump_provider.h
index 79ab7934866..c899ea9c346 100644
--- a/chromium/base/trace_event/memory_dump_provider.h
+++ b/chromium/base/trace_event/memory_dump_provider.h
@@ -15,12 +15,6 @@ namespace trace_event {
class ProcessMemoryDump;
-// Args passed to OnMemoryDump(). This is to avoid rewriting all the subclasses
-// in the codebase when extending the MemoryDumpProvider API.
-struct MemoryDumpArgs {
- MemoryDumpLevelOfDetail level_of_detail;
-};
-
// The contract interface that memory dump providers must implement.
class BASE_EXPORT MemoryDumpProvider {
public:
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index 48b5ba6d2c0..e6c5b87b226 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -28,6 +28,8 @@ const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ return "background";
case MemoryDumpLevelOfDetail::LIGHT:
return "light";
case MemoryDumpLevelOfDetail::DETAILED:
@@ -39,6 +41,8 @@ const char* MemoryDumpLevelOfDetailToString(
MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
const std::string& str) {
+ if (str == "background")
+ return MemoryDumpLevelOfDetail::BACKGROUND;
if (str == "light")
return MemoryDumpLevelOfDetail::LIGHT;
if (str == "detailed")
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index 00d560ec6ac..f3ff9d8e3b5 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -28,13 +28,25 @@ enum class MemoryDumpType {
};
// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
-// MemoryDumpProvider instances must guarantee that level of detail does not
-// affect the total size reported in the root node, but only the granularity of
-// the child MemoryAllocatorDump(s).
-enum class MemoryDumpLevelOfDetail {
- LIGHT, // Few entries, typically a fixed number, per dump.
- DETAILED, // Unrestricted amount of entries per dump.
- LAST = DETAILED // For IPC Macros.
+enum class MemoryDumpLevelOfDetail : uint32_t {
+ FIRST,
+
+  // For background tracing mode. Dumps are expected to be fast and typically
+  // report just the totals; suballocations need not be specified. Dump names
+  // must contain only pre-defined strings, and string arguments cannot be added.
+ BACKGROUND = FIRST,
+
+  // For the levels below, MemoryDumpProvider instances must guarantee that the
+  // total size reported in the root node stays the same; only the granularity
+  // of the child MemoryAllocatorDump(s) varies between the levels.
+
+ // Few entries, typically a fixed number, per dump.
+ LIGHT,
+
+ // Unrestricted amount of entries per dump.
+ DETAILED,
+
+ LAST = DETAILED
};
// Initial request arguments for a global memory dump. (see
@@ -49,6 +61,13 @@ struct BASE_EXPORT MemoryDumpRequestArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
+// Args for ProcessMemoryDump, also passed to the OnMemoryDump() calls of memory
+// dump providers, which are expected to honor them when creating their dumps.
+struct MemoryDumpArgs {
+ // Specifies how detailed the dumps should be.
+ MemoryDumpLevelOfDetail level_of_detail;
+};
+
using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
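
As a reading aid for the relocated MemoryDumpArgs and the new BACKGROUND level, the following is a minimal provider sketch, not part of the patch: the class name, dump names and byte counters are hypothetical, and the MemoryAllocatorDump::AddScalar / kNameSize / kUnitsBytes helpers are assumed to be available as elsewhere in base/trace_event. It reports a total in every mode and adds a finer-grained child dump only when a DETAILED dump is requested; in BACKGROUND mode the non-whitelisted name would fail the whitelist check and be redirected to the black-hole dump introduced in process_memory_dump.cc.

#include <stdint.h>

#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

namespace example {

class MyCacheDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    using base::trace_event::MemoryAllocatorDump;
    // "MyCache" is a made-up, non-whitelisted name: in BACKGROUND mode this
    // would be redirected to the black-hole dump (a DCHECK in debug builds).
    MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("MyCache");
    dump->AddScalar(MemoryAllocatorDump::kNameSize,
                    MemoryAllocatorDump::kUnitsBytes, total_bytes_);
    // Finer-grained children only when a detailed dump was requested.
    if (args.level_of_detail ==
        base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
      MemoryAllocatorDump* objects = pmd->CreateAllocatorDump("MyCache/objects");
      objects->AddScalar(MemoryAllocatorDump::kNameSize,
                         MemoryAllocatorDump::kUnitsBytes, object_bytes_);
    }
    return true;
  }

 private:
  uint64_t total_bytes_ = 0;
  uint64_t object_bytes_ = 0;
};

}  // namespace example
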
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
new file mode 100644
index 00000000000..aed187fa1d8
--- /dev/null
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+namespace base {
+namespace trace_event {
+namespace {
+
+// The names of dump providers whitelisted for background tracing. Dump
+// providers can be added here only if their background mode dumps have
+// negligible performance and memory overhead.
+const char* const kDumpProviderWhitelist[] = {
+ "BlinkGC",
+ "ChildDiscardableSharedMemoryManager",
+ "DOMStorage",
+ "HostDiscardableSharedMemoryManager",
+ "IndexedDBBackingStore",
+ "JavaHeap",
+ "LeveldbValueStore",
+ "Malloc",
+ "PartitionAlloc",
+ "ProcessMemoryMetrics",
+ "Skia",
+ "Sql",
+ "V8Isolate",
+ "WinHeap",
+ nullptr // End of list marker.
+};
+
+// A list of string names that are allowed for the memory allocator dumps in
+// background mode.
+const char* const kAllocatorDumpNameWhitelist[] = {
+ "blink_gc",
+ "blink_gc/allocated_objects",
+ "discardable",
+ "discardable/child_0x?",
+ "dom_storage/0x?/cache_size",
+ "dom_storage/session_storage_0x?",
+ "java_heap",
+ "java_heap/allocated_objects",
+ "leveldb/index_db/0x?",
+ "leveldb/value_store/Extensions.Database.Open.Settings/0x?",
+ "leveldb/value_store/Extensions.Database.Open.Rules/0x?",
+ "leveldb/value_store/Extensions.Database.Open.State/0x?",
+ "leveldb/value_store/Extensions.Database.Open/0x?",
+ "leveldb/value_store/Extensions.Database.Restore/0x?",
+ "leveldb/value_store/Extensions.Database.Value.Restore/0x?",
+ "malloc",
+ "malloc/allocated_objects",
+ "malloc/metadata_fragmentation_caches",
+ "partition_alloc/allocated_objects",
+ "partition_alloc/partitions",
+ "partition_alloc/partitions/buffer",
+ "partition_alloc/partitions/fast_malloc",
+ "partition_alloc/partitions/layout",
+ "skia/sk_glyph_cache",
+ "skia/sk_resource_cache",
+ "sqlite",
+ "v8/isolate_0x?/heap_spaces",
+ "v8/isolate_0x?/heap_spaces/code_space",
+ "v8/isolate_0x?/heap_spaces/large_object_space",
+ "v8/isolate_0x?/heap_spaces/map_space",
+ "v8/isolate_0x?/heap_spaces/new_space",
+ "v8/isolate_0x?/heap_spaces/old_space",
+ "v8/isolate_0x?/heap_spaces/other_spaces",
+ "v8/isolate_0x?/malloc",
+ "v8/isolate_0x?/zapped_for_debug",
+ "winheap",
+ "winheap/allocated_objects",
+ nullptr // End of list marker.
+};
+
+const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
+const char* const* g_allocator_dump_name_whitelist =
+ kAllocatorDumpNameWhitelist;
+
+} // namespace
+
+bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
+ for (size_t i = 0; g_dump_provider_whitelist[i] != nullptr; ++i) {
+ if (strcmp(mdp_name, g_dump_provider_whitelist[i]) == 0)
+ return true;
+ }
+ return false;
+}
+
+bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
+  // Normalize the given string: collapse every hexadecimal sequence introduced
+  // by '0x' into the literal "0x?" so it can match the whitelist patterns.
+ const size_t length = name.size();
+ std::string stripped_str;
+ stripped_str.reserve(length);
+ bool parsing_hex = false;
+ for (size_t i = 0; i < length; ++i) {
+ if (parsing_hex && isxdigit(name[i]))
+ continue;
+ parsing_hex = false;
+ if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
+ parsing_hex = true;
+ stripped_str.append("0x?");
+ ++i;
+ } else {
+ stripped_str.push_back(name[i]);
+ }
+ }
+
+ for (size_t i = 0; g_allocator_dump_name_whitelist[i] != nullptr; ++i) {
+ if (stripped_str == g_allocator_dump_name_whitelist[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void SetDumpProviderWhitelistForTesting(const char* const* list) {
+ g_dump_provider_whitelist = list;
+}
+
+void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
+ g_allocator_dump_name_whitelist = list;
+}
+
+} // namespace trace_event
+} // namespace base
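
The normalization inside IsMemoryAllocatorDumpNameWhitelisted() is easiest to see on a concrete name. Below is a standalone sketch that mirrors the loop above (the input name is made up): every hexadecimal sequence introduced by "0x" collapses to the literal "0x?", so runtime addresses can match the static patterns in kAllocatorDumpNameWhitelist.

#include <ctype.h>
#include <stdio.h>

#include <string>

std::string StripHexForWhitelistMatch(const std::string& name) {
  std::string stripped;
  stripped.reserve(name.size());
  bool parsing_hex = false;
  for (size_t i = 0; i < name.size(); ++i) {
    if (parsing_hex && isxdigit(name[i]))
      continue;
    parsing_hex = false;
    if (i + 1 < name.size() && name[i] == '0' && name[i + 1] == 'x') {
      parsing_hex = true;
      stripped.append("0x?");
      ++i;
    } else {
      stripped.push_back(name[i]);
    }
  }
  return stripped;
}

int main() {
  // Prints "v8/isolate_0x?/heap_spaces", which is an entry in
  // kAllocatorDumpNameWhitelist above.
  printf("%s\n",
         StripHexForWhitelistMatch("v8/isolate_0x1a2b3c/heap_spaces").c_str());
  return 0;
}
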
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.h b/chromium/base/trace_event/memory_infra_background_whitelist.h
new file mode 100644
index 00000000000..b8d704ae241
--- /dev/null
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+
+// This file contains the whitelists for background mode to limit the tracing
+// overhead and remove sensitive information from traces.
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// Checks if the given |mdp_name| is in the whitelist.
+bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
+
+// Checks if the given |name| matches any of the whitelisted patterns.
+bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
+
+// Replaces the corresponding whitelist with the given list, for testing. The
+// last element of the list must be nullptr.
+void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
+void BASE_EXPORT
+SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
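
For orientation, a minimal usage sketch of the two query functions declared above; "MyProvider" and the dom_storage address are made-up values, and the real call sites added by this patch live in memory_dump_manager.cc and process_memory_dump.cc.

#include "base/trace_event/memory_infra_background_whitelist.h"

void WhitelistExamples() {
  // "Malloc" and "dom_storage/0x?/cache_size" are entries in the whitelists.
  bool provider_ok =
      base::trace_event::IsMemoryDumpProviderWhitelisted("Malloc");      // true
  bool provider_no =
      base::trace_event::IsMemoryDumpProviderWhitelisted("MyProvider");  // false
  bool name_ok = base::trace_event::IsMemoryAllocatorDumpNameWhitelisted(
      "dom_storage/0x7f00c0ffee00/cache_size");  // true, after 0x? stripping
  (void)provider_ok;
  (void)provider_no;
  (void)name_ok;
}
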
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 52eccbe1a0c..826989237bc 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -12,6 +12,7 @@
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_totals.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"
@@ -48,6 +49,9 @@ size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
} // namespace
+// static
+bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
@@ -148,10 +152,12 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
ProcessMemoryDump::ProcessMemoryDump(
- scoped_refptr<MemoryDumpSessionState> session_state)
+ scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args)
: has_process_totals_(false),
has_process_mmaps_(false),
- session_state_(std::move(session_state)) {}
+ session_state_(std::move(session_state)),
+ dump_args_(dump_args) {}
ProcessMemoryDump::~ProcessMemoryDump() {}
@@ -170,6 +176,13 @@ MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad) {
+  // In background mode, return the black hole dump if a non-whitelisted dump
+  // name is given.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+ !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
+ return GetBlackHoleMad();
+ }
+
auto insertion_result = allocator_dumps_.insert(
std::make_pair(mad->absolute_name(), std::move(mad)));
MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
@@ -181,7 +194,11 @@ MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
const std::string& absolute_name) const {
auto it = allocator_dumps_.find(absolute_name);
- return it == allocator_dumps_.end() ? nullptr : it->second.get();
+ if (it != allocator_dumps_.end())
+ return it->second.get();
+ if (black_hole_mad_)
+ return black_hole_mad_.get();
+ return nullptr;
}
MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
@@ -192,6 +209,10 @@ MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
+ // Global dumps are disabled in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return GetBlackHoleMad();
+
// A shared allocator dump can be shared within a process and the guid could
// have been created already.
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
@@ -206,6 +227,10 @@ MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
+ // Global dumps are disabled in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return GetBlackHoleMad();
+
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
if (mad)
return mad;
@@ -219,21 +244,16 @@ MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}
-void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
- std::unique_ptr<TracedValue> heap_dump) {
- DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
- heap_dumps_[absolute_name] = std::move(heap_dump);
-}
-
void ProcessMemoryDump::DumpHeapUsage(
const base::hash_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>& metrics_by_context,
base::trace_event::TraceEventMemoryOverhead& overhead,
const char* allocator_name) {
if (!metrics_by_context.empty()) {
+ DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
metrics_by_context, *session_state());
- AddHeapDump(allocator_name, std::move(heap_dump));
+ heap_dumps_[allocator_name] = std::move(heap_dump);
}
std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
@@ -333,10 +353,21 @@ void ProcessMemoryDump::AddOwnershipEdge(
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
const std::string& target_node_name) {
+ // Do not create new dumps for suballocations in background mode.
+ if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+ return;
+
std::string child_mad_name = target_node_name + "/__" + source.ToString();
MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
AddOwnershipEdge(source, target_child_mad->guid());
}
+MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
+ DCHECK(is_black_hole_non_fatal_for_testing_);
+ if (!black_hole_mad_)
+ black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
+ return black_hole_mad_.get();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index 51e4b5f5150..d020c7d652f 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -16,6 +16,7 @@
#include "base/memory/scoped_vector.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_maps.h"
#include "base/trace_event/process_memory_totals.h"
@@ -67,7 +68,8 @@ class BASE_EXPORT ProcessMemoryDump {
static size_t CountResidentBytes(void* start_address, size_t mapped_size);
#endif
- ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state);
+ ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args);
~ProcessMemoryDump();
// Creates a new MemoryAllocatorDump with the given name and returns the
@@ -116,14 +118,6 @@ class BASE_EXPORT ProcessMemoryDump {
// Returns the map of the MemoryAllocatorDumps added to this dump.
const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
- // Adds a heap dump for the allocator with |absolute_name|. The |TracedValue|
- // must have the correct format. |trace_event::HeapDumper| will generate such
- // a value from a |trace_event::AllocationRegister|.
- // TODO(bashi): Remove this when WebMemoryDumpProvider is gone.
- // http://crbug.com/605822
- void AddHeapDump(const std::string& absolute_name,
- std::unique_ptr<TracedValue> heap_dump);
-
// Dumps heap usage with |allocator_name|.
void DumpHeapUsage(const base::hash_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>&
@@ -183,10 +177,16 @@ class BASE_EXPORT ProcessMemoryDump {
const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+ const MemoryDumpArgs& dump_args() const { return dump_args_; }
+
private:
+ FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+
MemoryAllocatorDump* AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad);
+ MemoryAllocatorDump* GetBlackHoleMad();
+
ProcessMemoryTotals process_totals_;
bool has_process_totals_;
@@ -202,6 +202,18 @@ class BASE_EXPORT ProcessMemoryDump {
// Keeps track of relationships between MemoryAllocatorDump(s).
std::vector<MemoryAllocatorDumpEdge> allocator_dumps_edges_;
+ // Level of detail of the current dump.
+ const MemoryDumpArgs dump_args_;
+
+ // This allocator dump is returned when an invalid dump is created in
+ // background mode. The attributes of the dump are ignored and not added to
+ // the trace.
+ std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
+
+  // When set to true, the DCHECK(s) for invalid dump creations in background
+  // mode are disabled for testing.
+ static bool is_black_hole_non_fatal_for_testing_;
+
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
};
diff --git a/chromium/base/trace_event/process_memory_dump_unittest.cc b/chromium/base/trace_event/process_memory_dump_unittest.cc
index 3a93b2c489d..571774a10ca 100644
--- a/chromium/base/trace_event/process_memory_dump_unittest.cc
+++ b/chromium/base/trace_event/process_memory_dump_unittest.cc
@@ -7,8 +7,10 @@
#include <stddef.h>
#include "base/memory/aligned_memory.h"
+#include "base/memory/ptr_util.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/trace_event_argument.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -16,14 +18,22 @@ namespace base {
namespace trace_event {
namespace {
+
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const char* const kTestDumpNameWhitelist[] = {
+ "Whitelisted/TestName", "Whitelisted/TestName_0x?",
+ "Whitelisted/0x?/TestName", nullptr};
+
TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
auto it = pmd.heap_dumps().find(name);
return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
}
+
} // namespace
TEST(ProcessMemoryDumpTest, Clear) {
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
pmd1->CreateAllocatorDump("mad1");
pmd1->CreateAllocatorDump("mad2");
ASSERT_FALSE(pmd1->allocator_dumps().empty());
@@ -58,10 +68,10 @@ TEST(ProcessMemoryDumpTest, Clear) {
pmd1->AsValueInto(traced_value.get());
// Check that the pmd can be reused and behaves as expected.
- auto mad1 = pmd1->CreateAllocatorDump("mad1");
- auto mad3 = pmd1->CreateAllocatorDump("mad3");
- auto shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
- auto shared_mad2 =
+ auto* mad1 = pmd1->CreateAllocatorDump("mad1");
+ auto* mad3 = pmd1->CreateAllocatorDump("mad3");
+ auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+ auto* shared_mad2 =
pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
ASSERT_EQ(4u, pmd1->allocator_dumps().size());
ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
@@ -80,35 +90,36 @@ TEST(ProcessMemoryDumpTest, Clear) {
TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
std::unique_ptr<TracedValue> traced_value(new TracedValue);
- TracedValue* heap_dumps_ptr[4];
- std::unique_ptr<TracedValue> heap_dump;
-
- std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
- auto mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
- auto mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
+ hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+ metrics_by_context[AllocationContext()] = { 1, 1 };
+ TraceEventMemoryOverhead overhead;
+
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetStackFrameDeduplicator(
+ WrapUnique(new StackFrameDeduplicator));
+ session_state->SetTypeNameDeduplicator(
+ WrapUnique(new TypeNameDeduplicator));
+ std::unique_ptr<ProcessMemoryDump> pmd1(
+ new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+ auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
+ auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[0] = heap_dump.get();
- pmd1->AddHeapDump("pmd1/heap_dump1", std::move(heap_dump));
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[1] = heap_dump.get();
- pmd1->AddHeapDump("pmd1/heap_dump2", std::move(heap_dump));
-
- std::unique_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
- auto mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
- auto mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
+ pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
+ pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
+
+ std::unique_ptr<ProcessMemoryDump> pmd2(
+ new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+ auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
+ auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[2] = heap_dump.get();
- pmd2->AddHeapDump("pmd2/heap_dump1", std::move(heap_dump));
- heap_dump.reset(new TracedValue);
- heap_dumps_ptr[3] = heap_dump.get();
- pmd2->AddHeapDump("pmd2/heap_dump2", std::move(heap_dump));
+ pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
+ pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
MemoryAllocatorDumpGuid shared_mad_guid1(1);
MemoryAllocatorDumpGuid shared_mad_guid2(2);
- auto shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
- auto shared_mad2 =
+ auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+ auto* shared_mad2 =
pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
pmd1->TakeAllDumpsFrom(pmd2.get());
@@ -141,10 +152,10 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
ASSERT_EQ(4u, pmd1->heap_dumps().size());
- ASSERT_EQ(heap_dumps_ptr[0], GetHeapDump(*pmd1, "pmd1/heap_dump1"));
- ASSERT_EQ(heap_dumps_ptr[1], GetHeapDump(*pmd1, "pmd1/heap_dump2"));
- ASSERT_EQ(heap_dumps_ptr[2], GetHeapDump(*pmd1, "pmd2/heap_dump1"));
- ASSERT_EQ(heap_dumps_ptr[3], GetHeapDump(*pmd1, "pmd2/heap_dump2"));
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
+ ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
// Check that calling AsValueInto() doesn't cause a crash.
traced_value.reset(new TracedValue);
@@ -154,17 +165,18 @@ TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
}
TEST(ProcessMemoryDumpTest, Suballocations) {
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
const std::string allocator_dump_name = "fakealloc/allocated_objects";
pmd->CreateAllocatorDump(allocator_dump_name);
// Create one allocation with an auto-assigned guid and mark it as a
// suballocation of "fakealloc/allocated_objects".
- auto pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
+ auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);
// Same here, but this time create an allocation with an explicit guid.
- auto pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
+ auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
MemoryAllocatorDumpGuid(0x42));
pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);
@@ -198,29 +210,75 @@ TEST(ProcessMemoryDumpTest, Suballocations) {
}
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
- std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
MemoryAllocatorDumpGuid shared_mad_guid(1);
- auto shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad2);
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad3);
ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
- auto shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad4);
ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
- auto shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+ auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
ASSERT_EQ(shared_mad1, shared_mad5);
ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
}
+TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
+ MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+ std::unique_ptr<ProcessMemoryDump> pmd(
+ new ProcessMemoryDump(nullptr, background_args));
+ ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
+ SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
+ MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+
+ // Invalid dump names.
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
+ EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
+ EXPECT_EQ(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
+
+ // Global dumps.
+ MemoryAllocatorDumpGuid guid(1);
+ EXPECT_EQ(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
+ EXPECT_EQ(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
+ EXPECT_EQ(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
+
+ // Suballocations.
+ pmd->AddSuballocation(guid, "malloc/allocated_objects");
+ EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
+ EXPECT_EQ(0u, pmd->allocator_dumps_.size());
+
+ // Valid dump names.
+ EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
+ EXPECT_NE(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
+ EXPECT_NE(black_hole_mad,
+ pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
+
+ // GetAllocatorDump is consistent.
+ EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+ EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
+}
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
TEST(ProcessMemoryDumpTest, CountResidentBytes) {
const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 25a0cd6d40b..d4dc2cc2e4e 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -47,6 +47,7 @@ const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
// String parameters that are used to parse the memory dump config in a trace
// config string.
const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
const char kPeriodicIntervalParam[] = "periodic_interval_ms";
const char kModeParam[] = "mode";
@@ -75,6 +76,15 @@ class ConvertableTraceConfigToTraceFormat
const TraceConfig trace_config_;
};
+std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
+ std::set<MemoryDumpLevelOfDetail> all_modes;
+ for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
+ mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+ all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
+ }
+ return all_modes;
+}
+
} // namespace
@@ -85,6 +95,12 @@ void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
}
+void TraceConfig::ResetMemoryDumpConfig(
+ const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+ memory_dump_config_.Clear();
+ memory_dump_config_ = memory_dump_config;
+}
+
TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {};
TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
@@ -93,6 +109,7 @@ TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {};
void TraceConfig::MemoryDumpConfig::Clear() {
+ allowed_dump_modes.clear();
triggers.clear();
heap_profiler_options.Clear();
}
@@ -311,8 +328,6 @@ void TraceConfig::InitializeDefault() {
enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
- excluded_categories_.push_back("*Debug");
- excluded_categories_.push_back("*Test");
}
void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
@@ -361,7 +376,7 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
// category enabled. So, use the default periodic dump config.
const base::DictionaryValue* memory_dump_config = nullptr;
if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
- SetMemoryDumpConfig(*memory_dump_config);
+ SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
else
SetDefaultMemoryDumpConfig();
}
@@ -509,11 +524,25 @@ void TraceConfig::AddCategoryToDict(base::DictionaryValue& dict,
dict.Set(param, std::move(list));
}
-void TraceConfig::SetMemoryDumpConfig(
+void TraceConfig::SetMemoryDumpConfigFromConfigDict(
const base::DictionaryValue& memory_dump_config) {
+ // Set allowed dump modes.
+ memory_dump_config_.allowed_dump_modes.clear();
+ const base::ListValue* allowed_modes_list;
+ if (memory_dump_config.GetList(kAllowedDumpModesParam, &allowed_modes_list)) {
+ for (size_t i = 0; i < allowed_modes_list->GetSize(); ++i) {
+ std::string level_of_detail_str;
+ allowed_modes_list->GetString(i, &level_of_detail_str);
+ memory_dump_config_.allowed_dump_modes.insert(
+ StringToMemoryDumpLevelOfDetail(level_of_detail_str));
+ }
+ } else {
+ // If allowed modes param is not given then allow all modes by default.
+ memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+ }
+
// Set triggers
memory_dump_config_.triggers.clear();
-
const base::ListValue* trigger_list = nullptr;
if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
trigger_list->GetSize() > 0) {
@@ -559,6 +588,7 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.Clear();
memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
+ memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
void TraceConfig::ToDict(base::DictionaryValue& dict) const {
@@ -605,6 +635,15 @@ void TraceConfig::ToDict(base::DictionaryValue& dict) const {
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
std::unique_ptr<base::DictionaryValue> memory_dump_config(
new base::DictionaryValue());
+ std::unique_ptr<base::ListValue> allowed_modes_list(new base::ListValue());
+ for (MemoryDumpLevelOfDetail dump_mode :
+ memory_dump_config_.allowed_dump_modes) {
+ allowed_modes_list->AppendString(
+ MemoryDumpLevelOfDetailToString(dump_mode));
+ }
+ memory_dump_config->Set(kAllowedDumpModesParam,
+ std::move(allowed_modes_list));
+
std::unique_ptr<base::ListValue> triggers_list(new base::ListValue());
for (const MemoryDumpConfig::Trigger& config
: memory_dump_config_.triggers) {
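
The new "allowed_dump_modes" param round-trips through SetMemoryDumpConfigFromConfigDict() and ToDict() above. As a shape reference only (the 2000 ms interval is a made-up value; the complete trace config strings in trace_config_memory_test_util.h show the surrounding fields), the memory_dump_config dictionary consumed here looks like this when written as a C++ string literal in the style of the test util:

// Illustrative only; not part of the patch.
const char kBackgroundOnlyMemoryDumpConfig[] =
    "{"
      "\"allowed_dump_modes\":[\"background\"],"
      "\"triggers\":["
        "{\"mode\":\"background\",\"periodic_interval_ms\":2000}"
      "]"
    "}";
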
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index 5b119eae98c..c3a940e2592 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -7,6 +7,7 @@
#include <stdint.h>
+#include <set>
#include <string>
#include <vector>
@@ -42,7 +43,7 @@ class BASE_EXPORT TraceConfig {
// Specifies the memory dump config for tracing.
// Used only when "memory-infra" category is enabled.
- struct MemoryDumpConfig {
+ struct BASE_EXPORT MemoryDumpConfig {
MemoryDumpConfig();
MemoryDumpConfig(const MemoryDumpConfig& other);
~MemoryDumpConfig();
@@ -69,6 +70,11 @@ class BASE_EXPORT TraceConfig {
// Reset the values in the config.
void Clear();
+    // Set of memory dump modes allowed for the tracing session. Explicitly
+    // triggered dumps succeed only if their dump mode is allowed by this
+    // config.
+ std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
+
std::vector<Trigger> triggers;
HeapProfiler heap_profiler_options;
};
@@ -139,7 +145,7 @@ class BASE_EXPORT TraceConfig {
// "inc_pattern*",
// "disabled-by-default-memory-infra"],
// "excluded_categories": ["excluded", "exc_pattern*"],
- // "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"]
+ // "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"],
// "memory_dump_config": {
// "triggers": [
// {
@@ -188,7 +194,8 @@ class BASE_EXPORT TraceConfig {
std::string ToCategoryFilterString() const;
// Returns true if at least one category in the list is enabled by this
- // trace config.
+ // trace config. This is used to determine if the category filters are
+ // enabled in the TRACE_* macros.
bool IsCategoryGroupEnabled(const char* category_group) const;
// Merges config with the current TraceConfig
@@ -196,6 +203,9 @@ class BASE_EXPORT TraceConfig {
void Clear();
+ // Clears and resets the memory dump config.
+ void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
@@ -204,7 +214,6 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, ConstructDefaultTraceConfig);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -212,6 +221,8 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
+ FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+ EmptyAndAsteriskCategoryFilterString);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -235,7 +246,8 @@ class BASE_EXPORT TraceConfig {
const char* param,
const StringList& categories) const;
- void SetMemoryDumpConfig(const base::DictionaryValue& memory_dump_config);
+ void SetMemoryDumpConfigFromConfigDict(
+ const base::DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
// Convert TraceConfig to the dict representation of the TraceConfig.
@@ -249,7 +261,10 @@ class BASE_EXPORT TraceConfig {
void WriteCategoryFilterString(const StringList& delays,
std::string* out) const;
- // Returns true if category is enable according to this trace config.
+ // Returns true if the category is enabled according to this trace config.
+ // This tells whether a category is enabled from the TraceConfig's
+ // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+ // category is enabled from the tracing runtime's perspective.
bool IsCategoryEnabled(const char* category_name) const;
static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
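
A minimal sketch of how the new allowed_dump_modes field is expected to appear in a config string (the JSON shape mirrors the test util in the next hunk; the trigger values here are placeholders):

    #include "base/trace_event/trace_config.h"

    // Explicitly triggered dumps succeed only when their mode is listed in
    // allowed_dump_modes.
    const char kMemoryTraceConfig[] =
        "{"
          "\"included_categories\":[\"disabled-by-default-memory-infra\"],"
          "\"memory_dump_config\":{"
            "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
            "\"triggers\":[{\"mode\":\"light\",\"periodic_interval_ms\":250}]"
          "},"
          "\"record_mode\":\"record-until-full\""
        "}";
    base::trace_event::TraceConfig config(kMemoryTraceConfig);
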
diff --git a/chromium/base/trace_event/trace_config_memory_test_util.h b/chromium/base/trace_event/trace_config_memory_test_util.h
index 1acc62b9cec..6b47f8dc550 100644
--- a/chromium/base/trace_event/trace_config_memory_test_util.h
+++ b/chromium/base/trace_event/trace_config_memory_test_util.h
@@ -24,6 +24,7 @@ class TraceConfigMemoryTestUtil {
"\"%s\""
"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":2048"
"},"
@@ -52,6 +53,7 @@ class TraceConfigMemoryTestUtil {
"\"%s\""
"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"triggers\":["
"]"
"},"
@@ -71,6 +73,28 @@ class TraceConfigMemoryTestUtil {
"\"record_mode\":\"record-until-full\""
"}", MemoryDumpManager::kTraceCategory);
}
+
+ static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\"],"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"background\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}", MemoryDumpManager::kTraceCategory, period_ms);
+ }
};
} // namespace trace_event
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index a17337619b2..4b46b2fefdd 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -21,7 +21,6 @@ const char kDefaultTraceConfigString[] =
"\"enable_argument_filter\":false,"
"\"enable_sampling\":false,"
"\"enable_systrace\":false,"
- "\"excluded_categories\":[\"*Debug\",\"*Test\"],"
"\"record_mode\":\"record-until-full\""
"}";
@@ -36,6 +35,7 @@ const char kCustomTraceConfigString[] =
"\"disabled-by-default-cc\","
"\"disabled-by-default-memory-infra\"],"
"\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":10240"
"},"
@@ -48,6 +48,24 @@ const char kCustomTraceConfigString[] =
"\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
"}";
+void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+ // Default trace config enables every category filter except the
+ // disabled-by-default-* ones.
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,not-excluded-category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,disabled-by-default-cc"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled(
+ "disabled-by-default-cc,disabled-by-default-cc2"));
+}
+
} // namespace
TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -155,9 +173,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config.ToTraceOptionsString().c_str());
// From category filter strings
- config = TraceConfig("-*Debug,-*Test", "");
- EXPECT_STREQ("-*Debug,-*Test", config.ToCategoryFilterString().c_str());
-
config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*", "");
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
@@ -257,38 +272,79 @@ TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
}
TEST(TraceConfigTest, ConstructDefaultTraceConfig) {
- // Make sure that upon an empty string, we fall back to the default config.
TraceConfig tc;
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
- EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
- EXPECT_FALSE(tc.IsSystraceEnabled());
- EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ // Constructors from category filter string and trace option string.
+ TraceConfig tc_asterisk("*", "");
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ TraceConfig tc_empty_category_filter("", "");
+ EXPECT_STREQ("", tc_empty_category_filter.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_category_filter.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_category_filter);
+
+ // Constructor from a JSON-formatted config string.
+ TraceConfig tc_empty_json_string("");
+ EXPECT_STREQ("", tc_empty_json_string.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_json_string.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_json_string);
+
+ // Constructor from dictionary value.
+ DictionaryValue dict;
+ TraceConfig tc_dict(dict);
+ EXPECT_STREQ("", tc_dict.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc_dict.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_dict);
+}
- EXPECT_FALSE(tc.IsCategoryEnabled("Category1"));
- EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryEnabled("CategoryTest"));
- EXPECT_FALSE(tc.IsCategoryEnabled("CategoryDebug"));
- EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-cc"));
+TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
+ TraceConfig tc_empty("", "");
+ TraceConfig tc_asterisk("*", "");
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryTest"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_STREQ("", tc_empty.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+
+ // Both fall back to default config.
+ CheckDefaultTraceConfigBehavior(tc_empty);
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ // They differ only in the internal IsCategoryEnabled() checks.
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+}
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,CategoryDebug"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryDebug,Category1"));
- EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryTest,not-excluded-category"));
- EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug,CategoryTest"));
+TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
+ TraceConfig tc("foo,disabled-by-default-foo", "");
+ EXPECT_STREQ("foo,disabled-by-default-foo",
+ tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ tc = TraceConfig("disabled-by-default-foo", "");
+ EXPECT_STREQ("disabled-by-default-foo", tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
}
TEST(TraceConfigTest, TraceConfigFromDict) {
- // Passing in empty dictionary will not result in default trace config.
+ // Passing in empty dictionary will result in default trace config.
DictionaryValue dict;
TraceConfig tc(dict);
- EXPECT_STRNE(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
@@ -307,7 +363,7 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
EXPECT_FALSE(default_tc.IsSamplingEnabled());
EXPECT_FALSE(default_tc.IsSystraceEnabled());
EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", default_tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
std::unique_ptr<Value> custom_value(
JSONReader::Read(kCustomTraceConfigString));
@@ -405,7 +461,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("This is an invalid config string.");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -413,7 +470,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -421,7 +479,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -429,7 +488,8 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
- EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
// If the config string is a dictionary formatted as a JSON string, it will
// initialize TraceConfig with best effort.
@@ -439,6 +499,7 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
@@ -446,6 +507,7 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
const char invalid_config_string[] =
"{"
@@ -487,9 +549,7 @@ TEST(TraceConfigTest, MergingTraceConfigs) {
"\"enable_argument_filter\":false,"
"\"enable_sampling\":false,"
"\"enable_systrace\":false,"
- "\"excluded_categories\":["
- "\"*Debug\",\"*Test\",\"excluded\",\"exc_pattern*\""
- "],"
+ "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"record_mode\":\"record-until-full\""
"}",
tc.ToString().c_str());
@@ -568,22 +628,34 @@ TEST(TraceConfigTest, SetTraceOptionValues) {
}
TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
- std::string tc_str =
+ std::string tc_str1 =
TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
- TraceConfig tc(tc_str);
- EXPECT_EQ(tc_str, tc.ToString());
- EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(2u, tc.memory_dump_config_.triggers.size());
+ TraceConfig tc1(tc_str1);
+ EXPECT_EQ(tc_str1, tc1.ToString());
+ EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
- EXPECT_EQ(200u, tc.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
- tc.memory_dump_config_.triggers[0].level_of_detail);
+ tc1.memory_dump_config_.triggers[0].level_of_detail);
- EXPECT_EQ(2000u, tc.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc.memory_dump_config_.triggers[1].level_of_detail);
- EXPECT_EQ(2048u, tc.memory_dump_config_.heap_profiler_options.
- breakdown_threshold_bytes);
+ tc1.memory_dump_config_.triggers[1].level_of_detail);
+ EXPECT_EQ(
+ 2048u,
+ tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+
+ std::string tc_str2 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */);
+ TraceConfig tc2(tc_str2);
+ EXPECT_EQ(tc_str2, tc2.ToString());
+ EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+ tc2.memory_dump_config_.triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/chromium/base/trace_event/trace_event.gypi b/chromium/base/trace_event/trace_event.gypi
index 4335ea1b981..f915780de5f 100644
--- a/chromium/base/trace_event/trace_event.gypi
+++ b/chromium/base/trace_event/trace_event.gypi
@@ -35,6 +35,8 @@
'trace_event/memory_dump_request_args.h',
'trace_event/memory_dump_session_state.cc',
'trace_event/memory_dump_session_state.h',
+ 'trace_event/memory_infra_background_whitelist.cc',
+ 'trace_event/memory_infra_background_whitelist.h',
'trace_event/process_memory_dump.cc',
'trace_event/process_memory_dump.h',
'trace_event/process_memory_maps.cc',
diff --git a/chromium/base/trace_event/trace_event_android.cc b/chromium/base/trace_event/trace_event_android.cc
index d406d2cafae..a28c54a8b90 100644
--- a/chromium/base/trace_event/trace_event_android.cc
+++ b/chromium/base/trace_event/trace_event_android.cc
@@ -128,7 +128,8 @@ void TraceLog::StopATrace() {
// TraceLog::Flush() requires the current thread to have a message loop, but
// this thread called from Java may not have one, so flush in another thread.
Thread end_chrome_tracing_thread("end_chrome_tracing");
- WaitableEvent complete_event(false, false);
+ WaitableEvent complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
end_chrome_tracing_thread.Start();
end_chrome_tracing_thread.task_runner()->PostTask(
FROM_HERE, base::Bind(&EndChromeTracing, Unretained(this),
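
The boolean-to-enum WaitableEvent constructor migration above recurs throughout the rest of this patch; a minimal sketch of the mapping, assuming base/synchronization/waitable_event.h:

    #include "base/synchronization/waitable_event.h"

    // Old form: base::WaitableEvent event(false /* manual_reset */,
    //                                     false /* initially_signaled */);
    base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                              base::WaitableEvent::InitialState::NOT_SIGNALED);
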
diff --git a/chromium/base/trace_event/trace_event_argument.cc b/chromium/base/trace_event/trace_event_argument.cc
index 8babf3b47fa..336d964bff4 100644
--- a/chromium/base/trace_event/trace_event_argument.cc
+++ b/chromium/base/trace_event/trace_event_argument.cc
@@ -288,7 +288,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
- for (base::Value* base_value : *list_value)
+ for (const auto& base_value : *list_value)
AppendBaseValue(*base_value);
EndArray();
} break;
@@ -342,7 +342,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArray();
- for (base::Value* base_value : *list_value)
+ for (const auto& base_value : *list_value)
AppendBaseValue(*base_value);
EndArray();
} break;
@@ -361,7 +361,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
switch (*type) {
case kTypeStartDict: {
- auto new_dict = new DictionaryValue();
+ auto* new_dict = new DictionaryValue();
if (cur_dict) {
cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
WrapUnique(new_dict));
@@ -386,7 +386,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
} break;
case kTypeStartArray: {
- auto new_list = new ListValue();
+ auto* new_list = new ListValue();
if (cur_dict) {
cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
WrapUnique(new_list));
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index e2e250ed567..f469f2f6bcb 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -261,7 +261,7 @@ void TraceEvent::AppendValueAsJSON(unsigned char type,
// So as not to lose bits from a 64-bit pointer, output as a hex string.
StringAppendF(
out, "\"0x%" PRIx64 "\"",
- static_cast<uint64_t>(reinterpret_cast<intptr_t>(value.as_pointer)));
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value.as_pointer)));
break;
case TRACE_VALUE_TYPE_STRING:
case TRACE_VALUE_TYPE_COPY_STRING:
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index ba7207d6163..23579cbb22d 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -104,7 +104,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
- for (const Value* v : *list_value)
+ for (const auto& v : *list_value)
AddValue(*v);
} break;
diff --git a/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc b/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
index e834ded3700..3ec6eab8163 100644
--- a/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
+++ b/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
@@ -8,6 +8,7 @@
#include <string>
#include "base/macros.h"
+#include "base/run_loop.h"
#include "base/trace_event/trace_event_impl.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -48,12 +49,12 @@ TEST_F(TraceSystemStatsMonitorTest, TraceEventSystemStatsMonitor) {
// Simulate enabling tracing.
system_stats_monitor->StartProfiling();
- message_loop.RunUntilIdle();
+ RunLoop().RunUntilIdle();
EXPECT_TRUE(system_stats_monitor->IsTimerRunningForTest());
// Simulate disabling tracing.
system_stats_monitor->StopProfiling();
- message_loop.RunUntilIdle();
+ RunLoop().RunUntilIdle();
EXPECT_FALSE(system_stats_monitor->IsTimerRunningForTest());
// Deleting the observer removes it from the TraceLog observer list.
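
The RunLoop change above is another recurring modernization in this patch; a minimal sketch, assuming a MessageLoop is bound to the current thread as in the test fixture:

    #include "base/message_loop/message_loop.h"
    #include "base/run_loop.h"

    base::MessageLoop message_loop;
    // Old form: message_loop.RunUntilIdle();
    base::RunLoop().RunUntilIdle();  // drains all currently pending tasks
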
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index e626a779ed5..ff8ec2de788 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -10,6 +10,7 @@
#include <cstdlib>
#include <memory>
+#include <utility>
#include "base/bind.h"
#include "base/command_line.h"
@@ -96,14 +97,18 @@ class TraceEventTestFixture : public testing::Test {
}
void CancelTrace() {
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
CancelTraceAsync(&flush_complete_event);
flush_complete_event.Wait();
}
void EndTraceAndFlush() {
num_flush_callbacks_ = 0;
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
EndTraceAndFlushAsync(&flush_complete_event);
flush_complete_event.Wait();
}
@@ -111,7 +116,9 @@ class TraceEventTestFixture : public testing::Test {
// Used when testing thread-local buffers which requires the thread initiating
// flush to have a message loop.
void EndTraceAndFlushInThreadWithMessageLoop() {
- WaitableEvent flush_complete_event(false, false);
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
Thread flush_thread("flush");
flush_thread.Start();
flush_thread.task_runner()->PostTask(
@@ -199,7 +206,7 @@ void TraceEventTestFixture::OnTraceDataCollected(
while (root_list->GetSize()) {
std::unique_ptr<Value> item;
root_list->Remove(0, &item);
- trace_parsed_.Append(item.release());
+ trace_parsed_.Append(std::move(item));
}
if (!has_more_events)
@@ -997,6 +1004,17 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
}
}
+void CheckTraceDefaultCategoryFilters(const TraceLog& trace_log) {
+ // The default config enables all category filters except the disabled-by-default-* ones.
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("bar"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo,bar"));
+ EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled(
+ "foo,disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log.GetCategoryGroupEnabled(
+ "disabled-by-default-foo,disabled-by-default-bar"));
+}
+
} // namespace
// Simple Test for emitting data and validating it was received.
@@ -1609,7 +1627,8 @@ TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
@@ -1631,7 +1650,9 @@ TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
WaitableEvent* task_complete_events[num_threads];
for (int i = 0; i < num_threads; i++) {
threads[i] = new Thread(StringPrintf("Thread %d", i));
- task_complete_events[i] = new WaitableEvent(false, false);
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
threads[i]->task_runner()->PostTask(
FROM_HERE, base::Bind(&TraceManyInstantEvents, i, num_events,
@@ -1678,7 +1699,9 @@ TEST_F(TraceEventTestFixture, ThreadNames) {
// Now run some trace code on these threads.
WaitableEvent* task_complete_events[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
- task_complete_events[i] = new WaitableEvent(false, false);
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
thread_ids[i] = threads[i]->GetThreadId();
threads[i]->task_runner()->PostTask(
@@ -1951,7 +1974,7 @@ TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
EXPECT_STREQ(
- "-*Debug,-*Test",
+ "",
trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
trace_log->SetDisabled();
trace_log->SetDisabled();
@@ -1988,6 +2011,48 @@ TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
trace_log->SetDisabled();
}
+TEST_F(TraceEventTestFixture, TraceWithDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("*", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig(""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig("foo,disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ trace_log->SetEnabled(TraceConfig("disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+}
+
TEST_F(TraceEventTestFixture, TraceSampling) {
TraceLog::GetInstance()->SetEnabled(
TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
@@ -2823,7 +2888,8 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
@@ -2833,8 +2899,10 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
@@ -2895,15 +2963,18 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped, &task_start_event,
&task_stop_event));
@@ -2920,7 +2991,8 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
BeginTrace();
Thread thread("1");
- WaitableEvent task_complete_event(false, false);
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
@@ -2928,8 +3000,10 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
task_complete_event.Wait();
task_complete_event.Reset();
- WaitableEvent task_start_event(false, false);
- WaitableEvent task_stop_event(false, false);
+ WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
@@ -3099,5 +3173,12 @@ TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
EXPECT_EQ(filter, config.ToCategoryFilterString());
}
+TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
+ BeginSpecificTrace("-*");
+ TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindNamePhase("clock_sync", "c"));
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index cc40ba98eb3..0661caf6fd9 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -86,7 +86,7 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-#define MAX_CATEGORY_GROUPS 105
+#define MAX_CATEGORY_GROUPS 200
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
@@ -402,7 +402,7 @@ void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
return;
HEAP_PROFILER_SCOPED_IGNORE;
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
if (thread_local_event_buffer &&
!CheckGeneration(thread_local_event_buffer->generation())) {
delete thread_local_event_buffer;
@@ -479,6 +479,12 @@ void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
}
#endif
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+ // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+ if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
+ enabled_flag |= ENABLED_FOR_RECORDING;
+
g_category_group_enabled[category_index] = enabled_flag;
}
@@ -1038,7 +1044,7 @@ void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
for (hash_set<MessageLoop*>::const_iterator it =
thread_message_loops_.begin();
it != thread_message_loops_.end(); ++it) {
- LOG(WARNING) << "Thread: " << (*it)->thread_name();
+ LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
}
}
FinishFlush(generation, discard_events);
@@ -1221,7 +1227,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// |thread_local_event_buffer_| can be null if the current thread doesn't have
// a message loop or the message loop is blocked.
InitializeThreadLocalEventBufferIfSupported();
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
diff --git a/chromium/base/trace_event/trace_sampling_thread.cc b/chromium/base/trace_event/trace_sampling_thread.cc
index a8d32d6ee2a..5a0d2f8a024 100644
--- a/chromium/base/trace_event/trace_sampling_thread.cc
+++ b/chromium/base/trace_event/trace_sampling_thread.cc
@@ -25,7 +25,9 @@ class TraceBucketData {
};
TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false), waitable_event_for_testing_(false, false) {}
+ : thread_running_(false),
+ waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
TraceSamplingThread::~TraceSamplingThread() {}
diff --git a/chromium/base/trace_event/winheap_dump_provider_win.cc b/chromium/base/trace_event/winheap_dump_provider_win.cc
index 80956369cca..f918aafad19 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win.cc
+++ b/chromium/base/trace_event/winheap_dump_provider_win.cc
@@ -9,7 +9,6 @@
#include "base/debug/profiler.h"
#include "base/strings/string_util.h"
#include "base/trace_event/process_memory_dump.h"
-#include "base/win/windows_version.h"
namespace base {
namespace trace_event {
@@ -56,13 +55,7 @@ bool WinHeapDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
// then chaos should be assumed. This flakyness is acceptable for tracing.
// - The MSDN page for HeapLock says: "If the HeapLock function is called on
// a heap created with the HEAP_NO_SERIALIZATION flag, the results are
- // undefined.". This is a problem on Windows XP where some system DLLs are
- // known for creating heaps with this particular flag. For this reason
- // this function should be disabled on XP.
- //
- // See https://crbug.com/487291 for more details about this.
- if (base::win::GetVersion() < base::win::VERSION_VISTA)
- return false;
+ // undefined."
// Disable this dump provider for the SyzyASan instrumented build
// because they don't support the heap walking functions yet.
diff --git a/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc b/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
index d7488fee457..c2dc01c195c 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
+++ b/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
@@ -14,8 +14,8 @@ namespace base {
namespace trace_event {
TEST(WinHeapDumpProviderTest, OnMemoryDump) {
- ProcessMemoryDump pmd(new MemoryDumpSessionState);
MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
WinHeapDumpProvider* winheap_dump_provider =
WinHeapDumpProvider::GetInstance();
diff --git a/chromium/base/tracked_objects.cc b/chromium/base/tracked_objects.cc
index d24cedf1592..675c9b89e67 100644
--- a/chromium/base/tracked_objects.cc
+++ b/chromium/base/tracked_objects.cc
@@ -16,6 +16,7 @@
#include "base/process/process_handle.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
+#include "base/threading/worker_pool.h"
#include "base/tracking_info.h"
#include "build/build_config.h"
@@ -355,7 +356,9 @@ ThreadData* ThreadData::next() const { return next_; }
// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
- Initialize();
+ if (base::WorkerPool::RunsTasksOnCurrentThread())
+ return;
+ EnsureTlsInitialization();
ThreadData* current_thread_data =
reinterpret_cast<ThreadData*>(tls_index_.Get());
if (current_thread_data)
@@ -669,7 +672,7 @@ void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
}
}
-void ThreadData::Initialize() {
+void ThreadData::EnsureTlsInitialization() {
if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
return; // Someone else did the initialization.
// Due to racy lazy initialization in tests, we'll need to recheck status_
@@ -709,7 +712,7 @@ void ThreadData::InitializeAndSetTrackingStatus(Status status) {
DCHECK_GE(status, DEACTIVATED);
DCHECK_LE(status, PROFILING_ACTIVE);
- Initialize(); // No-op if already initialized.
+ EnsureTlsInitialization(); // No-op if already initialized.
if (status > DEACTIVATED)
status = PROFILING_ACTIVE;
diff --git a/chromium/base/tracked_objects.h b/chromium/base/tracked_objects.h
index 168b17db045..81962f3fe9a 100644
--- a/chromium/base/tracked_objects.h
+++ b/chromium/base/tracked_objects.h
@@ -514,7 +514,7 @@ class BASE_EXPORT ThreadData {
// Initializes all statics if needed (this initialization call should be made
// while we are single threaded).
- static void Initialize();
+ static void EnsureTlsInitialization();
// Sets internal status_.
// If |status| is false, then status_ is set to DEACTIVATED.
diff --git a/chromium/base/tuple.h b/chromium/base/tuple.h
index df69bf01169..e82f2e5f06a 100644
--- a/chromium/base/tuple.h
+++ b/chromium/base/tuple.h
@@ -2,27 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// A Tuple is a generic templatized container, similar in concept to std::pair
-// and std::tuple. The convenient MakeTuple() function takes any number of
-// arguments and will construct and return the appropriate Tuple object. The
-// functions DispatchToMethod and DispatchToFunction take a function pointer or
-// instance and method pointer, and unpack a tuple into arguments to the call.
-//
-// Tuple elements are copied by value, and stored in the tuple. See the unit
-// tests for more details of how/when the values are copied.
+// Use std::tuple as the tuple type. This file contains helper functions for
+// working with std::tuples.
+// The functions DispatchToMethod and DispatchToFunction take a function pointer
+// or instance and method pointer, and unpack a tuple into arguments to the
+// call.
//
// Example usage:
// // These two methods of creating a Tuple are identical.
-// Tuple<int, const char*> tuple_a(1, "wee");
-// Tuple<int, const char*> tuple_b = MakeTuple(1, "wee");
+// std::tuple<int, const char*> tuple_a(1, "wee");
+// std::tuple<int, const char*> tuple_b = std::make_tuple(1, "wee");
//
// void SomeFunc(int a, const char* b) { }
// DispatchToFunction(&SomeFunc, tuple_a); // SomeFunc(1, "wee")
// DispatchToFunction(
-// &SomeFunc, MakeTuple(10, "foo")); // SomeFunc(10, "foo")
+// &SomeFunc, std::make_tuple(10, "foo")); // SomeFunc(10, "foo")
//
// struct { void SomeMeth(int a, int b, int c) { } } foo;
-// DispatchToMethod(&foo, &Foo::SomeMeth, MakeTuple(1, 2, 3));
+// DispatchToMethod(&foo, &Foo::SomeMeth, std::make_tuple(1, 2, 3));
// // foo->SomeMeth(1, 2, 3);
#ifndef BASE_TUPLE_H_
@@ -107,46 +104,23 @@ struct MakeIndexSequenceImpl<N, Ns...>
#endif // defined(OS_WIN) && defined(_PREFAST_)
-template <size_t N>
-using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
-
-// Tuple -----------------------------------------------------------------------
-//
-// This set of classes is useful for bundling 0 or more heterogeneous data types
-// into a single variable. The advantage of this is that it greatly simplifies
-// function objects that need to take an arbitrary number of parameters; see
-// RunnableMethod and IPC::MessageWithTuple.
-//
-// Tuple<> is supplied to act as a 'void' type. It can be used, for example,
-// when dispatching to a function that accepts no arguments (see the
-// Dispatchers below).
-// Tuple<A> is rarely useful. One such use is when A is non-const ref that you
-// want filled by the dispatchee, and the tuple is merely a container for that
-// output (a "tier"). See MakeRefTuple and its usages.
-
-template <typename... Ts>
-using Tuple = std::tuple<Ts...>;
-
-using std::get;
-
-// Tuple creators -------------------------------------------------------------
-//
-// Helper functions for constructing tuples while inferring the template
-// argument types.
-
-template <typename... Ts>
-inline Tuple<Ts...> MakeTuple(const Ts&... arg) {
- return Tuple<Ts...>(arg...);
+// std::get() in libstdc++ <= 4.6 returns an lvalue reference when given an
+// rvalue reference to a tuple, where an rvalue reference is expected.
+template <size_t I, typename... Ts>
+typename std::tuple_element<I, std::tuple<Ts...>>::type&& get(
+ std::tuple<Ts...>&& t) {
+ using ElemType = typename std::tuple_element<I, std::tuple<Ts...>>::type;
+ return std::forward<ElemType>(std::get<I>(t));
}
-// The following set of helpers make what Boost refers to as "Tiers" - a tuple
-// of references.
-
-template <typename... Ts>
-inline Tuple<Ts&...> MakeRefTuple(Ts&... arg) {
- return Tuple<Ts&...>(arg...);
+template <size_t I, typename T>
+auto get(T& t) -> decltype(std::get<I>(t)) {
+ return std::get<I>(t);
}
+template <size_t N>
+using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
+
// Dispatchers ----------------------------------------------------------------
//
// Helper functions that call the given method on an object, with the unpacked
@@ -161,15 +135,15 @@ inline Tuple<Ts&...> MakeRefTuple(Ts&... arg) {
template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
- const Tuple<Ts...>& arg,
+ const std::tuple<Ts...>& arg,
IndexSequence<Ns...>) {
- (obj->*method)(internal::Unwrap(get<Ns>(arg))...);
+ (obj->*method)(internal::Unwrap(std::get<Ns>(arg))...);
}
template <typename ObjT, typename Method, typename... Ts>
inline void DispatchToMethod(const ObjT& obj,
Method method,
- const Tuple<Ts...>& arg) {
+ const std::tuple<Ts...>& arg) {
DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
}
@@ -177,13 +151,14 @@ inline void DispatchToMethod(const ObjT& obj,
template <typename Function, typename... Ts, size_t... Ns>
inline void DispatchToFunctionImpl(Function function,
- const Tuple<Ts...>& arg,
+ const std::tuple<Ts...>& arg,
IndexSequence<Ns...>) {
- (*function)(internal::Unwrap(get<Ns>(arg))...);
+ (*function)(internal::Unwrap(std::get<Ns>(arg))...);
}
template <typename Function, typename... Ts>
-inline void DispatchToFunction(Function function, const Tuple<Ts...>& arg) {
+inline void DispatchToFunction(Function function,
+ const std::tuple<Ts...>& arg) {
DispatchToFunctionImpl(function, arg, MakeIndexSequence<sizeof...(Ts)>());
}
@@ -197,18 +172,19 @@ template <typename ObjT,
size_t... OutNs>
inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
- const Tuple<InTs...>& in,
- Tuple<OutTs...>* out,
+ const std::tuple<InTs...>& in,
+ std::tuple<OutTs...>* out,
IndexSequence<InNs...>,
IndexSequence<OutNs...>) {
- (obj->*method)(internal::Unwrap(get<InNs>(in))..., &get<OutNs>(*out)...);
+ (obj->*method)(internal::Unwrap(std::get<InNs>(in))...,
+ &std::get<OutNs>(*out)...);
}
template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
inline void DispatchToMethod(const ObjT& obj,
Method method,
- const Tuple<InTs...>& in,
- Tuple<OutTs...>* out) {
+ const std::tuple<InTs...>& in,
+ std::tuple<OutTs...>* out) {
DispatchToMethodImpl(obj, method, in, out,
MakeIndexSequence<sizeof...(InTs)>(),
MakeIndexSequence<sizeof...(OutTs)>());
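
A minimal sketch of what the base::get() overloads above enable on older libstdc++ (the values are hypothetical; the unit test in the next hunk exercises the same behavior):

    #include <string>
    #include <tuple>
    #include <utility>
    #include "base/tuple.h"

    std::tuple<std::string> t(std::string("hello"));
    // base::get forwards an rvalue reference, so the element can be moved out
    // even where std::get on libstdc++ <= 4.6 would yield an lvalue reference.
    std::string s = base::get<0>(std::move(t));
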
diff --git a/chromium/base/tuple_unittest.cc b/chromium/base/tuple_unittest.cc
index 55a91392353..6f90c292206 100644
--- a/chromium/base/tuple_unittest.cc
+++ b/chromium/base/tuple_unittest.cc
@@ -32,51 +32,34 @@ struct Addz {
} // namespace
TEST(TupleTest, Basic) {
- base::Tuple<> t0 = base::MakeTuple();
+ std::tuple<> t0 = std::make_tuple();
ALLOW_UNUSED_LOCAL(t0);
- base::Tuple<int> t1(1);
- base::Tuple<int, const char*> t2 =
- base::MakeTuple(1, static_cast<const char*>("wee"));
- base::Tuple<int, int, int> t3(1, 2, 3);
- base::Tuple<int, int, int, int*> t4(1, 2, 3, &get<0>(t1));
- base::Tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &get<0>(t4));
- base::Tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &get<0>(t4));
-
- EXPECT_EQ(1, get<0>(t1));
- EXPECT_EQ(1, get<0>(t2));
- EXPECT_EQ(1, get<0>(t3));
- EXPECT_EQ(2, get<1>(t3));
- EXPECT_EQ(3, get<2>(t3));
- EXPECT_EQ(1, get<0>(t4));
- EXPECT_EQ(2, get<1>(t4));
- EXPECT_EQ(3, get<2>(t4));
- EXPECT_EQ(1, get<0>(t5));
- EXPECT_EQ(2, get<1>(t5));
- EXPECT_EQ(3, get<2>(t5));
- EXPECT_EQ(4, get<3>(t5));
- EXPECT_EQ(1, get<0>(t6));
- EXPECT_EQ(2, get<1>(t6));
- EXPECT_EQ(3, get<2>(t6));
- EXPECT_EQ(4, get<3>(t6));
- EXPECT_EQ(5, get<4>(t6));
-
- EXPECT_EQ(1, get<0>(t1));
+ std::tuple<int> t1(1);
+ std::tuple<int, const char*> t2 =
+ std::make_tuple(1, static_cast<const char*>("wee"));
+ ALLOW_UNUSED_LOCAL(t2);
+ std::tuple<int, int, int> t3(1, 2, 3);
+ std::tuple<int, int, int, int*> t4(1, 2, 3, &std::get<0>(t1));
+ std::tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &std::get<0>(t4));
+ std::tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &std::get<0>(t4));
+
+ EXPECT_EQ(1, std::get<0>(t1));
DispatchToFunction(&DoAdd, t4);
- EXPECT_EQ(6, get<0>(t1));
+ EXPECT_EQ(6, std::get<0>(t1));
int res = 0;
- DispatchToFunction(&DoAdd, base::MakeTuple(9, 8, 7, &res));
+ DispatchToFunction(&DoAdd, std::make_tuple(9, 8, 7, &res));
EXPECT_EQ(24, res);
Addy addy;
- EXPECT_EQ(1, get<0>(t4));
+ EXPECT_EQ(1, std::get<0>(t4));
DispatchToMethod(&addy, &Addy::DoAdd, t5);
- EXPECT_EQ(10, get<0>(t4));
+ EXPECT_EQ(10, std::get<0>(t4));
Addz addz;
- EXPECT_EQ(10, get<0>(t4));
+ EXPECT_EQ(10, std::get<0>(t4));
DispatchToMethod(&addz, &Addz::DoAdd, t6);
- EXPECT_EQ(15, get<0>(t4));
+ EXPECT_EQ(15, std::get<0>(t4));
}
namespace {
@@ -111,8 +94,8 @@ TEST(TupleTest, Copying) {
bool res = false;
// Creating the tuple should copy the class to store internally in the tuple.
- base::Tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
- get<1>(tuple) = &get<0>(tuple);
+ std::tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
+ std::get<1>(tuple) = &std::get<0>(tuple);
EXPECT_EQ(2, CopyLogger::TimesConstructed);
EXPECT_EQ(1, CopyLogger::TimesCopied);
@@ -131,4 +114,30 @@ TEST(TupleTest, Copying) {
EXPECT_EQ(2, CopyLogger::TimesCopied);
}
+TEST(TupleTest, Get) {
+ int i = 1;
+ int j = 2;
+ std::tuple<int, int&, int&&> t(3, i, std::move(j));
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<0>(t))>::value));
+ EXPECT_EQ(3, base::get<0>(t));
+
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<1>(t))>::value));
+ EXPECT_EQ(1, base::get<1>(t));
+
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<2>(t))>::value));
+ EXPECT_EQ(2, base::get<2>(t));
+
+ EXPECT_TRUE((std::is_same<int&&,
+ decltype(base::get<0>(std::move(t)))>::value));
+ EXPECT_EQ(3, base::get<0>(std::move(t)));
+
+ EXPECT_TRUE((std::is_same<int&,
+ decltype(base::get<1>(std::move(t)))>::value));
+ EXPECT_EQ(1, base::get<1>(std::move(t)));
+
+ EXPECT_TRUE((std::is_same<int&&,
+ decltype(base::get<2>(std::move(t)))>::value));
+ EXPECT_EQ(2, base::get<2>(std::move(t)));
+}
+
} // namespace base
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 1b87498738a..4772b647748 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -14,7 +14,6 @@
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
-#include "base/move.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
@@ -29,8 +28,8 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
// expects |node| to always be non-NULL.
std::unique_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
std::unique_ptr<ListValue> copy;
- for (ListValue::const_iterator it = list.begin(); it != list.end(); ++it) {
- std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(**it);
+ for (const auto& entry : list) {
+ std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(*entry);
if (child_copy) {
if (!copy)
copy.reset(new ListValue);
@@ -68,22 +67,6 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
}
}
-// A small functor for comparing Values for std::find_if and similar.
-class ValueEquals {
- public:
- // Pass the value against which all consecutive calls of the () operator will
- // compare their argument to. This Value object must not be destroyed while
- // the ValueEquals is in use.
- explicit ValueEquals(const Value* first) : first_(first) { }
-
- bool operator ()(const Value* second) const {
- return first_->Equals(second);
- }
-
- private:
- const Value* first_;
-};
-
} // namespace
Value::~Value() {
@@ -321,12 +304,12 @@ BinaryValue::~BinaryValue() {
}
// static
-BinaryValue* BinaryValue::CreateWithCopiedBuffer(const char* buffer,
- size_t size) {
- char* buffer_copy = new char[size];
- memcpy(buffer_copy, buffer, size);
- std::unique_ptr<char[]> scoped_buffer_copy(buffer_copy);
- return new BinaryValue(std::move(scoped_buffer_copy), size);
+std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
+ const char* buffer,
+ size_t size) {
+ std::unique_ptr<char[]> buffer_copy(new char[size]);
+ memcpy(buffer_copy.get(), buffer, size);
+ return base::MakeUnique<BinaryValue>(std::move(buffer_copy), size);
}
bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
@@ -336,7 +319,7 @@ bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
}
BinaryValue* BinaryValue::DeepCopy() const {
- return CreateWithCopiedBuffer(buffer_.get(), size_);
+ return CreateWithCopiedBuffer(buffer_.get(), size_).release();
}
bool BinaryValue::Equals(const Value* other) const {
@@ -383,18 +366,12 @@ bool DictionaryValue::GetAsDictionary(const DictionaryValue** out_value) const {
bool DictionaryValue::HasKey(const std::string& key) const {
DCHECK(IsStringUTF8(key));
- ValueMap::const_iterator current_entry = dictionary_.find(key);
+ auto current_entry = dictionary_.find(key);
DCHECK((current_entry == dictionary_.end()) || current_entry->second);
return current_entry != dictionary_.end();
}
void DictionaryValue::Clear() {
- ValueMap::iterator dict_iterator = dictionary_.begin();
- while (dict_iterator != dictionary_.end()) {
- delete dict_iterator->second;
- ++dict_iterator;
- }
-
dictionary_.clear();
}
@@ -452,16 +429,7 @@ void DictionaryValue::SetString(const std::string& path,
void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
std::unique_ptr<Value> in_value) {
- Value* bare_ptr = in_value.release();
- // If there's an existing value here, we need to delete it, because
- // we own all our children.
- std::pair<ValueMap::iterator, bool> ins_res =
- dictionary_.insert(std::make_pair(key, bare_ptr));
- if (!ins_res.second) {
- DCHECK_NE(ins_res.first->second, bare_ptr); // This would be bogus
- delete ins_res.first->second;
- ins_res.first->second = bare_ptr;
- }
+ dictionary_[key] = std::move(in_value);
}
void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
@@ -645,13 +613,12 @@ bool DictionaryValue::GetList(const std::string& path, ListValue** out_value) {
bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
const Value** out_value) const {
DCHECK(IsStringUTF8(key));
- ValueMap::const_iterator entry_iterator = dictionary_.find(key);
+ auto entry_iterator = dictionary_.find(key);
if (entry_iterator == dictionary_.end())
return false;
- const Value* entry = entry_iterator->second;
if (out_value)
- *out_value = entry;
+ *out_value = entry_iterator->second.get();
return true;
}
@@ -775,15 +742,12 @@ bool DictionaryValue::RemoveWithoutPathExpansion(
const std::string& key,
std::unique_ptr<Value>* out_value) {
DCHECK(IsStringUTF8(key));
- ValueMap::iterator entry_iterator = dictionary_.find(key);
+ auto entry_iterator = dictionary_.find(key);
if (entry_iterator == dictionary_.end())
return false;
- Value* entry = entry_iterator->second;
if (out_value)
- out_value->reset(entry);
- else
- delete entry;
+ *out_value = std::move(entry_iterator->second);
dictionary_.erase(entry_iterator);
return true;
}
@@ -849,10 +813,9 @@ DictionaryValue::Iterator::~Iterator() {}
DictionaryValue* DictionaryValue::DeepCopy() const {
DictionaryValue* result = new DictionaryValue;
- for (ValueMap::const_iterator current_entry(dictionary_.begin());
- current_entry != dictionary_.end(); ++current_entry) {
- result->SetWithoutPathExpansion(current_entry->first,
- current_entry->second->DeepCopy());
+ for (const auto& current_entry : dictionary_) {
+ result->SetWithoutPathExpansion(current_entry.first,
+ current_entry.second->CreateDeepCopy());
}
return result;
@@ -904,12 +867,14 @@ ListValue::~ListValue() {
}
void ListValue::Clear() {
- for (ValueVector::iterator i(list_.begin()); i != list_.end(); ++i)
- delete *i;
list_.clear();
}
bool ListValue::Set(size_t index, Value* in_value) {
+ return Set(index, WrapUnique(in_value));
+}
+
+bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
if (!in_value)
return false;
@@ -917,25 +882,21 @@ bool ListValue::Set(size_t index, Value* in_value) {
// Pad out any intermediate indexes with null settings
while (index > list_.size())
Append(CreateNullValue());
- Append(in_value);
+ Append(std::move(in_value));
} else {
+ // TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
DCHECK(list_[index] != in_value);
- delete list_[index];
- list_[index] = in_value;
+ list_[index] = std::move(in_value);
}
return true;
}
-bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
- return Set(index, in_value.release());
-}
-
bool ListValue::Get(size_t index, const Value** out_value) const {
if (index >= list_.size())
return false;
if (out_value)
- *out_value = list_[index];
+ *out_value = list_[index].get();
return true;
}
@@ -1046,20 +1007,17 @@ bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
return false;
if (out_value)
- out_value->reset(list_[index]);
- else
- delete list_[index];
+ *out_value = std::move(list_[index]);
list_.erase(list_.begin() + index);
return true;
}
bool ListValue::Remove(const Value& value, size_t* index) {
- for (ValueVector::iterator i(list_.begin()); i != list_.end(); ++i) {
- if ((*i)->Equals(&value)) {
- size_t previous_index = i - list_.begin();
- delete *i;
- list_.erase(i);
+ for (auto it = list_.begin(); it != list_.end(); ++it) {
+ if ((*it)->Equals(&value)) {
+ size_t previous_index = it - list_.begin();
+ list_.erase(it);
if (index)
*index = previous_index;
@@ -1072,20 +1030,18 @@ bool ListValue::Remove(const Value& value, size_t* index) {
ListValue::iterator ListValue::Erase(iterator iter,
std::unique_ptr<Value>* out_value) {
if (out_value)
- out_value->reset(*iter);
- else
- delete *iter;
+ *out_value = std::move(*Storage::iterator(iter));
return list_.erase(iter);
}
void ListValue::Append(std::unique_ptr<Value> in_value) {
- Append(in_value.release());
+ list_.push_back(std::move(in_value));
}
void ListValue::Append(Value* in_value) {
DCHECK(in_value);
- list_.push_back(in_value);
+ Append(WrapUnique(in_value));
}
void ListValue::AppendBoolean(bool in_value) {
@@ -1124,13 +1080,13 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
bool ListValue::AppendIfNotPresent(Value* in_value) {
DCHECK(in_value);
- for (ValueVector::const_iterator i(list_.begin()); i != list_.end(); ++i) {
- if ((*i)->Equals(in_value)) {
+ for (const auto& entry : list_) {
+ if (entry->Equals(in_value)) {
delete in_value;
return false;
}
}
- list_.push_back(in_value);
+ list_.emplace_back(in_value);
return true;
}
@@ -1139,12 +1095,15 @@ bool ListValue::Insert(size_t index, Value* in_value) {
if (index > list_.size())
return false;
- list_.insert(list_.begin() + index, in_value);
+ list_.insert(list_.begin() + index, WrapUnique(in_value));
return true;
}
ListValue::const_iterator ListValue::Find(const Value& value) const {
- return std::find_if(list_.begin(), list_.end(), ValueEquals(&value));
+ return std::find_if(list_.begin(), list_.end(),
+ [&value](const std::unique_ptr<Value>& entry) {
+ return entry->Equals(&value);
+ });
}
void ListValue::Swap(ListValue* other) {
@@ -1166,8 +1125,8 @@ bool ListValue::GetAsList(const ListValue** out_value) const {
ListValue* ListValue::DeepCopy() const {
ListValue* result = new ListValue;
- for (ValueVector::const_iterator i(list_.begin()); i != list_.end(); ++i)
- result->Append((*i)->DeepCopy());
+ for (const auto& entry : list_)
+ result->Append(entry->CreateDeepCopy());
return result;
}
@@ -1182,11 +1141,11 @@ bool ListValue::Equals(const Value* other) const {
const ListValue* other_list =
static_cast<const ListValue*>(other);
- const_iterator lhs_it, rhs_it;
+ Storage::const_iterator lhs_it, rhs_it;
for (lhs_it = begin(), rhs_it = other_list->begin();
lhs_it != end() && rhs_it != other_list->end();
++lhs_it, ++rhs_it) {
- if (!(*lhs_it)->Equals(*rhs_it))
+ if (!(*lhs_it)->Equals(rhs_it->get()))
return false;
}
if (lhs_it != end() || rhs_it != other_list->end())
diff --git a/chromium/base/values.h b/chromium/base/values.h
index e2506cc14f5..e3d60891b33 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -42,9 +42,6 @@ class ListValue;
class StringValue;
class Value;
-typedef std::vector<Value*> ValueVector;
-typedef std::map<std::string, Value*> ValueMap;
-
// The Value class is the base class for Values. A Value can be instantiated
// via the Create*Value() factory methods, or by directly creating instances of
// the subclasses.
@@ -185,7 +182,8 @@ class BASE_EXPORT BinaryValue: public Value {
// For situations where you want to keep ownership of your buffer, this
// factory method creates a new BinaryValue by copying the contents of the
// buffer that's passed in.
- static BinaryValue* CreateWithCopiedBuffer(const char* buffer, size_t size);
+ static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
+ size_t size);
size_t GetSize() const { return size_; }
@@ -210,6 +208,7 @@ class BASE_EXPORT BinaryValue: public Value {
// are |std::string|s and should be UTF-8 encoded.
class BASE_EXPORT DictionaryValue : public Value {
public:
+ using Storage = std::map<std::string, std::unique_ptr<Value>>;
// Returns |value| if it is a dictionary, nullptr otherwise.
static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
@@ -372,7 +371,7 @@ class BASE_EXPORT DictionaryValue : public Value {
private:
const DictionaryValue& target_;
- ValueMap::const_iterator it_;
+ Storage::const_iterator it_;
};
// Overridden from Value:
@@ -382,7 +381,7 @@ class BASE_EXPORT DictionaryValue : public Value {
bool Equals(const Value* other) const override;
private:
- ValueMap dictionary_;
+ Storage dictionary_;
DISALLOW_COPY_AND_ASSIGN(DictionaryValue);
};
@@ -390,8 +389,9 @@ class BASE_EXPORT DictionaryValue : public Value {
// This type of Value represents a list of other Value values.
class BASE_EXPORT ListValue : public Value {
public:
- typedef ValueVector::iterator iterator;
- typedef ValueVector::const_iterator const_iterator;
+ using Storage = std::vector<std::unique_ptr<Value>>;
+ using const_iterator = Storage::const_iterator;
+ using iterator = Storage::iterator;
// Returns |value| if it is a list, nullptr otherwise.
static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
@@ -508,7 +508,7 @@ class BASE_EXPORT ListValue : public Value {
std::unique_ptr<ListValue> CreateDeepCopy() const;
private:
- ValueVector list_;
+ Storage list_;
DISALLOW_COPY_AND_ASSIGN(ListValue);
};
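The header now spells the list storage as ListValue::Storage (std::vector<std::unique_ptr<Value>>) and adds an Append(std::unique_ptr<Value>) overload, keeping the raw-pointer Append() as a thin wrapper around WrapUnique(). A hedged sketch of what call sites look like against this API (base::StringValue and base::FundamentalValue are the usual base value types; the function below is illustrative):

#include <memory>

#include "base/memory/ptr_util.h"
#include "base/values.h"

void BuildList(base::ListValue* list) {
  // Preferred: hand ownership over explicitly via the unique_ptr overload.
  list->Append(base::WrapUnique(new base::StringValue("first")));

  // Legacy raw-pointer call sites still compile; per the values.cc hunk above
  // the overload simply forwards to Append(WrapUnique(in_value)).
  list->Append(new base::FundamentalValue(42));
}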
diff --git a/chromium/base/values_unittest.cc b/chromium/base/values_unittest.cc
index ac7883054f6..d68522234de 100644
--- a/chromium/base/values_unittest.cc
+++ b/chromium/base/values_unittest.cc
@@ -127,7 +127,7 @@ TEST(ValuesTest, BinaryValue) {
char stack_buffer[42];
memset(stack_buffer, '!', 42);
- binary.reset(BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42));
+ binary = BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42);
ASSERT_TRUE(binary.get());
ASSERT_TRUE(binary->GetBuffer());
ASSERT_NE(stack_buffer, binary->GetBuffer());
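BinaryValue::CreateWithCopiedBuffer() now returns std::unique_ptr<BinaryValue>, which is why the test drops the reset() call in favour of plain assignment. A small sketch of the updated call pattern (the wrapper function is illustrative):

#include <stddef.h>

#include <memory>

#include "base/values.h"

std::unique_ptr<base::BinaryValue> CopyIntoValue(const char* data, size_t size) {
  // Ownership comes back as a unique_ptr; no manual reset() or delete needed.
  return base::BinaryValue::CreateWithCopiedBuffer(data, size);
}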
diff --git a/chromium/base/win/BUILD.gn b/chromium/base/win/BUILD.gn
new file mode 100644
index 00000000000..01092735b3a
--- /dev/null
+++ b/chromium/base/win/BUILD.gn
@@ -0,0 +1,23 @@
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+
+declare_args() {
+ # Indicates if the handle verifier should operate in a single module mode. By
+ # default a single instance gets shared by all the modules.
+ single_module_mode_handle_verifier = false
+}
+
+# Ensure that the handle verifier is always used in a single module mode for the
+# component builds.
+if (is_component_build) {
+ single_module_mode_handle_verifier = true
+}
+
+buildflag_header("base_win_features") {
+ header = "base_features.h"
+ header_dir = "base/win"
+ flags = [ "SINGLE_MODULE_MODE_HANDLE_VERIFIER=$single_module_mode_handle_verifier" ]
+}
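The buildflag_header target above generates base/win/base_features.h with a SINGLE_MODULE_MODE_HANDLE_VERIFIER flag that is queried through BUILDFLAG() rather than a bare #ifdef; the scoped_handle.cc hunk below switches to exactly that pattern. A minimal consumer sketch (the helper function is illustrative; the include path follows header_dir/header from the GN file):

#include "base/win/base_features.h"  // generated by the base_win_features target above

bool UsesPerModuleHandleVerifier() {
  // The generated header pulls in build/buildflag.h, so BUILDFLAG() expands to
  // 1 or 0 depending on the single_module_mode_handle_verifier GN arg, which
  // is forced on for component builds above.
#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
  return true;
#else
  return false;
#endif
}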
diff --git a/chromium/base/win/registry.cc b/chromium/base/win/registry.cc
index 7837b8a68d1..2fe53cf8b8c 100644
--- a/chromium/base/win/registry.cc
+++ b/chromium/base/win/registry.cc
@@ -39,7 +39,7 @@ const REGSAM kWow64AccessMask = KEY_WOW64_32KEY | KEY_WOW64_64KEY;
// Watches for modifications to a key.
class RegKey::Watcher : public ObjectWatcher::Delegate {
public:
- explicit Watcher(RegKey* owner) : owner_(owner) {}
+ Watcher() {}
~Watcher() override {}
bool StartWatching(HKEY key, const ChangeCallback& callback);
@@ -53,7 +53,6 @@ class RegKey::Watcher : public ObjectWatcher::Delegate {
}
private:
- RegKey* owner_;
ScopedHandle watch_event_;
ObjectWatcher object_watcher_;
ChangeCallback callback_;
@@ -413,7 +412,7 @@ LONG RegKey::WriteValue(const wchar_t* name,
bool RegKey::StartWatching(const ChangeCallback& callback) {
if (!key_watcher_)
- key_watcher_.reset(new Watcher(this));
+ key_watcher_.reset(new Watcher());
if (!key_watcher_->StartWatching(key_, callback))
return false;
diff --git a/chromium/base/win/scoped_handle.cc b/chromium/base/win/scoped_handle.cc
index cce16281ae4..6d152aec414 100644
--- a/chromium/base/win/scoped_handle.cc
+++ b/chromium/base/win/scoped_handle.cc
@@ -16,6 +16,7 @@
#include "base/macros.h"
#include "base/synchronization/lock_impl.h"
#include "base/threading/thread_local.h"
+#include "base/win/base_features.h"
#include "base/win/current_module.h"
extern "C" {
@@ -131,7 +132,7 @@ void ThreadSafeAssignOrCreateActiveVerifier(ActiveVerifier* existing_verifier,
// static
void ActiveVerifier::InstallVerifier() {
-#if defined(COMPONENT_BUILD)
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
// Component build has one Active Verifier per module.
ThreadSafeAssignOrCreateActiveVerifier(nullptr, true);
#else
diff --git a/chromium/base/win/scoped_handle.h b/chromium/base/win/scoped_handle.h
index 6603e681109..d8f480f5f20 100644
--- a/chromium/base/win/scoped_handle.h
+++ b/chromium/base/win/scoped_handle.h
@@ -12,7 +12,6 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/move.h"
// TODO(rvargas): remove this with the rest of the verifier.
#if defined(COMPILER_MSVC)
@@ -37,8 +36,6 @@ namespace win {
// this explicitly is necessary because of bug 528394 and VC++ 2015.
template <class Traits, class Verifier>
class GenericScopedHandle {
- MOVE_ONLY_TYPE_FOR_CPP_03(GenericScopedHandle)
-
public:
typedef typename Traits::Handle Handle;
@@ -112,6 +109,8 @@ class GenericScopedHandle {
FRIEND_TEST_ALL_PREFIXES(ScopedHandleTest, ActiveVerifierWrongOwner);
FRIEND_TEST_ALL_PREFIXES(ScopedHandleTest, ActiveVerifierUntrackedHandle);
Handle handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericScopedHandle);
};
#undef BASE_WIN_GET_CALLER
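With base/move.h and MOVE_ONLY_TYPE_FOR_CPP_03 gone, GenericScopedHandle stays move-only through its explicitly declared move operations (the comment about bug 528394 and VC++ 2015 above refers to those) plus DISALLOW_COPY_AND_ASSIGN. A stripped-down sketch of the same pattern for an arbitrary RAII wrapper, not taken from the patch:

#include "base/macros.h"

// Move-only RAII wrapper: explicit move constructor/assignment, copies
// disabled via DISALLOW_COPY_AND_ASSIGN, mirroring GenericScopedHandle.
class ScopedFd {
 public:
  ScopedFd() : fd_(-1) {}
  explicit ScopedFd(int fd) : fd_(fd) {}
  ScopedFd(ScopedFd&& other) : fd_(other.release()) {}
  ScopedFd& operator=(ScopedFd&& other) {
    reset(other.release());
    return *this;
  }
  ~ScopedFd() { reset(-1); }

  int release() {
    int fd = fd_;
    fd_ = -1;
    return fd;
  }
  void reset(int fd) {
    // A real wrapper would close the old descriptor here before adopting the
    // new one; omitted to keep the sketch platform-neutral.
    fd_ = fd;
  }

 private:
  int fd_;

  DISALLOW_COPY_AND_ASSIGN(ScopedFd);
};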
diff --git a/chromium/base/win/scoped_handle_test_dll.cc b/chromium/base/win/scoped_handle_test_dll.cc
index 440a4ca7658..c72e4592b9d 100644
--- a/chromium/base/win/scoped_handle_test_dll.cc
+++ b/chromium/base/win/scoped_handle_test_dll.cc
@@ -6,6 +6,7 @@
#include <vector>
+#include "base/win/base_features.h"
#include "base/win/current_module.h"
#include "base/win/scoped_handle.h"
@@ -99,7 +100,7 @@ bool InternalRunLocationTest() {
HMODULE main_module = ::GetModuleHandle(NULL);
-#if defined(COMPONENT_BUILD)
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
// In a component build ActiveVerifier will always be created inside base.dll
// as the code always lives there.
if (verifier_module == my_module || verifier_module == main_module)
diff --git a/chromium/base/win/win_util.cc b/chromium/base/win/win_util.cc
index 75f1e53c141..3b7d3195ac6 100644
--- a/chromium/base/win/win_util.cc
+++ b/chromium/base/win/win_util.cc
@@ -6,10 +6,7 @@
#include <aclapi.h>
#include <cfgmgr32.h>
-#include <lm.h>
#include <powrprof.h>
-#include <shellapi.h>
-#include <shlobj.h>
#include <shobjidl.h> // Must be before propkey.
#include <initguid.h>
#include <inspectable.h>
@@ -19,6 +16,8 @@
#include <roapi.h>
#include <sddl.h>
#include <setupapi.h>
+#include <shellscalingapi.h>
+#include <shlwapi.h>
#include <signal.h>
#include <stddef.h>
#include <stdlib.h>
@@ -41,7 +40,6 @@
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
#include "base/win/registry.h"
-#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_comptr.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_propvariant.h"
@@ -107,10 +105,6 @@ class LazyIsUser32AndGdi32Available {
DISALLOW_COPY_AND_ASSIGN(LazyIsUser32AndGdi32Available);
};
-const wchar_t kWindows8OSKRegPath[] =
- L"Software\\Classes\\CLSID\\{054AAE20-4BEA-4347-8A35-64A533254A9D}"
- L"\\LocalServer32";
-
// Returns the current platform role. We use the PowerDeterminePlatformRoleEx
// API for that.
POWER_PLATFORM_ROLE GetPlatformRole() {
@@ -525,116 +519,15 @@ bool IsTabletDevice(std::string* reason) {
return is_tablet;
}
-bool DisplayVirtualKeyboard() {
- if (GetVersion() < VERSION_WIN8)
- return false;
-
- if (IsKeyboardPresentOnSlate(nullptr))
- return false;
-
- static LazyInstance<string16>::Leaky osk_path = LAZY_INSTANCE_INITIALIZER;
-
- if (osk_path.Get().empty()) {
- // We need to launch TabTip.exe from the location specified under the
- // LocalServer32 key for the {{054AAE20-4BEA-4347-8A35-64A533254A9D}}
- // CLSID.
- // TabTip.exe is typically found at
- // c:\program files\common files\microsoft shared\ink on English Windows.
- // We don't want to launch TabTip.exe from
- // c:\program files (x86)\common files\microsoft shared\ink. This path is
- // normally found on 64 bit Windows.
- RegKey key(HKEY_LOCAL_MACHINE, kWindows8OSKRegPath,
- KEY_READ | KEY_WOW64_64KEY);
- DWORD osk_path_length = 1024;
- if (key.ReadValue(NULL,
- WriteInto(&osk_path.Get(), osk_path_length),
- &osk_path_length,
- NULL) != ERROR_SUCCESS) {
- DLOG(WARNING) << "Failed to read on screen keyboard path from registry";
- return false;
- }
- size_t common_program_files_offset =
- osk_path.Get().find(L"%CommonProgramFiles%");
- // Typically the path to TabTip.exe read from the registry will start with
- // %CommonProgramFiles% which needs to be replaced with the corresponding
- // expanded string.
- // If the path does not begin with %CommonProgramFiles% we use it as is.
- if (common_program_files_offset != string16::npos) {
- // Preserve the beginning quote in the path.
- osk_path.Get().erase(common_program_files_offset,
- wcslen(L"%CommonProgramFiles%"));
- // The path read from the registry contains the %CommonProgramFiles%
- // environment variable prefix. On 64 bit Windows the SHGetKnownFolderPath
- // function returns the common program files path with the X86 suffix for
- // the FOLDERID_ProgramFilesCommon value.
- // To get the correct path to TabTip.exe we first read the environment
- // variable CommonProgramW6432 which points to the desired common
- // files path. Failing that we fallback to the SHGetKnownFolderPath API.
-
- // We then replace the %CommonProgramFiles% value with the actual common
- // files path found in the process.
- string16 common_program_files_path;
- std::unique_ptr<wchar_t[]> common_program_files_wow6432;
- DWORD buffer_size =
- GetEnvironmentVariable(L"CommonProgramW6432", NULL, 0);
- if (buffer_size) {
- common_program_files_wow6432.reset(new wchar_t[buffer_size]);
- GetEnvironmentVariable(L"CommonProgramW6432",
- common_program_files_wow6432.get(),
- buffer_size);
- common_program_files_path = common_program_files_wow6432.get();
- DCHECK(!common_program_files_path.empty());
- } else {
- ScopedCoMem<wchar_t> common_program_files;
- if (FAILED(SHGetKnownFolderPath(FOLDERID_ProgramFilesCommon, 0, NULL,
- &common_program_files))) {
- return false;
- }
- common_program_files_path = common_program_files;
- }
-
- osk_path.Get().insert(1, common_program_files_path);
- }
- }
-
- HINSTANCE ret = ::ShellExecuteW(NULL,
- L"",
- osk_path.Get().c_str(),
- NULL,
- NULL,
- SW_SHOW);
- return reinterpret_cast<intptr_t>(ret) > 32;
-}
-
-bool DismissVirtualKeyboard() {
- if (GetVersion() < VERSION_WIN8)
- return false;
-
- // We dismiss the virtual keyboard by generating the ESC keystroke
- // programmatically.
- const wchar_t kOSKClassName[] = L"IPTip_Main_Window";
- HWND osk = ::FindWindow(kOSKClassName, NULL);
- if (::IsWindow(osk) && ::IsWindowEnabled(osk)) {
- PostMessage(osk, WM_SYSCOMMAND, SC_CLOSE, 0);
- return true;
- }
- return false;
-}
-
-enum DomainEnrollementState {UNKNOWN = -1, NOT_ENROLLED, ENROLLED};
+enum DomainEnrollmentState {UNKNOWN = -1, NOT_ENROLLED, ENROLLED};
static volatile long int g_domain_state = UNKNOWN;
bool IsEnrolledToDomain() {
// Doesn't make any sense to retry inside a user session because joining a
// domain will only kick in on a restart.
if (g_domain_state == UNKNOWN) {
- LPWSTR domain;
- NETSETUP_JOIN_STATUS join_status;
- if(::NetGetJoinInformation(NULL, &domain, &join_status) != NERR_Success)
- return false;
- ::NetApiBufferFree(domain);
::InterlockedCompareExchange(&g_domain_state,
- join_status == ::NetSetupDomainName ?
+ IsOS(OS_DOMAINMEMBER) ?
ENROLLED : NOT_ENROLLED,
UNKNOWN);
}
@@ -705,5 +598,30 @@ void DisableFlicks(HWND hwnd) {
TABLET_DISABLE_FLICKFALLBACKKEYS));
}
+bool IsProcessPerMonitorDpiAware() {
+ enum class PerMonitorDpiAware {
+ UNKNOWN = 0,
+ PER_MONITOR_DPI_UNAWARE,
+ PER_MONITOR_DPI_AWARE,
+ };
+ static PerMonitorDpiAware per_monitor_dpi_aware = PerMonitorDpiAware::UNKNOWN;
+ if (per_monitor_dpi_aware == PerMonitorDpiAware::UNKNOWN) {
+ per_monitor_dpi_aware = PerMonitorDpiAware::PER_MONITOR_DPI_UNAWARE;
+ HMODULE shcore_dll = ::LoadLibrary(L"shcore.dll");
+ if (shcore_dll) {
+ auto get_process_dpi_awareness_func =
+ reinterpret_cast<decltype(::GetProcessDpiAwareness)*>(
+ ::GetProcAddress(shcore_dll, "GetProcessDpiAwareness"));
+ if (get_process_dpi_awareness_func) {
+ PROCESS_DPI_AWARENESS awareness;
+ if (SUCCEEDED(get_process_dpi_awareness_func(nullptr, &awareness)) &&
+ awareness == PROCESS_PER_MONITOR_DPI_AWARE)
+ per_monitor_dpi_aware = PerMonitorDpiAware::PER_MONITOR_DPI_AWARE;
+ }
+ }
+ }
+ return per_monitor_dpi_aware == PerMonitorDpiAware::PER_MONITOR_DPI_AWARE;
+}
+
} // namespace win
} // namespace base
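IsProcessPerMonitorDpiAware() resolves GetProcessDpiAwareness from shcore.dll at runtime, so it degrades to "not per-monitor aware" on Windows versions that predate the API and caches the answer in a function-local static. A hedged caller-side sketch (the branching logic is illustrative, not from this patch):

#include "base/win/win_util.h"

// Illustrative only: decide on a scaling strategy once at startup, relying on
// the cached lookup inside IsProcessPerMonitorDpiAware().
void ConfigureDpiHandling() {
  if (base::win::IsProcessPerMonitorDpiAware()) {
    // Per-monitor aware: expect WM_DPICHANGED and rescale per display.
  } else {
    // System-DPI aware or unaware: a single scale factor applies everywhere.
  }
}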
diff --git a/chromium/base/win/win_util.h b/chromium/base/win/win_util.h
index 08e0112af91..23fcfb5707a 100644
--- a/chromium/base/win/win_util.h
+++ b/chromium/base/win/win_util.h
@@ -149,14 +149,6 @@ BASE_EXPORT bool IsKeyboardPresentOnSlate(std::string* reason);
offsetof(struct_name, member) + \
(sizeof static_cast<struct_name*>(NULL)->member)
-// Displays the on screen keyboard on Windows 8 and above. Returns true on
-// success.
-BASE_EXPORT bool DisplayVirtualKeyboard();
-
-// Dismisses the on screen keyboard if it is being displayed on Windows 8 and
-// above. Returns true on success.
-BASE_EXPORT bool DismissVirtualKeyboard();
-
// Returns true if the machine is enrolled to a domain.
BASE_EXPORT bool IsEnrolledToDomain();
@@ -184,6 +176,9 @@ BASE_EXPORT bool GetLoadedModulesSnapshot(HANDLE process,
BASE_EXPORT void EnableFlicks(HWND hwnd);
BASE_EXPORT void DisableFlicks(HWND hwnd);
+// Returns true if the process is per monitor DPI aware.
+BASE_EXPORT bool IsProcessPerMonitorDpiAware();
+
} // namespace win
} // namespace base