author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-07-12 14:07:37 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-07-17 10:29:26 +0000
commit    ec02ee4181c49b61fce1c8fb99292dbb8139cc90 (patch)
tree      25cde714b2b71eb639d1cd53f5a22e9ba76e14ef /chromium/base
parent    bb09965444b5bb20b096a291445170876225268d (diff)
BASELINE: Update Chromium to 59.0.3071.134
Change-Id: Id02ef6fb2204c5fd21668a1c3e6911c83b17585a
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/base')
-rw-r--r-- chromium/base/BUILD.gn | 100
-rw-r--r-- chromium/base/OWNERS | 5
-rw-r--r-- chromium/base/allocator/BUILD.gn | 11
-rw-r--r-- chromium/base/allocator/allocator_interception_mac.h | 15
-rw-r--r-- chromium/base/allocator/allocator_interception_mac.mm | 152
-rw-r--r-- chromium/base/allocator/allocator_interception_mac_unittest.mm | 64
-rw-r--r-- chromium/base/allocator/allocator_shim.cc | 6
-rw-r--r-- chromium/base/allocator/allocator_shim_internals.h | 21
-rw-r--r-- chromium/base/allocator/allocator_shim_override_glibc_weak_symbols.h | 32
-rw-r--r-- chromium/base/allocator/allocator_shim_override_mac_symbols.h | 7
-rw-r--r-- chromium/base/allocator/allocator_shim_unittest.cc | 22
-rw-r--r-- chromium/base/allocator/malloc_zone_functions_mac.cc | 22
-rw-r--r-- chromium/base/allocator/malloc_zone_functions_mac.h | 36
-rw-r--r-- chromium/base/allocator/malloc_zone_functions_mac_unittest.cc | 2
-rw-r--r-- chromium/base/allocator/partition_allocator/address_space_randomization.cc | 20
-rw-r--r-- chromium/base/allocator/partition_allocator/page_allocator.cc | 13
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc.cc | 499
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc.h | 1
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc | 21
-rw-r--r-- chromium/base/allocator/tcmalloc_unittest.cc | 11
-rw-r--r-- chromium/base/at_exit.cc | 4
-rw-r--r-- chromium/base/base.isolate | 60
-rw-r--r-- chromium/base/bind_internal.h | 4
-rw-r--r-- chromium/base/bind_unittest.cc | 38
-rw-r--r-- chromium/base/callback.h | 90
-rw-r--r-- chromium/base/callback_helpers.h | 4
-rw-r--r-- chromium/base/callback_internal.cc | 31
-rw-r--r-- chromium/base/callback_internal.h | 42
-rw-r--r-- chromium/base/callback_unittest.cc | 42
-rw-r--r-- chromium/base/command_line.cc | 15
-rw-r--r-- chromium/base/command_line_unittest.cc | 31
-rw-r--r-- chromium/base/containers/container_test_utils.h | 39
-rw-r--r-- chromium/base/containers/flat_map.h | 290
-rw-r--r-- chromium/base/containers/flat_map_unittest.cc | 173
-rw-r--r-- chromium/base/containers/flat_set.h | 702
-rw-r--r-- chromium/base/containers/flat_set_unittest.cc | 1233
-rw-r--r-- chromium/base/containers/flat_tree.h | 774
-rw-r--r-- chromium/base/containers/flat_tree_unittest.cc | 1385
-rw-r--r-- chromium/base/containers/mru_cache.h | 2
-rw-r--r-- chromium/base/critical_closure.h | 15
-rw-r--r-- chromium/base/critical_closure_internal_ios.mm | 5
-rw-r--r-- chromium/base/debug/activity_analyzer.cc | 188
-rw-r--r-- chromium/base/debug/activity_analyzer.h | 66
-rw-r--r-- chromium/base/debug/activity_analyzer_unittest.cc | 163
-rw-r--r-- chromium/base/debug/activity_tracker.cc | 793
-rw-r--r-- chromium/base/debug/activity_tracker.h | 321
-rw-r--r-- chromium/base/debug/activity_tracker_unittest.cc | 192
-rw-r--r-- chromium/base/debug/stack_trace.cc | 16
-rw-r--r-- chromium/base/debug/stack_trace.h | 15
-rw-r--r-- chromium/base/debug/stack_trace_unittest.cc | 15
-rw-r--r-- chromium/base/debug/task_annotator_unittest.cc | 2
-rw-r--r-- chromium/base/debug/thread_heap_usage_tracker_unittest.cc | 11
-rw-r--r-- chromium/base/deferred_sequenced_task_runner.cc | 53
-rw-r--r-- chromium/base/deferred_sequenced_task_runner.h | 11
-rw-r--r-- chromium/base/deferred_sequenced_task_runner_unittest.cc | 27
-rw-r--r-- chromium/base/environment.cc | 2
-rw-r--r-- chromium/base/feature_list.cc | 10
-rw-r--r-- chromium/base/feature_list.h | 7
-rw-r--r-- chromium/base/feature_list_unittest.cc | 3
-rw-r--r-- chromium/base/files/file.cc | 1
-rw-r--r-- chromium/base/files/file_descriptor_watcher_posix.cc | 10
-rw-r--r-- chromium/base/files/file_locking_unittest.cc | 14
-rw-r--r-- chromium/base/files/file_path.cc | 2
-rw-r--r-- chromium/base/files/file_path.h | 2
-rw-r--r-- chromium/base/files/file_path_watcher_linux.cc | 12
-rw-r--r-- chromium/base/files/file_path_watcher_unittest.cc | 4
-rw-r--r-- chromium/base/files/file_proxy.cc | 52
-rw-r--r-- chromium/base/files/file_unittest.cc | 11
-rw-r--r-- chromium/base/files/file_util_mac.mm | 12
-rw-r--r-- chromium/base/files/file_util_proxy.cc | 6
-rw-r--r-- chromium/base/files/file_util_unittest.cc | 8
-rw-r--r-- chromium/base/files/file_util_win.cc | 33
-rw-r--r-- chromium/base/files/important_file_writer_unittest.cc | 4
-rw-r--r-- chromium/base/i18n/encoding_detection.cc | 12
-rw-r--r-- chromium/base/i18n/file_util_icu.cc | 4
-rw-r--r-- chromium/base/i18n/message_formatter.cc | 5
-rw-r--r-- chromium/base/i18n/number_formatting.cc | 9
-rw-r--r-- chromium/base/i18n/rtl.cc | 10
-rw-r--r-- chromium/base/i18n/string_compare.cc | 5
-rw-r--r-- chromium/base/i18n/time_formatting.cc | 14
-rw-r--r-- chromium/base/i18n/time_formatting_unittest.cc | 3
-rw-r--r-- chromium/base/i18n/timezone.cc | 4
-rw-r--r-- chromium/base/i18n/unicodestring.h | 32
-rw-r--r-- chromium/base/json/json_parser.cc | 63
-rw-r--r-- chromium/base/json/json_parser.h | 2
-rw-r--r-- chromium/base/json/json_parser_unittest.cc | 51
-rw-r--r-- chromium/base/json/json_perftest.cc | 2
-rw-r--r-- chromium/base/json/json_writer.cc | 4
-rw-r--r-- chromium/base/json/json_writer_unittest.cc | 18
-rw-r--r-- chromium/base/lazy_instance.h | 54
-rw-r--r-- chromium/base/lazy_instance_unittest.cc | 23
-rw-r--r-- chromium/base/logging.h | 9
-rw-r--r-- chromium/base/logging_unittest.cc | 26
-rw-r--r-- chromium/base/mac/mach_port_broker_unittest.cc | 17
-rw-r--r-- chromium/base/mac/sdk_forward_declarations.h | 19
-rw-r--r-- chromium/base/mac/sdk_forward_declarations.mm | 5
-rw-r--r-- chromium/base/memory/aligned_memory_unittest.cc | 8
-rw-r--r-- chromium/base/memory/memory_coordinator_proxy.cc | 8
-rw-r--r-- chromium/base/memory/memory_coordinator_proxy.h | 4
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_mac.cc | 163
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_mac.h | 45
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_mac_unittest.cc | 215
-rw-r--r-- chromium/base/memory/ref_counted.cc | 31
-rw-r--r-- chromium/base/memory/ref_counted.h | 211
-rw-r--r-- chromium/base/memory/ref_counted_delete_on_sequence.h | 6
-rw-r--r-- chromium/base/memory/ref_counted_unittest.cc | 38
-rw-r--r-- chromium/base/memory/ref_counted_unittest.nc | 25
-rw-r--r-- chromium/base/memory/shared_memory.h | 18
-rw-r--r-- chromium/base/memory/shared_memory_mac_unittest.cc | 8
-rw-r--r-- chromium/base/memory/shared_memory_posix.cc | 27
-rw-r--r-- chromium/base/memory/shared_memory_tracker.cc | 92
-rw-r--r-- chromium/base/memory/shared_memory_tracker.h | 56
-rw-r--r-- chromium/base/memory/shared_memory_unittest.cc | 8
-rw-r--r-- chromium/base/memory/shared_memory_win_unittest.cc | 12
-rw-r--r-- chromium/base/memory/singleton.h | 16
-rw-r--r-- chromium/base/memory/singleton_objc.h | 60
-rw-r--r-- chromium/base/memory/weak_ptr_unittest.cc | 28
-rw-r--r-- chromium/base/message_loop/incoming_task_queue.cc | 10
-rw-r--r-- chromium/base/message_loop/incoming_task_queue.h | 3
-rw-r--r-- chromium/base/message_loop/message_loop.h | 2
-rw-r--r-- chromium/base/message_loop/message_loop_task_runner.cc | 12
-rw-r--r-- chromium/base/message_loop/message_loop_task_runner.h | 5
-rw-r--r-- chromium/base/message_loop/message_loop_task_runner_unittest.cc | 41
-rw-r--r-- chromium/base/message_loop/message_loop_test.cc | 189
-rw-r--r-- chromium/base/message_loop/message_loop_unittest.cc | 52
-rw-r--r-- chromium/base/message_loop/message_pump_glib_unittest.cc | 45
-rw-r--r-- chromium/base/message_loop/message_pump_libevent_unittest.cc | 12
-rw-r--r-- chromium/base/message_loop/message_pump_perftest.cc | 12
-rw-r--r-- chromium/base/metrics/OWNERS | 2
-rw-r--r-- chromium/base/metrics/bucket_ranges.h | 18
-rw-r--r-- chromium/base/metrics/histogram_functions.h | 7
-rw-r--r-- chromium/base/metrics/histogram_macros.h | 14
-rw-r--r-- chromium/base/metrics/histogram_macros_internal.h | 61
-rw-r--r-- chromium/base/metrics/histogram_macros_unittest.cc | 31
-rw-r--r-- chromium/base/metrics/histogram_unittest.nc | 16
-rw-r--r-- chromium/base/metrics/persistent_histogram_allocator.cc | 67
-rw-r--r-- chromium/base/metrics/persistent_histogram_allocator.h | 9
-rw-r--r-- chromium/base/metrics/persistent_histogram_allocator_unittest.cc | 37
-rw-r--r-- chromium/base/metrics/persistent_memory_allocator.cc | 113
-rw-r--r-- chromium/base/metrics/persistent_memory_allocator.h | 53
-rw-r--r-- chromium/base/metrics/persistent_memory_allocator_unittest.cc | 16
-rw-r--r-- chromium/base/metrics/statistics_recorder.cc | 20
-rw-r--r-- chromium/base/metrics/statistics_recorder.h | 2
-rw-r--r-- chromium/base/metrics/user_metrics.cc | 8
-rw-r--r-- chromium/base/native_library.h | 10
-rw-r--r-- chromium/base/native_library_unittest.cc | 4
-rw-r--r-- chromium/base/native_library_win.cc | 121
-rw-r--r-- chromium/base/nix/xdg_util.cc | 4
-rw-r--r-- chromium/base/nix/xdg_util_unittest.cc | 20
-rw-r--r-- chromium/base/numerics/safe_conversions_impl.h | 22
-rw-r--r-- chromium/base/numerics/saturated_arithmetic.h | 6
-rw-r--r-- chromium/base/numerics/saturated_arithmetic_arm.h | 6
-rw-r--r-- chromium/base/observer_list_threadsafe.h | 244
-rw-r--r-- chromium/base/observer_list_unittest.cc | 170
-rw-r--r-- chromium/base/optional.h | 3
-rw-r--r-- chromium/base/optional_unittest.cc | 4
-rw-r--r-- chromium/base/pickle.cc | 1
-rw-r--r-- chromium/base/posix/unix_domain_socket_linux.cc | 5
-rw-r--r-- chromium/base/posix/unix_domain_socket_linux.h | 7
-rw-r--r-- chromium/base/posix/unix_domain_socket_linux_unittest.cc | 8
-rw-r--r-- chromium/base/post_task_and_reply_with_result_internal.h | 9
-rw-r--r-- chromium/base/process/launch.h | 5
-rw-r--r-- chromium/base/process/launch_posix.cc | 8
-rw-r--r-- chromium/base/process/launch_win.cc | 12
-rw-r--r-- chromium/base/process/memory_unittest.cc | 20
-rw-r--r-- chromium/base/process/process_info_linux.cc | 6
-rw-r--r-- chromium/base/process/process_info_unittest.cc | 20
-rw-r--r-- chromium/base/process/process_linux.cc | 22
-rw-r--r-- chromium/base/process/process_metrics.cc | 5
-rw-r--r-- chromium/base/process/process_metrics.h | 142
-rw-r--r-- chromium/base/process/process_metrics_freebsd.cc | 2
-rw-r--r-- chromium/base/process/process_metrics_ios.cc | 44
-rw-r--r-- chromium/base/process/process_metrics_linux.cc | 66
-rw-r--r-- chromium/base/process/process_metrics_mac.cc | 211
-rw-r--r-- chromium/base/process/process_metrics_openbsd.cc | 2
-rw-r--r-- chromium/base/process/process_metrics_unittest.cc | 87
-rw-r--r-- chromium/base/process/process_metrics_win.cc | 55
-rw-r--r-- chromium/base/process/process_posix.cc | 1
-rw-r--r-- chromium/base/process/process_unittest.cc | 80
-rw-r--r-- chromium/base/process/process_util_unittest.cc | 122
-rw-r--r-- chromium/base/process/process_win.cc | 7
-rw-r--r-- chromium/base/run_loop_unittest.cc | 28
-rw-r--r-- chromium/base/sequence_checker_impl.cc | 4
-rw-r--r-- chromium/base/sequence_checker_unittest.cc | 5
-rw-r--r-- chromium/base/sequenced_task_runner.cc | 9
-rw-r--r-- chromium/base/sequenced_task_runner.h | 5
-rw-r--r-- chromium/base/sequenced_task_runner_unittest.cc | 10
-rw-r--r-- chromium/base/stl_util.h | 161
-rw-r--r-- chromium/base/stl_util_unittest.cc | 156
-rw-r--r-- chromium/base/strings/string_piece.h | 3
-rw-r--r-- chromium/base/strings/string_piece_unittest.cc | 14
-rw-r--r-- chromium/base/strings/string_util_unittest.cc | 52
-rw-r--r-- chromium/base/sync_socket.h | 3
-rw-r--r-- chromium/base/sync_socket_nacl.cc | 6
-rw-r--r-- chromium/base/sync_socket_posix.cc | 6
-rw-r--r-- chromium/base/sync_socket_win.cc | 6
-rw-r--r-- chromium/base/synchronization/atomic_flag_unittest.cc | 16
-rw-r--r-- chromium/base/synchronization/condition_variable_unittest.cc | 2
-rw-r--r-- chromium/base/synchronization/waitable_event.h | 3
-rw-r--r-- chromium/base/synchronization/waitable_event_posix.cc | 63
-rw-r--r-- chromium/base/synchronization/waitable_event_unittest.cc | 38
-rw-r--r-- chromium/base/sys_info.h | 21
-rw-r--r-- chromium/base/sys_info_android.cc | 39
-rw-r--r-- chromium/base/sys_info_ios.mm | 18
-rw-r--r-- chromium/base/sys_info_linux.cc | 23
-rw-r--r-- chromium/base/sys_info_mac.mm | 19
-rw-r--r-- chromium/base/sys_info_openbsd.cc | 2
-rw-r--r-- chromium/base/sys_info_posix.cc | 24
-rw-r--r-- chromium/base/sys_info_unittest.cc | 122
-rw-r--r-- chromium/base/sys_info_win.cc | 6
-rw-r--r-- chromium/base/task/cancelable_task_tracker.cc | 6
-rw-r--r-- chromium/base/task/cancelable_task_tracker_unittest.cc | 20
-rw-r--r-- chromium/base/task_runner.cc | 14
-rw-r--r-- chromium/base/task_runner.h | 9
-rw-r--r-- chromium/base/task_runner_util.h | 9
-rw-r--r-- chromium/base/task_scheduler/delayed_task_manager.cc | 7
-rw-r--r-- chromium/base/task_scheduler/post_task.cc | 39
-rw-r--r-- chromium/base/task_scheduler/post_task.h | 77
-rw-r--r-- chromium/base/task_scheduler/priority_queue.cc | 9
-rw-r--r-- chromium/base/task_scheduler/priority_queue.h | 5
-rw-r--r-- chromium/base/task_scheduler/priority_queue_unittest.cc | 25
-rw-r--r-- chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc | 453
-rw-r--r-- chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h | 97
-rw-r--r-- chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc | 470
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker.cc | 41
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker.h | 10
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool.h | 28
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl.cc | 460
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl.h | 88
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc | 281
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_params.cc | 16
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_params.h | 23
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_unittest.cc | 11
-rw-r--r-- chromium/base/task_scheduler/sequence.cc | 5
-rw-r--r-- chromium/base/task_scheduler/sequence_unittest.cc | 11
-rw-r--r-- chromium/base/task_scheduler/task.cc | 8
-rw-r--r-- chromium/base/task_scheduler/task.h | 4
-rw-r--r-- chromium/base/task_scheduler/task_scheduler.cc | 56
-rw-r--r-- chromium/base/task_scheduler/task_scheduler.h | 61
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl.cc | 108
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl.h | 23
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl_unittest.cc | 82
-rw-r--r-- chromium/base/task_scheduler/task_tracker_unittest.cc | 42
-rw-r--r-- chromium/base/task_scheduler/task_unittest.cc | 12
-rw-r--r-- chromium/base/task_scheduler/test_task_factory.cc | 4
-rw-r--r-- chromium/base/template_util.h | 103
-rw-r--r-- chromium/base/template_util_unittest.cc | 56
-rw-r--r-- chromium/base/test/BUILD.gn | 12
-rw-r--r-- chromium/base/third_party/dmg_fp/README.chromium | 1
-rw-r--r-- chromium/base/third_party/dmg_fp/g_fmt.cc | 6
-rw-r--r-- chromium/base/third_party/xdg_mime/README.chromium | 1
-rw-r--r-- chromium/base/third_party/xdg_mime/function_casts.patch | 44
-rw-r--r-- chromium/base/third_party/xdg_mime/xdgmime.c | 11
-rw-r--r-- chromium/base/threading/post_task_and_reply_impl.cc | 34
-rw-r--r-- chromium/base/threading/post_task_and_reply_impl.h | 6
-rw-r--r-- chromium/base/threading/post_task_and_reply_impl_unittest.cc | 35
-rw-r--r-- chromium/base/threading/sequenced_task_runner_handle_unittest.cc | 8
-rw-r--r-- chromium/base/threading/sequenced_worker_pool.cc | 136
-rw-r--r-- chromium/base/threading/sequenced_worker_pool.h | 18
-rw-r--r-- chromium/base/threading/sequenced_worker_pool_unittest.cc | 142
-rw-r--r-- chromium/base/threading/thread.cc | 4
-rw-r--r-- chromium/base/threading/thread_perftest.cc | 12
-rw-r--r-- chromium/base/threading/thread_restrictions.h | 15
-rw-r--r-- chromium/base/threading/thread_unittest.cc | 44
-rw-r--r-- chromium/base/threading/worker_pool.cc | 20
-rw-r--r-- chromium/base/threading/worker_pool.h | 7
-rw-r--r-- chromium/base/threading/worker_pool_posix.cc | 23
-rw-r--r-- chromium/base/threading/worker_pool_posix.h | 5
-rw-r--r-- chromium/base/threading/worker_pool_unittest.cc | 19
-rw-r--r-- chromium/base/threading/worker_pool_win.cc | 11
-rw-r--r-- chromium/base/timer/timer.cc | 9
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc | 21
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 5
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_register.cc | 80
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_register.h | 86
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc | 55
-rw-r--r-- chromium/base/trace_event/malloc_dump_provider.cc | 28
-rw-r--r-- chromium/base/trace_event/memory_allocator_dump.cc | 5
-rw-r--r-- chromium/base/trace_event/memory_allocator_dump.h | 11
-rw-r--r-- chromium/base/trace_event/memory_allocator_dump_unittest.cc | 10
-rw-r--r-- chromium/base/trace_event/memory_dump_manager.cc | 338
-rw-r--r-- chromium/base/trace_event/memory_dump_manager.h | 145
-rw-r--r-- chromium/base/trace_event/memory_dump_manager_unittest.cc | 213
-rw-r--r-- chromium/base/trace_event/memory_dump_provider_info.cc | 43
-rw-r--r-- chromium/base/trace_event/memory_dump_provider_info.h | 108
-rw-r--r-- chromium/base/trace_event/memory_dump_request_args.cc | 7
-rw-r--r-- chromium/base/trace_event/memory_dump_request_args.h | 53
-rw-r--r-- chromium/base/trace_event/memory_dump_scheduler.cc | 267
-rw-r--r-- chromium/base/trace_event/memory_dump_scheduler.h | 127
-rw-r--r-- chromium/base/trace_event/memory_dump_scheduler_unittest.cc | 201
-rw-r--r-- chromium/base/trace_event/memory_infra_background_whitelist.cc | 68
-rw-r--r-- chromium/base/trace_event/memory_peak_detector.cc | 288
-rw-r--r-- chromium/base/trace_event/memory_peak_detector.h | 184
-rw-r--r-- chromium/base/trace_event/memory_peak_detector_unittest.cc | 558
-rw-r--r-- chromium/base/trace_event/memory_usage_estimator.h | 3
-rw-r--r-- chromium/base/trace_event/process_memory_dump.h | 1
-rw-r--r-- chromium/base/trace_event/trace_category_unittest.cc | 2
-rw-r--r-- chromium/base/trace_event/trace_config.cc | 373
-rw-r--r-- chromium/base/trace_event/trace_config.h | 66
-rw-r--r-- chromium/base/trace_event/trace_config_category_filter.cc | 297
-rw-r--r-- chromium/base/trace_event/trace_config_category_filter.h | 86
-rw-r--r-- chromium/base/trace_event/trace_config_unittest.cc | 131
-rw-r--r-- chromium/base/trace_event/trace_event_argument.cc | 15
-rw-r--r-- chromium/base/trace_event/trace_event_etw_export_win.cc | 17
-rw-r--r-- chromium/base/trace_event/trace_event_etw_export_win.h | 4
-rw-r--r-- chromium/base/trace_event/trace_event_memory_overhead.cc | 10
-rw-r--r-- chromium/base/trace_event/trace_event_system_stats_monitor.cc | 10
-rw-r--r-- chromium/base/trace_event/trace_event_unittest.cc | 80
-rw-r--r-- chromium/base/trace_event/trace_log.cc | 52
-rw-r--r-- chromium/base/tuple.h | 20
-rw-r--r-- chromium/base/tuple_unittest.cc | 26
-rw-r--r-- chromium/base/value_conversions.cc | 9
-rw-r--r-- chromium/base/value_conversions.h | 8
-rw-r--r-- chromium/base/values.cc | 424
-rw-r--r-- chromium/base/values.h | 75
-rw-r--r-- chromium/base/values_unittest.cc | 402
-rw-r--r-- chromium/base/win/message_window.cc | 4
-rw-r--r-- chromium/base/win/scoped_comptr.h | 181
-rw-r--r-- chromium/base/win/scoped_comptr_unittest.cc | 17
-rw-r--r-- chromium/base/win/scoped_handle_unittest.cc | 4
-rw-r--r-- chromium/base/win/shortcut.cc | 16
-rw-r--r-- chromium/base/win/wait_chain_unittest.cc | 4
-rw-r--r-- chromium/base/win/win_util.cc | 9
-rw-r--r-- chromium/base/win/win_util.h | 2
-rw-r--r-- chromium/base/win/windows_version.cc | 4
-rw-r--r-- chromium/base/win/windows_version.h | 1
326 files changed, 14427 insertions(+), 7080 deletions(-)
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 21c303dcb10..88ef9bc54ac 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -87,35 +87,6 @@ if (is_nacl_nonsfi) {
}
}
-if (is_nacl) {
- # None of the files apply to nacl, and we can't make an empty static library.
- group("base_paths") {
- }
-} else {
- static_library("base_paths") {
- sources = [
- "base_paths.cc",
- "base_paths.h",
- "base_paths_android.cc",
- "base_paths_android.h",
- "base_paths_mac.h",
- "base_paths_mac.mm",
- "base_paths_posix.cc",
- "base_paths_posix.h",
- "base_paths_win.cc",
- "base_paths_win.h",
- ]
-
- if (is_android || is_mac || is_ios) {
- sources -= [ "base_paths_posix.cc" ]
- }
-
- configs += [ ":base_implementation" ]
-
- visibility = [ ":base" ]
- }
-}
-
if (is_android) {
config("android_system_libs") {
libs = [ "log" ] # Used by logging.cc.
@@ -276,7 +247,9 @@ component("base") {
"command_line.h",
"compiler_specific.h",
"containers/adapters.h",
+ "containers/flat_map.h",
"containers/flat_set.h",
+ "containers/flat_tree.h",
"containers/hash_tables.h",
"containers/linked_list.h",
"containers/mru_cache.h",
@@ -472,6 +445,7 @@ component("base") {
"mac/scoped_aedesc.h",
"mac/scoped_authorizationref.h",
"mac/scoped_block.h",
+ "mac/scoped_cffiledescriptorref.h",
"mac/scoped_cftyperef.h",
"mac/scoped_dispatch_object.h",
"mac/scoped_ionotificationportref.h",
@@ -850,8 +824,11 @@ component("base") {
"task_scheduler/scheduler_lock.h",
"task_scheduler/scheduler_lock_impl.cc",
"task_scheduler/scheduler_lock_impl.h",
+ "task_scheduler/scheduler_single_thread_task_runner_manager.cc",
+ "task_scheduler/scheduler_single_thread_task_runner_manager.h",
"task_scheduler/scheduler_worker.cc",
"task_scheduler/scheduler_worker.h",
+ "task_scheduler/scheduler_worker_params.h",
"task_scheduler/scheduler_worker_pool.h",
"task_scheduler/scheduler_worker_pool_impl.cc",
"task_scheduler/scheduler_worker_pool_impl.h",
@@ -991,6 +968,8 @@ component("base") {
"trace_event/memory_dump_manager.cc",
"trace_event/memory_dump_manager.h",
"trace_event/memory_dump_provider.h",
+ "trace_event/memory_dump_provider_info.cc",
+ "trace_event/memory_dump_provider_info.h",
"trace_event/memory_dump_request_args.cc",
"trace_event/memory_dump_request_args.h",
"trace_event/memory_dump_scheduler.cc",
@@ -999,6 +978,8 @@ component("base") {
"trace_event/memory_dump_session_state.h",
"trace_event/memory_infra_background_whitelist.cc",
"trace_event/memory_infra_background_whitelist.h",
+ "trace_event/memory_peak_detector.cc",
+ "trace_event/memory_peak_detector.h",
"trace_event/memory_usage_estimator.cc",
"trace_event/memory_usage_estimator.h",
"trace_event/process_memory_dump.cc",
@@ -1012,6 +993,8 @@ component("base") {
"trace_event/trace_category.h",
"trace_event/trace_config.cc",
"trace_event/trace_config.h",
+ "trace_event/trace_config_category_filter.cc",
+ "trace_event/trace_config_category_filter.h",
"trace_event/trace_event.h",
"trace_event/trace_event_android.cc",
"trace_event/trace_event_argument.cc",
@@ -1048,6 +1031,7 @@ component("base") {
"version.h",
"vlog.cc",
"vlog.h",
+ "win/current_module.h",
"win/enum_variant.cc",
"win/enum_variant.h",
"win/event_trace_consumer.h",
@@ -1101,6 +1085,24 @@ component("base") {
"win/wrapped_window_proc.h",
]
+ if (!is_nacl) {
+ sources += [
+ "base_paths.cc",
+ "base_paths.h",
+ "base_paths_android.cc",
+ "base_paths_android.h",
+ "base_paths_mac.h",
+ "base_paths_mac.mm",
+ "base_paths_posix.h",
+ "base_paths_win.cc",
+ "base_paths_win.h",
+ ]
+
+ if (is_linux) {
+ sources += [ "base_paths_posix.cc" ]
+ }
+ }
+
all_dependent_configs = []
defines = []
data = []
@@ -1120,7 +1122,6 @@ component("base") {
]
public_deps = [
- ":base_paths",
":base_static",
":build_date",
":debugging_flags",
@@ -1192,6 +1193,7 @@ component("base") {
"process/internal_linux.cc",
"process/memory_linux.cc",
"process/process_handle_linux.cc",
+ "process/process_info_linux.cc",
"process/process_iterator_linux.cc",
"process/process_metrics_linux.cc",
"sys_info_linux.cc",
@@ -1587,6 +1589,13 @@ component("base") {
sources += [ "power_monitor/power_monitor_device_source_posix.cc" ]
}
+ if (is_posix && !is_mac && !is_nacl) {
+ sources += [
+ "memory/shared_memory_tracker.cc",
+ "memory/shared_memory_tracker.h",
+ ]
+ }
+
if (!use_glib) {
sources -= [
"message_loop/message_pump_glib.cc",
@@ -1609,8 +1618,6 @@ component("base") {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
-
- allow_circular_includes_from = public_deps
}
buildflag_header("debugging_flags") {
@@ -1619,6 +1626,7 @@ buildflag_header("debugging_flags") {
flags = [
"ENABLE_PROFILING=$enable_profiling",
"ENABLE_MEMORY_TASK_PROFILER=$enable_memory_task_profiler",
+ "CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
]
}
@@ -1691,6 +1699,7 @@ component("i18n") {
"i18n/time_formatting.h",
"i18n/timezone.cc",
"i18n/timezone.h",
+ "i18n/unicodestring.h",
"i18n/utf8_validator_tables.cc",
"i18n/utf8_validator_tables.h",
]
@@ -1884,6 +1893,7 @@ if (is_ios || is_mac) {
test("base_unittests") {
sources = [
+ "allocator/allocator_interception_mac_unittest.mm",
"allocator/malloc_zone_functions_mac_unittest.cc",
"allocator/tcmalloc_unittest.cc",
"android/application_status_listener_unittest.cc",
@@ -1913,7 +1923,10 @@ test("base_unittests") {
"cancelable_callback_unittest.cc",
"command_line_unittest.cc",
"containers/adapters_unittest.cc",
+ "containers/container_test_utils.h",
+ "containers/flat_map_unittest.cc",
"containers/flat_set_unittest.cc",
+ "containers/flat_tree_unittest.cc",
"containers/hash_tables_unittest.cc",
"containers/linked_list_unittest.cc",
"containers/mru_cache_unittest.cc",
@@ -1963,6 +1976,7 @@ test("base_unittests") {
"i18n/time_formatting_unittest.cc",
"i18n/timezone_unittest.cc",
"id_map_unittest.cc",
+ "ios/crb_protocol_observers_unittest.mm",
"ios/device_util_unittest.mm",
"ios/weak_nsobject_unittest.mm",
"json/json_parser_unittest.cc",
@@ -2040,6 +2054,7 @@ test("base_unittests") {
"process/memory_unittest.cc",
"process/memory_unittest_mac.h",
"process/memory_unittest_mac.mm",
+ "process/process_info_unittest.cc",
"process/process_metrics_unittest.cc",
"process/process_metrics_unittest_ios.cc",
"process/process_unittest.cc",
@@ -2088,6 +2103,7 @@ test("base_unittests") {
"task_scheduler/delayed_task_manager_unittest.cc",
"task_scheduler/priority_queue_unittest.cc",
"task_scheduler/scheduler_lock_unittest.cc",
+ "task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc",
"task_scheduler/scheduler_worker_pool_impl_unittest.cc",
"task_scheduler/scheduler_worker_stack_unittest.cc",
"task_scheduler/scheduler_worker_unittest.cc",
@@ -2144,6 +2160,8 @@ test("base_unittests") {
"trace_event/java_heap_dump_provider_android_unittest.cc",
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/memory_dump_scheduler_unittest.cc",
+ "trace_event/memory_peak_detector_unittest.cc",
"trace_event/memory_usage_estimator_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
"trace_event/trace_category_unittest.cc",
@@ -2371,6 +2389,7 @@ if (enable_nocompile_tests) {
"bind_unittest.nc",
"callback_list_unittest.nc",
"callback_unittest.nc",
+ "memory/ref_counted_unittest.nc",
"memory/weak_ptr_unittest.nc",
"metrics/histogram_unittest.nc",
]
@@ -2436,6 +2455,7 @@ if (is_android) {
srcjar_deps = [
":base_android_java_enums_srcjar",
":base_build_config_gen",
+ ":base_java_aidl",
":base_native_libraries_gen",
]
@@ -2514,6 +2534,8 @@ if (is_android) {
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
"android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
"android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildProcessCreationParams.java",
+ "android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
]
# New versions of BuildConfig.java and NativeLibraries.java
@@ -2525,12 +2547,21 @@ if (is_android) {
]
}
+ android_aidl("base_java_aidl") {
+ import_include = [ "android/java/src" ]
+ sources = [
+ "android/java/src/org/chromium/base/process_launcher/ICallbackInt.aidl",
+ "android/java/src/org/chromium/base/process_launcher/IChildProcessService.aidl",
+ ]
+ }
+
android_library("base_javatests") {
testonly = true
deps = [
":base_java",
":base_java_test_support",
"//third_party/android_support_test_runner:runner_java",
+ "//third_party/junit:junit",
]
java_files = [
"android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
@@ -2552,6 +2583,8 @@ if (is_android) {
"//testing/android/reporter:reporter_java",
"//third_party/android_support_test_runner:exposed_instrumentation_api_publish_java",
"//third_party/android_support_test_runner:runner_java",
+ "//third_party/android_tools:android_support_chromium_java",
+ "//third_party/android_tools:android_support_compat_java",
"//third_party/hamcrest:hamcrest_core_java",
"//third_party/junit",
]
@@ -2560,6 +2593,8 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java",
"test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
"test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
@@ -2649,6 +2684,7 @@ if (is_android) {
]
java_files = [
"test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "test/android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
"test/android/java/src/org/chromium/base/TestSystemMessageHandler.java",
]
}
diff --git a/chromium/base/OWNERS b/chromium/base/OWNERS
index 0e7d0104961..28043c42589 100644
--- a/chromium/base/OWNERS
+++ b/chromium/base/OWNERS
@@ -36,11 +36,6 @@ per-file BUILD.gn=file://base/android/OWNERS
per-file feature_list*=asvitkine@chromium.org
per-file feature_list*=isherman@chromium.org
-# For bot infrastructure:
-per-file *.isolate=maruel@chromium.org
-per-file *.isolate=tandrii@chromium.org
-per-file *.isolate=vadimsh@chromium.org
-
# For TCMalloc tests:
per-file security_unittest.cc=jln@chromium.org
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index 8cdb06161f5..ac534817fc8 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -65,12 +65,10 @@ config("tcmalloc_flags") {
# tcmalloc contains some unused local template specializations.
"-Wno-unused-function",
- # tcmalloc uses COMPILE_ASSERT without static_assert but with
- # typedefs.
+ # tcmalloc uses COMPILE_ASSERT without static_assert but with typedefs.
"-Wno-unused-local-typedefs",
- # for magic2_ in debugallocation.cc (only built in Debug builds)
- # typedefs.
+ # for magic2_ in debugallocation.cc (only built in Debug builds) typedefs.
"-Wno-unused-private-field",
]
} else {
@@ -166,12 +164,15 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/heap-profile-table.cc",
"$tcmalloc_dir/src/heap-profile-table.h",
"$tcmalloc_dir/src/heap-profiler.cc",
+ "$tcmalloc_dir/src/heap-profiler.h",
"$tcmalloc_dir/src/internal_logging.cc",
"$tcmalloc_dir/src/internal_logging.h",
"$tcmalloc_dir/src/linked_list.h",
"$tcmalloc_dir/src/malloc_extension.cc",
+ "$tcmalloc_dir/src/malloc_extension.h",
"$tcmalloc_dir/src/malloc_hook-inl.h",
"$tcmalloc_dir/src/malloc_hook.cc",
+ "$tcmalloc_dir/src/malloc_hook.h",
"$tcmalloc_dir/src/maybe_threads.cc",
"$tcmalloc_dir/src/maybe_threads.h",
"$tcmalloc_dir/src/memory_region_map.cc",
@@ -187,6 +188,7 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/stack_trace_table.cc",
"$tcmalloc_dir/src/stack_trace_table.h",
"$tcmalloc_dir/src/stacktrace.cc",
+ "$tcmalloc_dir/src/stacktrace.h",
"$tcmalloc_dir/src/static_vars.cc",
"$tcmalloc_dir/src/static_vars.h",
"$tcmalloc_dir/src/symbolize.cc",
@@ -196,6 +198,7 @@ if (use_allocator == "tcmalloc") {
# #included by debugallocation_shim.cc
#"$tcmalloc_dir/src/tcmalloc.cc",
+ #"$tcmalloc_dir/src/tcmalloc.h",
"$tcmalloc_dir/src/thread_cache.cc",
"$tcmalloc_dir/src/thread_cache.h",
"$tcmalloc_dir/src/windows/port.cc",
diff --git a/chromium/base/allocator/allocator_interception_mac.h b/chromium/base/allocator/allocator_interception_mac.h
index 54e73de154f..68f1d53bb60 100644
--- a/chromium/base/allocator/allocator_interception_mac.h
+++ b/chromium/base/allocator/allocator_interception_mac.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include "base/base_export.h"
+#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
@@ -35,6 +36,20 @@ bool UncheckedCallocMac(size_t num_items, size_t size, void** result);
// Has no effect on the default malloc zone if the allocator shim already
// performs that interception.
BASE_EXPORT void InterceptAllocationsMac();
+
+// Updates all malloc zones to use their original functions.
+// Also calls ClearAllMallocZonesForTesting.
+BASE_EXPORT void UninterceptMallocZonesForTesting();
+
+// Periodically checks for, and shims new malloc zones. Stops checking after 1
+// minute.
+BASE_EXPORT void PeriodicallyShimNewMallocZones();
+
+// Exposed for testing.
+BASE_EXPORT void ShimNewMallocZones();
+BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
+ const MallocZoneFunctions* functions);
+
} // namespace allocator
} // namespace base
diff --git a/chromium/base/allocator/allocator_interception_mac.mm b/chromium/base/allocator/allocator_interception_mac.mm
index c484830da5e..4980051aff8 100644
--- a/chromium/base/allocator/allocator_interception_mac.mm
+++ b/chromium/base/allocator/allocator_interception_mac.mm
@@ -27,14 +27,15 @@
#include <new>
-#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
#include "base/allocator/malloc_zone_functions_mac.h"
+#include "base/bind.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/mac/mach_logging.h"
#include "base/process/memory.h"
#include "base/scoped_clear_errno.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "build/build_config.h"
#include "third_party/apple_apsl/CFBase.h"
@@ -264,6 +265,14 @@ id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
return result;
}
+void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
+ ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
+ if (!IsMallocZoneAlreadyStored(chrome_zone))
+ return;
+ MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+ ReplaceZoneFunctions(chrome_zone, &functions);
+}
+
} // namespace
bool UncheckedMallocMac(size_t size, void** result) {
@@ -294,44 +303,6 @@ bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
return *result != NULL;
}
-void ReplaceZoneFunctions(ChromeMallocZone* zone,
- const MallocZoneFunctions* functions) {
- // Remove protection.
- mach_vm_address_t reprotection_start = 0;
- mach_vm_size_t reprotection_length = 0;
- vm_prot_t reprotection_value = VM_PROT_NONE;
- DeprotectMallocZone(zone, &reprotection_start, &reprotection_length,
- &reprotection_value);
-
- CHECK(functions->malloc && functions->calloc && functions->valloc &&
- functions->free && functions->realloc);
- zone->malloc = functions->malloc;
- zone->calloc = functions->calloc;
- zone->valloc = functions->valloc;
- zone->free = functions->free;
- zone->realloc = functions->realloc;
- if (functions->batch_malloc)
- zone->batch_malloc = functions->batch_malloc;
- if (functions->batch_free)
- zone->batch_free = functions->batch_free;
- if (functions->size)
- zone->size = functions->size;
- if (zone->version >= 5 && functions->memalign) {
- zone->memalign = functions->memalign;
- }
- if (zone->version >= 6 && functions->free_definite_size) {
- zone->free_definite_size = functions->free_definite_size;
- }
-
- // Restore protection if it was active.
- if (reprotection_start) {
- kern_return_t result =
- mach_vm_protect(mach_task_self(), reprotection_start,
- reprotection_length, false, reprotection_value);
- MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
- }
-}
-
void StoreFunctionsForDefaultZone() {
ChromeMallocZone* default_zone = reinterpret_cast<ChromeMallocZone*>(
malloc_default_zone());
@@ -355,6 +326,13 @@ void StoreFunctionsForAllZones() {
}
void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
+ // The default zone does not get returned in malloc_get_all_zones().
+ ChromeMallocZone* default_zone =
+ reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+ if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
+ ReplaceZoneFunctions(default_zone, functions);
+ }
+
vm_address_t* zones;
unsigned int count;
kern_return_t kr =
@@ -363,7 +341,7 @@ void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
return;
for (unsigned int i = 0; i < count; ++i) {
ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
- if (IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc) {
+ if (DoesMallocZoneNeedReplacing(zone, functions)) {
ReplaceZoneFunctions(zone, functions);
}
}
@@ -392,7 +370,7 @@ void InterceptAllocationsMac() {
reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
if (!IsMallocZoneAlreadyStored(default_zone)) {
StoreZoneFunctions(default_zone, &g_old_zone);
- MallocZoneFunctions new_functions;
+ MallocZoneFunctions new_functions = {};
new_functions.malloc = oom_killer_malloc;
new_functions.calloc = oom_killer_calloc;
new_functions.valloc = oom_killer_valloc;
@@ -408,7 +386,7 @@ void InterceptAllocationsMac() {
reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
- MallocZoneFunctions new_functions;
+ MallocZoneFunctions new_functions = {};
new_functions.malloc = oom_killer_malloc_purgeable;
new_functions.calloc = oom_killer_calloc_purgeable;
new_functions.valloc = oom_killer_valloc_purgeable;
@@ -496,5 +474,95 @@ void InterceptAllocationsMac() {
reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
+void UninterceptMallocZonesForTesting() {
+ UninterceptMallocZoneForTesting(malloc_default_zone());
+ vm_address_t* zones;
+ unsigned int count;
+ kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+ CHECK(kr == KERN_SUCCESS);
+ for (unsigned int i = 0; i < count; ++i) {
+ UninterceptMallocZoneForTesting(
+ reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
+ }
+
+ ClearAllMallocZonesForTesting();
+}
+
+namespace {
+
+void ShimNewMallocZonesAndReschedule(base::Time end_time,
+ base::TimeDelta delay) {
+ ShimNewMallocZones();
+
+ if (base::Time::Now() > end_time)
+ return;
+
+ base::TimeDelta next_delay = delay * 2;
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
+ delay);
+}
+
+} // namespace
+
+void PeriodicallyShimNewMallocZones() {
+ base::Time end_time = base::Time::Now() + base::TimeDelta::FromMinutes(1);
+ base::TimeDelta initial_delay = base::TimeDelta::FromSeconds(1);
+ ShimNewMallocZonesAndReschedule(end_time, initial_delay);
+}
+
+void ShimNewMallocZones() {
+ StoreFunctionsForAllZones();
+
+ // Use the functions for the default zone as a template to replace those
+ // new zones.
+ ChromeMallocZone* default_zone =
+ reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+ DCHECK(IsMallocZoneAlreadyStored(default_zone));
+
+ MallocZoneFunctions new_functions;
+ StoreZoneFunctions(default_zone, &new_functions);
+ ReplaceFunctionsForStoredZones(&new_functions);
+}
+
+void ReplaceZoneFunctions(ChromeMallocZone* zone,
+ const MallocZoneFunctions* functions) {
+ // Remove protection.
+ mach_vm_address_t reprotection_start = 0;
+ mach_vm_size_t reprotection_length = 0;
+ vm_prot_t reprotection_value = VM_PROT_NONE;
+ DeprotectMallocZone(zone, &reprotection_start, &reprotection_length,
+ &reprotection_value);
+
+ CHECK(functions->malloc && functions->calloc && functions->valloc &&
+ functions->free && functions->realloc);
+ zone->malloc = functions->malloc;
+ zone->calloc = functions->calloc;
+ zone->valloc = functions->valloc;
+ zone->free = functions->free;
+ zone->realloc = functions->realloc;
+ if (functions->batch_malloc)
+ zone->batch_malloc = functions->batch_malloc;
+ if (functions->batch_free)
+ zone->batch_free = functions->batch_free;
+ if (functions->size)
+ zone->size = functions->size;
+ if (zone->version >= 5 && functions->memalign) {
+ zone->memalign = functions->memalign;
+ }
+ if (zone->version >= 6 && functions->free_definite_size) {
+ zone->free_definite_size = functions->free_definite_size;
+ }
+
+ // Restore protection if it was active.
+ if (reprotection_start) {
+ kern_return_t result =
+ mach_vm_protect(mach_task_self(), reprotection_start,
+ reprotection_length, false, reprotection_value);
+ MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
+ }
+}
+
} // namespace allocator
} // namespace base
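The ShimNewMallocZonesAndReschedule helper added above polls for newly registered zones with a doubling delay until a one-minute deadline. A minimal standalone sketch of that backoff pattern, with std::this_thread::sleep_for standing in for the real PostDelayedTask and a hypothetical CheckForNewZones in place of ShimNewMallocZones:

#include <chrono>
#include <iostream>
#include <thread>

// Hypothetical stand-in for ShimNewMallocZones().
void CheckForNewZones() { std::cout << "scanning for new zones\n"; }

int main() {
  using Clock = std::chrono::steady_clock;
  const auto end_time = Clock::now() + std::chrono::minutes(1);
  auto delay = std::chrono::seconds(1);
  // Scan, then re-schedule with twice the delay, as
  // PeriodicallyShimNewMallocZones() does above; doubling keeps the total
  // number of scans logarithmic in the deadline.
  while (Clock::now() <= end_time) {
    CheckForNewZones();
    std::this_thread::sleep_for(delay);
    delay *= 2;  // 1s, 2s, 4s, 8s, ...
  }
  return 0;
}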
diff --git a/chromium/base/allocator/allocator_interception_mac_unittest.mm b/chromium/base/allocator/allocator_interception_mac_unittest.mm
new file mode 100644
index 00000000000..c919ca0e8a9
--- /dev/null
+++ b/chromium/base/allocator/allocator_interception_mac_unittest.mm
@@ -0,0 +1,64 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/malloc_zone_functions_mac.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace allocator {
+
+namespace {
+void ResetMallocZone(ChromeMallocZone* zone) {
+ MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+ ReplaceZoneFunctions(zone, &functions);
+}
+
+void ResetAllMallocZones() {
+ ChromeMallocZone* default_malloc_zone =
+ reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+ ResetMallocZone(default_malloc_zone);
+
+ vm_address_t* zones;
+ unsigned int count;
+ kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+ if (kr != KERN_SUCCESS)
+ return;
+ for (unsigned int i = 0; i < count; ++i) {
+ ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+ ResetMallocZone(zone);
+ }
+}
+} // namespace
+
+class AllocatorInterceptionTest : public testing::Test {
+ protected:
+ void TearDown() override {
+ ResetAllMallocZones();
+ ClearAllMallocZonesForTesting();
+ }
+};
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+TEST_F(AllocatorInterceptionTest, ShimNewMallocZones) {
+ InitializeAllocatorShim();
+ ChromeMallocZone* default_malloc_zone =
+ reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+
+ malloc_zone_t new_zone;
+ memset(&new_zone, 1, sizeof(malloc_zone_t));
+ malloc_zone_register(&new_zone);
+ EXPECT_NE(new_zone.malloc, default_malloc_zone->malloc);
+ ShimNewMallocZones();
+ EXPECT_EQ(new_zone.malloc, default_malloc_zone->malloc);
+
+ malloc_zone_unregister(&new_zone);
+}
+#endif
+
+} // namespace allocator
+} // namespace base
diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc
index 7a5cfd6c6ee..36736cdc78d 100644
--- a/chromium/base/allocator/allocator_shim.cc
+++ b/chromium/base/allocator/allocator_shim.cc
@@ -23,6 +23,8 @@
#if defined(OS_MACOSX)
#include <malloc/malloc.h>
+
+#include "base/allocator/allocator_interception_mac.h"
#endif
// No calls to malloc / new in this file. They would cause re-entrancy of
@@ -326,9 +328,11 @@ void InitializeAllocatorShim() {
// traversed the shim this will route them to the default malloc zone.
InitializeDefaultDispatchToMacAllocator();
+ MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
+
// This replaces the default malloc zone, causing calls to malloc & friends
// from the codebase to be routed to ShimMalloc() above.
- OverrideMacSymbols();
+ base::allocator::ReplaceFunctionsForStoredZones(&functions);
}
} // namespace allocator
} // namespace base
diff --git a/chromium/base/allocator/allocator_shim_internals.h b/chromium/base/allocator/allocator_shim_internals.h
index 82624ee45b7..0196f899aef 100644
--- a/chromium/base/allocator/allocator_shim_internals.h
+++ b/chromium/base/allocator/allocator_shim_internals.h
@@ -18,7 +18,26 @@
#endif
// Shim layer symbols need to be ALWAYS exported, regardless of component build.
-#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
+//
+// If an exported symbol is linked into a DSO, it may be preempted by a
+// definition in the main executable. If this happens to an allocator symbol, it
+// will mean that the DSO will use the main executable's allocator. This is
+// normally relatively harmless -- regular allocations should all use the same
+// allocator, but if the DSO tries to hook the allocator it will not see any
+// allocations.
+//
+// However, if LLVM LTO is enabled, the compiler may inline the shim layer
+// symbols into callers. The end result is that allocator calls in DSOs may use
+// either the main executable's allocator or the DSO's allocator, depending on
+// whether the call was inlined. This is arguably a bug in LLVM caused by its
+// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
+// To work around the bug we use noinline to prevent the symbols from being
+// inlined.
+//
+// In the long run we probably want to avoid linking the allocator bits into
+// DSOs altogether. This will save a little space and stop giving DSOs the false
+// impression that they can hook the allocator.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
#endif // __GNUC__
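A sketch of why the added noinline matters, assuming a GCC/Clang toolchain: an exported symbol defined this way in a shared object keeps exactly one interposable definition, whereas without noinline LTO may copy the body into callers and split allocator traffic between the executable's and the DSO's definitions. The my_malloc name is illustrative only:

#include <cstddef>
#include <cstdlib>

// Same attribute combination as SHIM_ALWAYS_EXPORT above: exported from the
// DSO and never inlined, so every call site goes through the one symbol the
// dynamic linker can interpose.
#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))

extern "C" SHIM_ALWAYS_EXPORT void* my_malloc(size_t size) {
  // If LTO inlined this body into a caller, that caller would no longer be
  // redirected when the main executable provided its own my_malloc.
  return malloc(size);
}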
diff --git a/chromium/base/allocator/allocator_shim_override_glibc_weak_symbols.h b/chromium/base/allocator/allocator_shim_override_glibc_weak_symbols.h
index b1296369c1e..7f50ac81c30 100644
--- a/chromium/base/allocator/allocator_shim_override_glibc_weak_symbols.h
+++ b/chromium/base/allocator/allocator_shim_override_glibc_weak_symbols.h
@@ -56,23 +56,21 @@ void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
} // namespace
-SHIM_ALWAYS_EXPORT void* (*MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(
- size_t,
- const void*) = &GlibcMallocHook;
-
-SHIM_ALWAYS_EXPORT void* (*MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(
- void*,
- size_t,
- const void*) = &GlibcReallocHook;
-
-SHIM_ALWAYS_EXPORT void (*MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
- const void*) =
- &GlibcFreeHook;
-
-SHIM_ALWAYS_EXPORT void* (*MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(
- size_t,
- size_t,
- const void*) = &GlibcMemalignHook;
+__attribute__((visibility("default"))) void* (
+ *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
+ const void*) = &GlibcMallocHook;
+
+__attribute__((visibility("default"))) void* (
+ *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
+ &GlibcReallocHook;
+
+__attribute__((visibility("default"))) void (
+ *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
+ const void*) = &GlibcFreeHook;
+
+__attribute__((visibility("default"))) void* (
+ *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
+ &GlibcMemalignHook;
// 2) Redefine libc symbols themselves.
diff --git a/chromium/base/allocator/allocator_shim_override_mac_symbols.h b/chromium/base/allocator/allocator_shim_override_mac_symbols.h
index dceba48f635..0b65edb1547 100644
--- a/chromium/base/allocator/allocator_shim_override_mac_symbols.h
+++ b/chromium/base/allocator/allocator_shim_override_mac_symbols.h
@@ -7,15 +7,15 @@
#endif
#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
-#include "base/allocator/allocator_interception_mac.h"
#include "base/allocator/malloc_zone_functions_mac.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
namespace allocator {
-void OverrideMacSymbols() {
+MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
MallocZoneFunctions new_functions;
+ memset(&new_functions, 0, sizeof(MallocZoneFunctions));
new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
return ShimGetSizeEstimate(ptr, zone);
};
@@ -53,8 +53,7 @@ void OverrideMacSymbols() {
size_t size) {
ShimFreeDefiniteSize(ptr, size, zone);
};
-
- base::allocator::ReplaceFunctionsForStoredZones(&new_functions);
+ return new_functions;
}
} // namespace allocator
diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc
index ee90f6352a2..70a6c979c90 100644
--- a/chromium/base/allocator/allocator_shim_unittest.cc
+++ b/chromium/base/allocator/allocator_shim_unittest.cc
@@ -25,6 +25,8 @@
#include <windows.h>
#elif defined(OS_MACOSX)
#include <malloc/malloc.h>
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/mac/mac_util.h"
#include "third_party/apple_apsl/malloc.h"
#else
#include <malloc.h>
@@ -193,15 +195,17 @@ class AllocatorShimTest : public testing::Test {
subtle::Release_Store(&num_new_handler_calls, 0);
instance_ = this;
- }
-
#if defined(OS_MACOSX)
- static void SetUpTestCase() {
InitializeAllocatorShim();
- }
#endif
+ }
- void TearDown() override { instance_ = nullptr; }
+ void TearDown() override {
+ instance_ = nullptr;
+#if defined(OS_MACOSX)
+ UninterceptMallocZonesForTesting();
+#endif
+ }
protected:
size_t allocs_intercepted_by_size[kMaxSizeTracked];
@@ -355,7 +359,13 @@ TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
results.data(), count);
ASSERT_EQ(count, result_count);
- ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
+
+ // TODO(erikchen): On macOS 10.12+, batch_malloc in the default zone may
+ // forward to another zone, which we've also shimmed, resulting in
+ // MockBatchMalloc getting called twice as often as we'd expect. This
+ // re-entrancy into the allocator shim is a bug that needs to be fixed.
+ // https://crbug.com/693237.
+ // ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
std::vector<void*> results_copy(results);
malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
diff --git a/chromium/base/allocator/malloc_zone_functions_mac.cc b/chromium/base/allocator/malloc_zone_functions_mac.cc
index e64719da9e6..9a414960308 100644
--- a/chromium/base/allocator/malloc_zone_functions_mac.cc
+++ b/chromium/base/allocator/malloc_zone_functions_mac.cc
@@ -10,11 +10,13 @@
namespace base {
namespace allocator {
-MallocZoneFunctions* g_malloc_zones = nullptr;
-MallocZoneFunctions::MallocZoneFunctions() {}
+MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
+static_assert(std::is_pod<MallocZoneFunctions>::value,
+ "MallocZoneFunctions must be POD");
void StoreZoneFunctions(const ChromeMallocZone* zone,
MallocZoneFunctions* functions) {
+ memset(functions, 0, sizeof(MallocZoneFunctions));
functions->malloc = zone->malloc;
functions->calloc = zone->calloc;
functions->valloc = zone->valloc;
@@ -51,10 +53,6 @@ base::Lock& GetLock() {
void EnsureMallocZonesInitializedLocked() {
GetLock().AssertAcquired();
- if (!g_malloc_zones) {
- g_malloc_zones = reinterpret_cast<base::allocator::MallocZoneFunctions*>(
- calloc(kMaxZoneCount, sizeof(MallocZoneFunctions)));
- }
}
int g_zone_count = 0;
@@ -71,14 +69,14 @@ bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
} // namespace
-void StoreMallocZone(ChromeMallocZone* zone) {
+bool StoreMallocZone(ChromeMallocZone* zone) {
base::AutoLock l(GetLock());
EnsureMallocZonesInitializedLocked();
if (IsMallocZoneAlreadyStoredLocked(zone))
- return;
+ return false;
if (g_zone_count == kMaxZoneCount)
- return;
+ return false;
StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
++g_zone_count;
@@ -87,6 +85,7 @@ void StoreMallocZone(ChromeMallocZone* zone) {
// reads these values is triggered after this function returns. so we want to
// guarantee that they are committed at this stage"
base::subtle::MemoryBarrier();
+ return true;
}
bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
@@ -94,6 +93,11 @@ bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
return IsMallocZoneAlreadyStoredLocked(zone);
}
+bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
+ const MallocZoneFunctions* functions) {
+ return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
+}
+
int GetMallocZoneCountForTesting() {
base::AutoLock l(GetLock());
return g_zone_count;
diff --git a/chromium/base/allocator/malloc_zone_functions_mac.h b/chromium/base/allocator/malloc_zone_functions_mac.h
index c2ec2fcd353..a7f55433785 100644
--- a/chromium/base/allocator/malloc_zone_functions_mac.h
+++ b/chromium/base/allocator/malloc_zone_functions_mac.h
@@ -40,24 +40,23 @@ typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);
struct MallocZoneFunctions {
- MallocZoneFunctions();
- malloc_type malloc = nullptr;
- calloc_type calloc = nullptr;
- valloc_type valloc = nullptr;
- free_type free = nullptr;
- realloc_type realloc = nullptr;
- memalign_type memalign = nullptr;
- batch_malloc_type batch_malloc = nullptr;
- batch_free_type batch_free = nullptr;
- free_definite_size_type free_definite_size = nullptr;
- size_fn_type size = nullptr;
- const ChromeMallocZone* context = nullptr;
+ malloc_type malloc;
+ calloc_type calloc;
+ valloc_type valloc;
+ free_type free;
+ realloc_type realloc;
+ memalign_type memalign;
+ batch_malloc_type batch_malloc;
+ batch_free_type batch_free;
+ free_definite_size_type free_definite_size;
+ size_fn_type size;
+ const ChromeMallocZone* context;
};
-void StoreZoneFunctions(const ChromeMallocZone* zone,
- MallocZoneFunctions* functions);
+BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
+ MallocZoneFunctions* functions);
static constexpr int kMaxZoneCount = 30;
-BASE_EXPORT extern MallocZoneFunctions* g_malloc_zones;
+BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
// The array g_malloc_zones stores all information about malloc zones before
// they are shimmed. This information needs to be accessed during dispatch back
@@ -79,8 +78,13 @@ BASE_EXPORT extern MallocZoneFunctions* g_malloc_zones;
//
// Most allocations go through the default allocator. We will ensure that the
// default allocator is stored as the first MallocZoneFunctions.
-BASE_EXPORT void StoreMallocZone(ChromeMallocZone* zone);
+//
+// Returns whether the zone was successfully stored.
+BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
+BASE_EXPORT bool DoesMallocZoneNeedReplacing(
+ ChromeMallocZone* zone,
+ const MallocZoneFunctions* functions);
BASE_EXPORT int GetMallocZoneCountForTesting();
BASE_EXPORT void ClearAllMallocZonesForTesting();
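The changes above replace a lazily calloc-ed table with a constant-initialized global array and assert that MallocZoneFunctions is POD, so the table is usable from inside malloc itself before any static constructor has run. A compressed sketch of the pattern under those assumptions, with hypothetical names (ZoneHooks, StoreZone):

#include <cstddef>
#include <type_traits>

// POD record of per-zone hooks (stand-in for MallocZoneFunctions).
struct ZoneHooks {
  void* (*malloc_fn)(size_t);
  void (*free_fn)(void*);
};

// A POD global is zero-initialized at load time with no static initializer
// to run, which is what the std::is_pod static_assert above guarantees.
static_assert(std::is_pod<ZoneHooks>::value, "ZoneHooks must be POD");

constexpr int kMaxZones = 30;  // mirrors kMaxZoneCount
ZoneHooks g_zones[kMaxZones];
int g_zone_count = 0;

// Like the new bool-returning StoreMallocZone(): reports whether the zone
// was actually recorded instead of failing silently when the table is full.
bool StoreZone(const ZoneHooks& hooks) {
  if (g_zone_count == kMaxZones)
    return false;
  g_zones[g_zone_count++] = hooks;
  return true;
}

int main() {
  ZoneHooks hooks = {nullptr, nullptr};
  return StoreZone(hooks) ? 0 : 1;
}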
diff --git a/chromium/base/allocator/malloc_zone_functions_mac_unittest.cc b/chromium/base/allocator/malloc_zone_functions_mac_unittest.cc
index ad638fcbcb7..09aa4293624 100644
--- a/chromium/base/allocator/malloc_zone_functions_mac_unittest.cc
+++ b/chromium/base/allocator/malloc_zone_functions_mac_unittest.cc
@@ -10,7 +10,7 @@ namespace allocator {
class MallocZoneFunctionsTest : public testing::Test {
protected:
- void SetUp() override { ClearAllMallocZonesForTesting(); }
+ void TearDown() override { ClearAllMallocZonesForTesting(); }
};
TEST_F(MallocZoneFunctionsTest, TestDefaultZoneMallocFree) {
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
index d710e006bfd..d54fb4434e3 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -6,6 +6,7 @@
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/win/windows_version.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -15,6 +16,11 @@
#include <unistd.h>
#endif
+// VersionHelpers.h must be included after windows.h.
+#if defined(OS_WIN)
+#include <VersionHelpers.h>
+#endif
+
namespace base {
namespace {
@@ -89,11 +95,17 @@ void* GetRandomPageBase() {
// This address mask gives a low likelihood of address space collisions. We
// handle the situation gracefully if there is a collision.
#if defined(OS_WIN)
- // 64-bit Windows has a bizarrely small 8TB user address space. Allocates in
- // the 1-5TB region. TODO(palmer): See if Windows >= 8.1 has the full 47 bits,
- // and use it if so. crbug.com/672219

random &= 0x3ffffffffffUL;
- random += 0x10000000000UL;
+ // Windows >= 8.1 has the full 47 bits. Use them where available.
+ static bool windows_81 = false;
+ static bool windows_81_initialized = false;
+ if (!windows_81_initialized) {
+ windows_81 = IsWindows8Point1OrGreater();
+ windows_81_initialized = true;
+ }
+ if (!windows_81) {
+ random += 0x10000000000UL;
+ }
#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// This range is copied from the TSan source, but works for all tools.
random &= 0x007fffffffffUL;
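The constants in the Windows branch above decode as follows: 0x3ffffffffff masks the random value down to 42 bits, an offset below 4 TB, and 0x10000000000 is exactly 1 TB, so the pre-8.1 path lands in the 1-5 TB window of the old 8 TB user address space. A small arithmetic check of that masking:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kMask = 0x3ffffffffffULL;    // 2^42 - 1, i.e. < 4 TB
  const uint64_t kOffset = 0x10000000000ULL;  // 2^40, i.e. exactly 1 TB

  uint64_t random = 0xdeadbeefcafef00dULL;  // stand-in for the RNG output
  random &= kMask;                     // now in [0, 4 TB)
  uint64_t legacy = random + kOffset;  // pre-8.1: shifted into [1 TB, 5 TB)

  printf("masked base: 0x%llx\n", (unsigned long long)random);
  printf("legacy base: 0x%llx\n", (unsigned long long)legacy);
  return 0;
}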
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index 1884c4690ed..606155fce4a 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -6,8 +6,9 @@
#include <limits.h>
+#include <atomic>
+
#include "base/allocator/partition_allocator/address_space_randomization.h"
-#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/logging.h"
#include "build/build_config.h"
@@ -27,7 +28,7 @@
// On POSIX |mmap| uses a nearby address if the hint address is blocked.
static const bool kHintIsAdvisory = true;
-static volatile base::subtle::Atomic32 s_allocPageErrorCode = 0;
+static std::atomic<int32_t> s_allocPageErrorCode{0};
#elif defined(OS_WIN)
@@ -35,7 +36,7 @@ static volatile base::subtle::Atomic32 s_allocPageErrorCode = 0;
// |VirtualAlloc| will fail if allocation at the hint address is blocked.
static const bool kHintIsAdvisory = false;
-static base::subtle::Atomic32 s_allocPageErrorCode = ERROR_SUCCESS;
+static std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
#else
#error Unknown OS
@@ -58,14 +59,14 @@ static void* SystemAllocPages(
page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag);
if (!ret)
- base::subtle::Release_Store(&s_allocPageErrorCode, GetLastError());
+ s_allocPageErrorCode = GetLastError();
#else
int access_flag = page_accessibility == PageAccessible
? (PROT_READ | PROT_WRITE)
: PROT_NONE;
ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (ret == MAP_FAILED) {
- base::subtle::Release_Store(&s_allocPageErrorCode, errno);
+ s_allocPageErrorCode = errno;
ret = 0;
}
#endif
@@ -272,7 +273,7 @@ void DiscardSystemPages(void* address, size_t length) {
}
uint32_t GetAllocPageErrorCode() {
- return base::subtle::Acquire_Load(&s_allocPageErrorCode);
+ return s_allocPageErrorCode;
}
} // namespace base
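The hunks above replace base::subtle atomics with std::atomic. A condensed sketch of the resulting pattern (names hypothetical): plain assignment and implicit conversion use memory_order_seq_cst, which is at least as strong as the Release_Store/Acquire_Load pair they replace.

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> s_error{0};

void RecordAllocError(int32_t code) {
  s_error = code;  // Same as s_error.store(code, std::memory_order_seq_cst).
}

int32_t LastAllocError() {
  return s_error;  // Same as s_error.load(std::memory_order_seq_cst).
}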
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index eb4c6390a15..b13f36bb530 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -246,12 +246,12 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
}
#if !defined(ARCH_CPU_64_BITS)
-static NOINLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() {
+static NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
OOM_CRASH();
}
#endif
-static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) {
+static NOINLINE void PartitionOutOfMemory(const PartitionRootBase* root) {
#if !defined(ARCH_CPU_64_BITS)
// Check whether this OOM is due to a lot of super pages that are allocated
// but not committed, probably due to http://crbug.com/421387.
@@ -259,7 +259,7 @@ static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) {
root->total_size_of_direct_mapped_pages -
root->total_size_of_committed_pages >
kReasonableSizeOfUnusedPages) {
- partitionOutOfMemoryWithLotsOfUncommitedPages();
+ PartitionOutOfMemoryWithLotsOfUncommitedPages();
}
#endif
if (PartitionRootBase::gOomHandlingFunction)
@@ -267,11 +267,11 @@ static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) {
OOM_CRASH();
}
-static NOINLINE void partitionExcessiveAllocationSize() {
+static NOINLINE void PartitionExcessiveAllocationSize() {
OOM_CRASH();
}
-static NOINLINE void partitionBucketFull() {
+static NOINLINE void PartitionBucketFull() {
OOM_CRASH();
}
@@ -316,7 +316,7 @@ PartitionPageStateIsDecommitted(const PartitionPage* page) {
return ret;
}
-static void partitionIncreaseCommittedPages(PartitionRootBase* root,
+static void PartitionIncreaseCommittedPages(PartitionRootBase* root,
size_t len) {
root->total_size_of_committed_pages += len;
DCHECK(root->total_size_of_committed_pages <=
@@ -324,7 +324,7 @@ static void partitionIncreaseCommittedPages(PartitionRootBase* root,
root->total_size_of_direct_mapped_pages);
}
-static void partitionDecreaseCommittedPages(PartitionRootBase* root,
+static void PartitionDecreaseCommittedPages(PartitionRootBase* root,
size_t len) {
root->total_size_of_committed_pages -= len;
DCHECK(root->total_size_of_committed_pages <=
@@ -332,18 +332,18 @@ static void partitionDecreaseCommittedPages(PartitionRootBase* root,
root->total_size_of_direct_mapped_pages);
}
-static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root,
+static ALWAYS_INLINE void PartitionDecommitSystemPages(PartitionRootBase* root,
void* address,
size_t length) {
DecommitSystemPages(address, length);
- partitionDecreaseCommittedPages(root, length);
+ PartitionDecreaseCommittedPages(root, length);
}
-static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root,
+static ALWAYS_INLINE void PartitionRecommitSystemPages(PartitionRootBase* root,
void* address,
size_t length) {
RecommitSystemPages(address, length);
- partitionIncreaseCommittedPages(root, length);
+ PartitionIncreaseCommittedPages(root, length);
}
static ALWAYS_INLINE void* PartitionAllocPartitionPages(
@@ -364,7 +364,7 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
// allocation.
char* ret = root->next_partition_page;
root->next_partition_page += total_size;
- partitionIncreaseCommittedPages(root, total_size);
+ PartitionIncreaseCommittedPages(root, total_size);
return ret;
}
@@ -379,7 +379,7 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
return 0;
root->total_size_of_super_pages += kSuperPageSize;
- partitionIncreaseCommittedPages(root, total_size);
+ PartitionIncreaseCommittedPages(root, total_size);
root->next_super_page = super_page + kSuperPageSize;
char* ret = super_page + kPartitionPageSize;
@@ -445,13 +445,13 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
}
static ALWAYS_INLINE uint16_t
-partitionBucketPartitionPages(const PartitionBucket* bucket) {
+PartitionBucketPartitionPages(const PartitionBucket* bucket) {
return (bucket->num_system_pages_per_slot_span +
(kNumSystemPagesPerPartitionPage - 1)) /
kNumSystemPagesPerPartitionPage;
}
-static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) {
+static ALWAYS_INLINE void PartitionPageReset(PartitionPage* page) {
DCHECK(PartitionPageStateIsDecommitted(page));
page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket);
@@ -460,13 +460,13 @@ static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) {
page->next_page = nullptr;
}
-static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page,
+static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
PartitionBucket* bucket) {
// The bucket never changes. We set it up once.
page->bucket = bucket;
page->empty_cache_index = -1;
- partitionPageReset(page);
+ PartitionPageReset(page);
// If this page has just a single slot, do not set up page offsets for any
// page metadata other than the first one. This ensures that attempts to
@@ -474,17 +474,17 @@ static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page,
if (page->num_unprovisioned_slots == 1)
return;
- uint16_t num_partition_pages = partitionBucketPartitionPages(bucket);
- char* pageCharPtr = reinterpret_cast<char*>(page);
+ uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
+ char* page_char_ptr = reinterpret_cast<char*>(page);
for (uint16_t i = 1; i < num_partition_pages; ++i) {
- pageCharPtr += kPageMetadataSize;
- PartitionPage* secondaryPage =
- reinterpret_cast<PartitionPage*>(pageCharPtr);
- secondaryPage->page_offset = i;
+ page_char_ptr += kPageMetadataSize;
+ PartitionPage* secondary_page =
+ reinterpret_cast<PartitionPage*>(page_char_ptr);
+ secondary_page->page_offset = i;
}
}
-static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(
+static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
PartitionPage* page) {
DCHECK(page != &PartitionRootGeneric::gSeedPage);
uint16_t num_slots = page->num_unprovisioned_slots;
@@ -541,10 +541,10 @@ static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(
page->freelist_head = entry;
while (--num_new_freelist_entries) {
freelist_pointer += size;
- PartitionFreelistEntry* nextEntry =
+ PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
- entry->next = PartitionFreelistMask(nextEntry);
- entry = nextEntry;
+ entry->next = PartitionFreelistMask(next_entry);
+ entry = next_entry;
}
entry->next = PartitionFreelistMask(0);
} else {
@@ -561,7 +561,7 @@ static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(
// As potential pages are scanned, they are tidied up according to their state.
// Empty pages are swept on to the empty page list, decommitted pages on to the
// decommitted page list and full pages are unlinked from any list.
-static bool partitionSetNewActivePage(PartitionBucket* bucket) {
+static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
PartitionPage* page = bucket->active_pages_head;
if (page == &PartitionRootBase::gSeedPage)
return false;
@@ -597,7 +597,7 @@ static bool partitionSetNewActivePage(PartitionBucket* bucket) {
// num_full_pages is a uint16_t for efficient packing so guard against
// overflow to be safe.
if (UNLIKELY(!bucket->num_full_pages))
- partitionBucketFull();
+ PartitionBucketFull();
// Not necessary but might help stop accidents.
page->next_page = 0;
}
@@ -614,14 +614,14 @@ static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
-static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page,
+static ALWAYS_INLINE void PartitionPageSetRawSize(PartitionPage* page,
size_t size) {
- size_t* raw_sizePtr = PartitionPageGetRawSizePtr(page);
- if (UNLIKELY(raw_sizePtr != nullptr))
- *raw_sizePtr = size;
+ size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
+ if (UNLIKELY(raw_size_ptr != nullptr))
+ *raw_size_ptr = size;
}
-static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root,
+static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
int flags,
size_t raw_size) {
size_t size = PartitionDirectMapSize(raw_size);
@@ -648,9 +648,9 @@ static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root,
if (UNLIKELY(!ptr))
return nullptr;
- size_t committedPageSize = size + kSystemPageSize;
- root->total_size_of_direct_mapped_pages += committedPageSize;
- partitionIncreaseCommittedPages(root, committedPageSize);
+ size_t committed_page_size = size + kSystemPageSize;
+ root->total_size_of_direct_mapped_pages += committed_page_size;
+ PartitionIncreaseCommittedPages(root, committed_page_size);
char* slot = ptr + kPartitionPageSize;
SetSystemPagesInaccessible(ptr + (kSystemPageSize * 2),
@@ -679,9 +679,9 @@ static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root,
DCHECK(!page->empty_cache_index);
page->bucket = bucket;
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
- PartitionFreelistEntry* nextEntry =
+ PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
- nextEntry->next = PartitionFreelistMask(0);
+ next_entry->next = PartitionFreelistMask(0);
DCHECK(!bucket->active_pages_head);
DCHECK(!bucket->empty_pages_head);
@@ -690,21 +690,21 @@ static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root,
DCHECK(!bucket->num_full_pages);
bucket->slot_size = size;
- PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
- mapExtent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
- mapExtent->bucket = bucket;
+ PartitionDirectMapExtent* map_extent = partitionPageToDirectMapExtent(page);
+ map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
+ map_extent->bucket = bucket;
// Maintain the doubly-linked list of all direct mappings.
- mapExtent->next_extent = root->direct_map_list;
- if (mapExtent->next_extent)
- mapExtent->next_extent->prev_extent = mapExtent;
- mapExtent->prev_extent = nullptr;
- root->direct_map_list = mapExtent;
+ map_extent->next_extent = root->direct_map_list;
+ if (map_extent->next_extent)
+ map_extent->next_extent->prev_extent = map_extent;
+ map_extent->prev_extent = nullptr;
+ root->direct_map_list = map_extent;
return page;
}
-static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) {
+static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
PartitionRootBase* root = PartitionPageToRoot(page);
const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
size_t unmap_size = extent->map_size;
@@ -725,10 +725,10 @@ static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) {
// page.
unmap_size += kPartitionPageSize + kSystemPageSize;
- size_t uncommittedPageSize = page->bucket->slot_size + kSystemPageSize;
- partitionDecreaseCommittedPages(root, uncommittedPageSize);
- DCHECK(root->total_size_of_direct_mapped_pages >= uncommittedPageSize);
- root->total_size_of_direct_mapped_pages -= uncommittedPageSize;
+ size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
+ PartitionDecreaseCommittedPages(root, uncommitted_page_size);
+ DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+ root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
@@ -747,7 +747,7 @@ void* PartitionAllocSlowPath(PartitionRootBase* root,
// The slow path is called when the freelist is empty.
DCHECK(!bucket->active_pages_head->freelist_head);
- PartitionPage* newPage = nullptr;
+ PartitionPage* new_page = nullptr;
// For the PartitionAllocGeneric API, we have a bunch of buckets marked
// as special cases. We bounce them through to the slow path so that we
@@ -761,88 +761,88 @@ void* PartitionAllocSlowPath(PartitionRootBase* root,
if (size > kGenericMaxDirectMapped) {
if (returnNull)
return nullptr;
- partitionExcessiveAllocationSize();
+ PartitionExcessiveAllocationSize();
}
- newPage = partitionDirectMap(root, flags, size);
- } else if (LIKELY(partitionSetNewActivePage(bucket))) {
+ new_page = PartitionDirectMap(root, flags, size);
+ } else if (LIKELY(PartitionSetNewActivePage(bucket))) {
// First, did we find an active page in the active pages list?
- newPage = bucket->active_pages_head;
- DCHECK(PartitionPageStateIsActive(newPage));
+ new_page = bucket->active_pages_head;
+ DCHECK(PartitionPageStateIsActive(new_page));
} else if (LIKELY(bucket->empty_pages_head != nullptr) ||
LIKELY(bucket->decommitted_pages_head != nullptr)) {
// Second, look in our lists of empty and decommitted pages.
// Check empty pages first, which are preferred, but beware that an
// empty page might have been decommitted.
- while (LIKELY((newPage = bucket->empty_pages_head) != nullptr)) {
- DCHECK(newPage->bucket == bucket);
- DCHECK(PartitionPageStateIsEmpty(newPage) ||
- PartitionPageStateIsDecommitted(newPage));
- bucket->empty_pages_head = newPage->next_page;
+ while (LIKELY((new_page = bucket->empty_pages_head) != nullptr)) {
+ DCHECK(new_page->bucket == bucket);
+ DCHECK(PartitionPageStateIsEmpty(new_page) ||
+ PartitionPageStateIsDecommitted(new_page));
+ bucket->empty_pages_head = new_page->next_page;
// Accept the empty page unless it got decommitted.
- if (newPage->freelist_head) {
- newPage->next_page = nullptr;
+ if (new_page->freelist_head) {
+ new_page->next_page = nullptr;
break;
}
- DCHECK(PartitionPageStateIsDecommitted(newPage));
- newPage->next_page = bucket->decommitted_pages_head;
- bucket->decommitted_pages_head = newPage;
+ DCHECK(PartitionPageStateIsDecommitted(new_page));
+ new_page->next_page = bucket->decommitted_pages_head;
+ bucket->decommitted_pages_head = new_page;
}
- if (UNLIKELY(!newPage) &&
+ if (UNLIKELY(!new_page) &&
LIKELY(bucket->decommitted_pages_head != nullptr)) {
- newPage = bucket->decommitted_pages_head;
- DCHECK(newPage->bucket == bucket);
- DCHECK(PartitionPageStateIsDecommitted(newPage));
- bucket->decommitted_pages_head = newPage->next_page;
- void* addr = PartitionPageToPointer(newPage);
- partitionRecommitSystemPages(root, addr,
- PartitionBucketBytes(newPage->bucket));
- partitionPageReset(newPage);
+ new_page = bucket->decommitted_pages_head;
+ DCHECK(new_page->bucket == bucket);
+ DCHECK(PartitionPageStateIsDecommitted(new_page));
+ bucket->decommitted_pages_head = new_page->next_page;
+ void* addr = PartitionPageToPointer(new_page);
+ PartitionRecommitSystemPages(root, addr,
+ PartitionBucketBytes(new_page->bucket));
+ PartitionPageReset(new_page);
}
- DCHECK(newPage);
+ DCHECK(new_page);
} else {
// Third. If we get here, we need a brand new page.
- uint16_t num_partition_pages = partitionBucketPartitionPages(bucket);
+ uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
void* rawPages =
PartitionAllocPartitionPages(root, flags, num_partition_pages);
if (LIKELY(rawPages != nullptr)) {
- newPage = PartitionPointerToPageNoAlignmentCheck(rawPages);
- partitionPageSetup(newPage, bucket);
+ new_page = PartitionPointerToPageNoAlignmentCheck(rawPages);
+ PartitionPageSetup(new_page, bucket);
}
}
// Bail if we had a memory allocation failure.
- if (UNLIKELY(!newPage)) {
+ if (UNLIKELY(!new_page)) {
DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
if (returnNull)
return nullptr;
- partitionOutOfMemory(root);
+ PartitionOutOfMemory(root);
}
- bucket = newPage->bucket;
+ bucket = new_page->bucket;
DCHECK(bucket != &PartitionRootBase::gPagedBucket);
- bucket->active_pages_head = newPage;
- partitionPageSetRawSize(newPage, size);
+ bucket->active_pages_head = new_page;
+ PartitionPageSetRawSize(new_page, size);
// If we found an active page with free slots, or an empty page, we have a
// usable freelist head.
- if (LIKELY(newPage->freelist_head != nullptr)) {
- PartitionFreelistEntry* entry = newPage->freelist_head;
- PartitionFreelistEntry* newHead = PartitionFreelistMask(entry->next);
- newPage->freelist_head = newHead;
- newPage->num_allocated_slots++;
+ if (LIKELY(new_page->freelist_head != nullptr)) {
+ PartitionFreelistEntry* entry = new_page->freelist_head;
+ PartitionFreelistEntry* new_head = PartitionFreelistMask(entry->next);
+ new_page->freelist_head = new_head;
+ new_page->num_allocated_slots++;
return entry;
}
// Otherwise, we need to build the freelist.
- DCHECK(newPage->num_unprovisioned_slots);
- return partitionPageAllocAndFillFreelist(newPage);
+ DCHECK(new_page->num_unprovisioned_slots);
+ return PartitionPageAllocAndFillFreelist(new_page);
}
-static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root,
+static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
PartitionPage* page) {
DCHECK(PartitionPageStateIsEmpty(page));
DCHECK(!PartitionBucketIsDirectMapped(page->bucket));
void* addr = PartitionPageToPointer(page);
- partitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket));
+ PartitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket));
// We actually leave the decommitted page in the active list. We'll sweep
// it on to the decommitted page list when we next walk the active page
@@ -855,17 +855,17 @@ static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root,
DCHECK(PartitionPageStateIsDecommitted(page));
}
-static void partitionDecommitPageIfPossible(PartitionRootBase* root,
+static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
PartitionPage* page) {
DCHECK(page->empty_cache_index >= 0);
DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]);
page->empty_cache_index = -1;
if (PartitionPageStateIsEmpty(page))
- partitionDecommitPage(root, page);
+ PartitionDecommitPage(root, page);
}
-static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) {
+static ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
DCHECK(PartitionPageStateIsEmpty(page));
PartitionRootBase* root = PartitionPageToRoot(page);
@@ -877,30 +877,30 @@ static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) {
root->global_empty_page_ring[page->empty_cache_index] = 0;
}
- int16_t currentIndex = root->global_empty_page_ring_index;
- PartitionPage* pageToDecommit = root->global_empty_page_ring[currentIndex];
+ int16_t current_index = root->global_empty_page_ring_index;
+ PartitionPage* pageToDecommit = root->global_empty_page_ring[current_index];
// The page might well have been re-activated, filled up, etc. before we get
// around to looking at it here.
if (pageToDecommit)
- partitionDecommitPageIfPossible(root, pageToDecommit);
+ PartitionDecommitPageIfPossible(root, pageToDecommit);
// We put the empty slot span on our global list of "pages that were once
// empty". thus providing it a bit of breathing room to get re-used before
// we really free it. This improves performance, particularly on Mac OS X
// which has subpar memory management performance.
- root->global_empty_page_ring[currentIndex] = page;
- page->empty_cache_index = currentIndex;
- ++currentIndex;
- if (currentIndex == kMaxFreeableSpans)
- currentIndex = 0;
- root->global_empty_page_ring_index = currentIndex;
+ root->global_empty_page_ring[current_index] = page;
+ page->empty_cache_index = current_index;
+ ++current_index;
+ if (current_index == kMaxFreeableSpans)
+ current_index = 0;
+ root->global_empty_page_ring_index = current_index;
}
-static void partitionDecommitEmptyPages(PartitionRootBase* root) {
+static void PartitionDecommitEmptyPages(PartitionRootBase* root) {
for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
PartitionPage* page = root->global_empty_page_ring[i];
if (page)
- partitionDecommitPageIfPossible(root, page);
+ PartitionDecommitPageIfPossible(root, page);
root->global_empty_page_ring[i] = nullptr;
}
}
@@ -911,19 +911,19 @@ void PartitionFreeSlowPath(PartitionPage* page) {
if (LIKELY(page->num_allocated_slots == 0)) {
// Page became fully unused.
if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
- partitionDirectUnmap(page);
+ PartitionDirectUnmap(page);
return;
}
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
if (LIKELY(page == bucket->active_pages_head))
- (void)partitionSetNewActivePage(bucket);
+ (void)PartitionSetNewActivePage(bucket);
DCHECK(bucket->active_pages_head != page);
- partitionPageSetRawSize(page, 0);
+ PartitionPageSetRawSize(page, 0);
DCHECK(!PartitionPageGetRawSize(page));
- partitionRegisterEmptyPage(page);
+ PartitionRegisterEmptyPage(page);
} else {
DCHECK(!PartitionBucketIsDirectMapped(bucket));
// Ensure that the page is full. That's the only valid case if we
@@ -980,7 +980,7 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
// Shrink by decommitting unneeded pages and making them inaccessible.
size_t decommitSize = current_size - new_size;
- partitionDecommitSystemPages(root, char_ptr + new_size, decommitSize);
+ PartitionDecommitSystemPages(root, char_ptr + new_size, decommitSize);
SetSystemPagesInaccessible(char_ptr + new_size, decommitSize);
} else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) {
// Grow within the actually allocated memory. Just need to make the
@@ -988,7 +988,7 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
size_t recommit_size = new_size - current_size;
bool ret = SetSystemPagesAccessible(char_ptr + current_size, recommit_size);
CHECK(ret);
- partitionRecommitSystemPages(root, char_ptr + current_size, recommit_size);
+ PartitionRecommitSystemPages(root, char_ptr + current_size, recommit_size);
#if DCHECK_IS_ON()
memset(char_ptr + current_size, kUninitializedByte, recommit_size);
@@ -1004,7 +1004,7 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize);
#endif
- partitionPageSetRawSize(page, raw_size);
+ PartitionPageSetRawSize(page, raw_size);
DCHECK(PartitionPageGetRawSize(page) == raw_size);
page->bucket->slot_size = new_size;
@@ -1026,7 +1026,7 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
}
if (new_size > kGenericMaxDirectMapped)
- partitionExcessiveAllocationSize();
+ PartitionExcessiveAllocationSize();
DCHECK(PartitionPointerIsValid(PartitionCookieFreePointerAdjust(ptr)));
@@ -1043,22 +1043,27 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
}
}
- size_t actualNewSize = PartitionAllocActualSize(root, new_size);
- size_t actualOldSize = PartitionAllocGetSize(ptr);
+ size_t actual_new_size = PartitionAllocActualSize(root, new_size);
+ size_t actual_old_size = PartitionAllocGetSize(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
// new size is a significant percentage smaller. We could do the same if we
// determine it is a win.
- if (actualNewSize == actualOldSize) {
+ if (actual_new_size == actual_old_size) {
// Trying to allocate a block of size new_size would give us a block of
- // the same size as the one we've already got, so no point in doing
- // anything here.
+ // the same size as the one we've already got, so re-use the allocation
+ // after updating statistics (and cookies, if present).
+ PartitionPageSetRawSize(page, PartitionCookieSizeAdjustAdd(new_size));
+#if DCHECK_IS_ON()
+ // Write a new trailing cookie.
+ PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
+#endif
return ptr;
}
// This realloc cannot be resized in-place. Sadness.
void* ret = PartitionAllocGeneric(root, new_size, type_name);
- size_t copy_size = actualOldSize;
+ size_t copy_size = actual_old_size;
if (new_size < copy_size)
copy_size = new_size;
@@ -1089,14 +1094,14 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
return discardable_bytes;
}
- const size_t maxSlotCount =
+ const size_t max_slot_count =
(kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
- DCHECK(bucket_num_slots <= maxSlotCount);
+ DCHECK(bucket_num_slots <= max_slot_count);
DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
- char slotUsage[maxSlotCount];
- size_t lastSlot = static_cast<size_t>(-1);
- memset(slotUsage, 1, num_slots);
+ char slot_usage[max_slot_count];
+ size_t last_slot = static_cast<size_t>(-1);
+ memset(slot_usage, 1, num_slots);
char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
PartitionFreelistEntry* entry = page->freelist_head;
// First, walk the freelist for this page and make a bitmap of which slots
@@ -1104,7 +1109,7 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
while (entry) {
size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
DCHECK(slotIndex < num_slots);
- slotUsage[slotIndex] = 0;
+ slot_usage[slotIndex] = 0;
entry = PartitionFreelistMask(entry->next);
// If we have a slot where the masked freelist entry is 0, we can
// actually discard that freelist entry because touching a discarded
@@ -1112,59 +1117,59 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
// (Note that this optimization won't fire on big endian machines
// because the masking function is negation.)
if (!PartitionFreelistMask(entry))
- lastSlot = slotIndex;
+ last_slot = slotIndex;
}
  // If the slot(s) at the end of the slot span are not in use, we can
// truncate them entirely and rewrite the freelist.
- size_t truncatedSlots = 0;
- while (!slotUsage[num_slots - 1]) {
- truncatedSlots++;
+ size_t truncated_slots = 0;
+ while (!slot_usage[num_slots - 1]) {
+ truncated_slots++;
num_slots--;
DCHECK(num_slots);
}
// First, do the work of calculating the discardable bytes. Don't actually
// discard anything unless the discard flag was passed in.
- char* beginPtr = nullptr;
- char* endPtr = nullptr;
- size_t unprovisionedBytes = 0;
- if (truncatedSlots) {
- beginPtr = ptr + (num_slots * slot_size);
- endPtr = beginPtr + (slot_size * truncatedSlots);
- beginPtr = reinterpret_cast<char*>(
- RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr)));
+ char* begin_ptr = nullptr;
+ char* end_ptr = nullptr;
+ size_t unprovisioned_bytes = 0;
+ if (truncated_slots) {
+ begin_ptr = ptr + (num_slots * slot_size);
+ end_ptr = begin_ptr + (slot_size * truncated_slots);
+ begin_ptr = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
// We round the end pointer here up and not down because we're at the
// end of a slot span, so we "own" all the way up the page boundary.
- endPtr = reinterpret_cast<char*>(
- RoundUpToSystemPage(reinterpret_cast<size_t>(endPtr)));
- DCHECK(endPtr <= ptr + PartitionBucketBytes(bucket));
- if (beginPtr < endPtr) {
- unprovisionedBytes = endPtr - beginPtr;
- discardable_bytes += unprovisionedBytes;
+ end_ptr = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+ DCHECK(end_ptr <= ptr + PartitionBucketBytes(bucket));
+ if (begin_ptr < end_ptr) {
+ unprovisioned_bytes = end_ptr - begin_ptr;
+ discardable_bytes += unprovisioned_bytes;
}
}
- if (unprovisionedBytes && discard) {
- DCHECK(truncatedSlots > 0);
- size_t numNewEntries = 0;
- page->num_unprovisioned_slots += static_cast<uint16_t>(truncatedSlots);
+ if (unprovisioned_bytes && discard) {
+ DCHECK(truncated_slots > 0);
+ size_t num_new_entries = 0;
+ page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
// Rewrite the freelist.
- PartitionFreelistEntry** entryPtr = &page->freelist_head;
+ PartitionFreelistEntry** entry_ptr = &page->freelist_head;
for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) {
- if (slotUsage[slotIndex])
+ if (slot_usage[slotIndex])
continue;
PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(
ptr + (slot_size * slotIndex));
- *entryPtr = PartitionFreelistMask(entry);
- entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry);
- numNewEntries++;
+ *entry_ptr = PartitionFreelistMask(entry);
+ entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
+ num_new_entries++;
}
// Terminate the freelist chain.
- *entryPtr = nullptr;
+ *entry_ptr = nullptr;
// The freelist head is stored unmasked.
page->freelist_head = PartitionFreelistMask(page->freelist_head);
- DCHECK(numNewEntries == num_slots - page->num_allocated_slots);
+ DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
- DiscardSystemPages(beginPtr, unprovisionedBytes);
+ DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
// Next, walk the slots and for any not in use, consider where the system
@@ -1172,30 +1177,30 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
// system as long as we don't interfere with a freelist pointer or an
// adjacent slot.
for (size_t i = 0; i < num_slots; ++i) {
- if (slotUsage[i])
+ if (slot_usage[i])
continue;
// The first address we can safely discard is just after the freelist
// pointer. There's one quirk: if the freelist pointer is actually a
// null, we can discard that pointer value too.
- char* beginPtr = ptr + (i * slot_size);
- char* endPtr = beginPtr + slot_size;
- if (i != lastSlot)
- beginPtr += sizeof(PartitionFreelistEntry);
- beginPtr = reinterpret_cast<char*>(
- RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr)));
- endPtr = reinterpret_cast<char*>(
- RoundDownToSystemPage(reinterpret_cast<size_t>(endPtr)));
- if (beginPtr < endPtr) {
- size_t partialSlotBytes = endPtr - beginPtr;
- discardable_bytes += partialSlotBytes;
+ char* begin_ptr = ptr + (i * slot_size);
+ char* end_ptr = begin_ptr + slot_size;
+ if (i != last_slot)
+ begin_ptr += sizeof(PartitionFreelistEntry);
+ begin_ptr = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+ end_ptr = reinterpret_cast<char*>(
+ RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+ if (begin_ptr < end_ptr) {
+ size_t partial_slot_bytes = end_ptr - begin_ptr;
+ discardable_bytes += partial_slot_bytes;
if (discard)
- DiscardSystemPages(beginPtr, partialSlotBytes);
+ DiscardSystemPages(begin_ptr, partial_slot_bytes);
}
}
return discardable_bytes;
}
-static void partitionPurgeBucket(PartitionBucket* bucket) {
+static void PartitionPurgeBucket(PartitionBucket* bucket) {
if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
for (PartitionPage* page = bucket->active_pages_head; page;
page = page->next_page) {
@@ -1207,7 +1212,7 @@ static void partitionPurgeBucket(PartitionBucket* bucket) {
void PartitionPurgeMemory(PartitionRoot* root, int flags) {
if (flags & PartitionPurgeDecommitEmptyPages)
- partitionDecommitEmptyPages(root);
+ PartitionDecommitEmptyPages(root);
// We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
// here because that flag is only useful for allocations >= system page
// size. We only have allocations that large inside generic partitions
@@ -1217,12 +1222,12 @@ void PartitionPurgeMemory(PartitionRoot* root, int flags) {
void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
subtle::SpinLock::Guard guard(root->lock);
if (flags & PartitionPurgeDecommitEmptyPages)
- partitionDecommitEmptyPages(root);
+ PartitionDecommitEmptyPages(root);
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
PartitionBucket* bucket = &root->buckets[i];
if (bucket->slot_size >= kSystemPageSize)
- partitionPurgeBucket(bucket);
+ PartitionPurgeBucket(bucket);
}
}
}
@@ -1279,9 +1284,9 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
stats_out->bucket_slot_size = bucket->slot_size;
uint16_t bucket_num_slots = PartitionBucketSlots(bucket);
- size_t bucketUsefulStorage = stats_out->bucket_slot_size * bucket_num_slots;
+ size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
stats_out->allocated_page_size = PartitionBucketBytes(bucket);
- stats_out->active_bytes = bucket->num_full_pages * bucketUsefulStorage;
+ stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
stats_out->resident_bytes =
bucket->num_full_pages * stats_out->allocated_page_size;
@@ -1310,11 +1315,25 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
const char* partition_name,
bool is_light_dump,
PartitionStatsDumper* dumper) {
- PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+ PartitionMemoryStats stats = {0};
+ stats.total_mmapped_bytes = partition->total_size_of_super_pages +
+ partition->total_size_of_direct_mapped_pages;
+ stats.total_committed_bytes = partition->total_size_of_committed_pages;
+
+ size_t direct_mapped_allocations_total_size = 0;
+
static const size_t kMaxReportableDirectMaps = 4096;
- uint32_t direct_map_lengths[kMaxReportableDirectMaps];
- size_t num_direct_mapped_allocations = 0;
+ // Allocate on the heap rather than on the stack to avoid stack overflow
+ // skirmishes (on Windows, in particular).
+ std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
+ if (!is_light_dump) {
+ direct_map_lengths =
+ std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
+ }
+
+ PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+ size_t num_direct_mapped_allocations = 0;
{
subtle::SpinLock::Guard guard(partition->lock);
@@ -1327,55 +1346,52 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
bucket_stats[i].is_valid = false;
else
PartitionDumpBucketStats(&bucket_stats[i], bucket);
+ if (bucket_stats[i].is_valid) {
+ stats.total_resident_bytes += bucket_stats[i].resident_bytes;
+ stats.total_active_bytes += bucket_stats[i].active_bytes;
+ stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
+ stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
+ }
}
- for (PartitionDirectMapExtent* extent = partition->direct_map_list; extent;
- extent = extent->next_extent) {
+  for (PartitionDirectMapExtent* extent = partition->direct_map_list;
+ extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
+ extent = extent->next_extent, ++num_direct_mapped_allocations) {
DCHECK(!extent->next_extent ||
extent->next_extent->prev_extent == extent);
- direct_map_lengths[num_direct_mapped_allocations] =
- extent->bucket->slot_size;
- ++num_direct_mapped_allocations;
- if (num_direct_mapped_allocations == kMaxReportableDirectMaps)
- break;
+ size_t slot_size = extent->bucket->slot_size;
+ direct_mapped_allocations_total_size += slot_size;
+ if (is_light_dump)
+ continue;
+ direct_map_lengths[num_direct_mapped_allocations] = slot_size;
}
}
- // Call |PartitionsDumpBucketStats| after collecting stats because it can try
- // to allocate using |PartitionAllocGeneric| and it can't obtain the lock.
- PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = partition->total_size_of_super_pages +
- partition->total_size_of_direct_mapped_pages;
- stats.total_committed_bytes = partition->total_size_of_committed_pages;
- for (size_t i = 0; i < kGenericNumBuckets; ++i) {
- if (bucket_stats[i].is_valid) {
- stats.total_resident_bytes += bucket_stats[i].resident_bytes;
- stats.total_active_bytes += bucket_stats[i].active_bytes;
- stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
- stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
- if (!is_light_dump)
+ if (!is_light_dump) {
+ // Call |PartitionsDumpBucketStats| after collecting stats because it can
+ // try to allocate using |PartitionAllocGeneric| and it can't obtain the
+ // lock.
+ for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+ if (bucket_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
}
- }
-
- size_t direct_mapped_allocations_total_size = 0;
- for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
- uint32_t size = direct_map_lengths[i];
- direct_mapped_allocations_total_size += size;
- if (is_light_dump)
- continue;
- PartitionBucketMemoryStats stats;
- memset(&stats, '\0', sizeof(stats));
- stats.is_valid = true;
- stats.is_direct_map = true;
- stats.num_full_pages = 1;
- stats.allocated_page_size = size;
- stats.bucket_slot_size = size;
- stats.active_bytes = size;
- stats.resident_bytes = size;
- dumper->PartitionsDumpBucketStats(partition_name, &stats);
+ for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
+ uint32_t size = direct_map_lengths[i];
+
+ PartitionBucketMemoryStats stats;
+ memset(&stats, '\0', sizeof(stats));
+ stats.is_valid = true;
+ stats.is_direct_map = true;
+ stats.num_full_pages = 1;
+ stats.allocated_page_size = size;
+ stats.bucket_slot_size = size;
+ stats.active_bytes = size;
+ stats.resident_bytes = size;
+ dumper->PartitionsDumpBucketStats(partition_name, &stats);
+ }
}
+
stats.total_resident_bytes += direct_mapped_allocations_total_size;
stats.total_active_bytes += direct_mapped_allocations_total_size;
dumper->PartitionDumpTotals(partition_name, &stats);
@@ -1385,27 +1401,42 @@ void PartitionDumpStats(PartitionRoot* partition,
const char* partition_name,
bool is_light_dump,
PartitionStatsDumper* dumper) {
- static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
- PartitionBucketMemoryStats memory_stats[kMaxReportableBuckets];
- const size_t partitionNumBuckets = partition->num_buckets;
- DCHECK(partitionNumBuckets <= kMaxReportableBuckets);
-
- for (size_t i = 0; i < partitionNumBuckets; ++i)
- PartitionDumpBucketStats(&memory_stats[i], &partition->buckets()[i]);
- // PartitionsDumpBucketStats is called after collecting stats because it
- // can use PartitionAlloc to allocate and this can affect the statistics.
PartitionMemoryStats stats = {0};
stats.total_mmapped_bytes = partition->total_size_of_super_pages;
stats.total_committed_bytes = partition->total_size_of_committed_pages;
DCHECK(!partition->total_size_of_direct_mapped_pages);
+
+ static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
+ std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
+ if (!is_light_dump)
+ memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
+ new PartitionBucketMemoryStats[kMaxReportableBuckets]);
+
+ const size_t partitionNumBuckets = partition->num_buckets;
+ DCHECK(partitionNumBuckets <= kMaxReportableBuckets);
+
for (size_t i = 0; i < partitionNumBuckets; ++i) {
- if (memory_stats[i].is_valid) {
- stats.total_resident_bytes += memory_stats[i].resident_bytes;
- stats.total_active_bytes += memory_stats[i].active_bytes;
- stats.total_decommittable_bytes += memory_stats[i].decommittable_bytes;
- stats.total_discardable_bytes += memory_stats[i].discardable_bytes;
- if (!is_light_dump)
+ PartitionBucketMemoryStats bucket_stats = {0};
+ PartitionDumpBucketStats(&bucket_stats, &partition->buckets()[i]);
+ if (bucket_stats.is_valid) {
+ stats.total_resident_bytes += bucket_stats.resident_bytes;
+ stats.total_active_bytes += bucket_stats.active_bytes;
+ stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
+ stats.total_discardable_bytes += bucket_stats.discardable_bytes;
+ }
+ if (!is_light_dump) {
+ if (bucket_stats.is_valid)
+ memory_stats[i] = bucket_stats;
+ else
+ memory_stats[i].is_valid = false;
+ }
+ }
+ if (!is_light_dump) {
+ // PartitionsDumpBucketStats is called after collecting stats because it
+ // can use PartitionAlloc to allocate and this can affect the statistics.
+ for (size_t i = 0; i < partitionNumBuckets; ++i) {
+ if (memory_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
}
}
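One constraint drives the reordering in both dump functions: the dumper callbacks may allocate through PartitionAlloc, which takes the partition lock, so they can only run after the lock guard's scope ends. A condensed sketch of the resulting shape (the comments stand in for the real collection and reporting code):

void DumpStatsSketch(PartitionRootGeneric* partition,
                     PartitionStatsDumper* dumper) {
  {
    subtle::SpinLock::Guard guard(partition->lock);
    // Copy bucket and direct-map state into local (heap) buffers only;
    // no allocation and no dumper callbacks while the lock is held.
  }
  // Lock released: the dumper may now call back into the allocator
  // (e.g. via PartitionAllocGeneric) without self-deadlocking, so
  // PartitionsDumpBucketStats() and PartitionDumpTotals() run here.
}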
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index a3ff90e089f..c720a50a638 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -60,6 +60,7 @@
// - Better freelist masking function to guarantee fault on 32-bit.
#include <limits.h>
+#include <string.h>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/spin_lock.h"
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 1ed26bba972..0507c0b4aa9 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -2088,6 +2088,27 @@ TEST(PartitionAllocTest, PurgeDiscardable) {
}
}
+TEST(PartitionAllocTest, ReallocMovesCookies) {
+ TestSetup();
+
+ // Resize so as to be sure to hit a "resize in place" case, and ensure that
+ // use of the entire result is compatible with the debug mode's cookies, even
+ // when the bucket size is large enough to span more than one partition page
+ // and we can track the "raw" size. See https://crbug.com/709271
+ const size_t kSize = base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
+ void* ptr =
+ PartitionAllocGeneric(generic_allocator.root(), kSize + 1, type_name);
+ EXPECT_TRUE(ptr);
+
+ memset(ptr, 0xbd, kSize + 1);
+ ptr = PartitionReallocGeneric(generic_allocator.root(), ptr, kSize + 2,
+ type_name);
+ EXPECT_TRUE(ptr);
+
+ memset(ptr, 0xbd, kSize + 2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+}
+
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
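The test exercises the in-place path fixed in the partition_alloc.cc hunk above: in DCHECK builds every allocation is bracketed by cookies, and an in-place resize must rewrite the trailing cookie at the new end of the user region. A layout sketch (kCookieSize here is an illustrative value; the real constant lives in partition_alloc.h):

#include <cstddef>

// [ cookie | user data: raw_size - 2 * kCookieSize | cookie ]
// The trailing cookie sits immediately past the user region, so after an
// in-place resize it is rewritten at ptr + new_size; the memset() calls in
// the test then cover the full new user region without tripping the check.
constexpr size_t kCookieSize = 16;  // Illustrative only.

constexpr size_t RawSizeForUserSize(size_t user_size) {
  return user_size + 2 * kCookieSize;  // One cookie on each side.
}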
diff --git a/chromium/base/allocator/tcmalloc_unittest.cc b/chromium/base/allocator/tcmalloc_unittest.cc
index 5313bfdf652..47246585f06 100644
--- a/chromium/base/allocator/tcmalloc_unittest.cc
+++ b/chromium/base/allocator/tcmalloc_unittest.cc
@@ -8,6 +8,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/process/process_metrics.h"
+#include "base/sys_info.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -86,6 +87,10 @@ static void TestCalloc(size_t n, size_t s, bool ok) {
}
}
+bool IsLowMemoryDevice() {
+ return base::SysInfo::AmountOfPhysicalMemory() <= 256LL * 1024 * 1024;
+}
+
} // namespace
TEST(TCMallocTest, Malloc) {
@@ -154,6 +159,12 @@ TEST(TCMallocTest, Realloc) {
}
}
+ // The logic below tries to allocate kNumEntries * 9000 ~= 130 MB of memory.
+ // This would cause the test to crash on low memory devices with no VM
+ // overcommit (e.g., chromecast).
+ if (IsLowMemoryDevice())
+ return;
+
// Now make sure realloc works correctly even when we overflow the
// packed cache, so some entries are evicted from the cache.
// The cache has 2^12 entries, keyed by page number.
diff --git a/chromium/base/at_exit.cc b/chromium/base/at_exit.cc
index 5dcc83cb2f5..e0025ea0d3b 100644
--- a/chromium/base/at_exit.cc
+++ b/chromium/base/at_exit.cc
@@ -81,6 +81,10 @@ void AtExitManager::ProcessCallbacksNow() {
g_top_manager->processing_callbacks_ = true;
}
+ // Relax the cross-thread access restriction to non-thread-safe RefCount.
+ // It's safe since all other threads should be terminated at this point.
+ ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
+
while (!tasks.empty()) {
base::Closure task = tasks.top();
task.Run();
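ScopedAllowCrossThreadRefCountAccess (declared in base/memory/ref_counted.h) is an RAII escape hatch: while an instance is alive, the cross-thread DCHECK on non-thread-safe ref counts is suppressed. A generic sketch of the shape, not the real implementation:

class ScopedAllowSketch {
 public:
  ScopedAllowSketch() { ++allow_depth_; }
  ~ScopedAllowSketch() { --allow_depth_; }
  static bool IsAllowed() { return allow_depth_ > 0; }

 private:
  // A plain int suffices for this sketch at shutdown; as the comment in the
  // hunk notes, all other threads are expected to have terminated by now.
  static int allow_depth_;
};
int ScopedAllowSketch::allow_depth_ = 0;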
diff --git a/chromium/base/base.isolate b/chromium/base/base.isolate
deleted file mode 100644
index 079d07d8100..00000000000
--- a/chromium/base/base.isolate
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'includes': [
- # While the target 'base' doesn't depend on ../third_party/icu/icu.gyp
- # itself, virtually all targets using it has to include icu. The only
- # exception is the Windows sandbox (?).
- '../third_party/icu/icu.isolate',
- # Sanitizer-instrumented third-party libraries (if enabled).
- '../third_party/instrumented_libraries/instrumented_libraries.isolate',
- # MSVS runtime libraries.
- '../build/config/win/msvs_dependencies.isolate',
- ],
- 'conditions': [
- ['use_custom_libcxx==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/lib/libc++.so',
- ],
- },
- }],
- ['OS=="mac" and asan==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
- ],
- },
- }],
- ['OS=="win" and asan==1 and component=="shared_library"', {
- 'variables': {
- 'files': [
- # We only need x.y.z/lib/windows/clang_rt.asan_dynamic-i386.dll,
- # but since the version (x.y.z) changes, just grab the whole dir.
- '../third_party/llvm-build/Release+Asserts/lib/clang/',
- ],
- },
- }],
- ['OS=="linux" and (asan==1 or lsan==1 or msan==1 or tsan==1)', {
- 'variables': {
- 'files': [
- # For llvm-symbolizer.
- '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
- ],
- },
- }],
- ['asan==1 or lsan==1 or msan==1 or tsan==1', {
- 'variables': {
- 'files': [
- '../tools/valgrind/asan/',
- '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
- ],
- },
- }],
- # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
- ['asan==0 or lsan==0 or msan==0 or tsan==0', {
- 'variables': {},
- }],
- ],
-}
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index 8988bdca226..c8af87d5806 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -360,7 +360,7 @@ struct Invoker<StorageType, R(UnboundArgs...)> {
return InvokeHelper<is_weak_call, R>::MakeItSo(
std::forward<Functor>(functor),
- Unwrap(base::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
+ Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
std::forward<UnboundArgs>(unbound_args)...);
}
};
@@ -393,7 +393,7 @@ bool ApplyCancellationTraitsImpl(const Functor& functor,
const BoundArgsTuple& bound_args,
IndexSequence<indices...>) {
return CallbackCancellationTraits<Functor, BoundArgsTuple>::IsCancelled(
- functor, base::get<indices>(bound_args)...);
+ functor, std::get<indices>(bound_args)...);
}
// Relays |base| to corresponding CallbackCancellationTraits<>::Run(). Returns
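The base::get to std::get switch works because the call sites already expand a pack of indices over the bound-arguments tuple. The underlying idiom, reduced to a standalone sketch, is essentially C++17's std::apply written against C++14:

#include <cstddef>
#include <tuple>
#include <utility>

template <typename F, typename Tuple, std::size_t... Is>
auto ApplyImpl(F&& f, Tuple&& t, std::index_sequence<Is...>) {
  // std::get<Is>(t)... expands to one tuple element per index in the pack.
  return std::forward<F>(f)(std::get<Is>(std::forward<Tuple>(t))...);
}

template <typename F, typename Tuple>
auto Apply(F&& f, Tuple&& t) {
  constexpr std::size_t kSize = std::tuple_size<std::decay_t<Tuple>>::value;
  return ApplyImpl(std::forward<F>(f), std::forward<Tuple>(t),
                   std::make_index_sequence<kSize>());
}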
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index 6e01f7f3fdb..38a66b25214 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -1303,65 +1303,59 @@ TEST_F(BindTest, OnceCallback) {
static_assert(std::is_constructible<
RepeatingClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be copyable.");
- static_assert(is_assignable<
- RepeatingClosure, const RepeatingClosure&>::value,
+ static_assert(
+ std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be copy-assignable.");
// Move constructor and assignment of RepeatingCallback.
static_assert(std::is_constructible<
RepeatingClosure, RepeatingClosure&&>::value,
"RepeatingClosure should be movable.");
- static_assert(is_assignable<
- RepeatingClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be move-assignable");
+ static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be move-assignable");
// Conversions from OnceCallback to RepeatingCallback.
static_assert(!std::is_constructible<
RepeatingClosure, const OnceClosure&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!is_assignable<
- RepeatingClosure, const OnceClosure&>::value,
+ static_assert(
+ !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
// Destructive conversions from OnceCallback to RepeatingCallback.
static_assert(!std::is_constructible<
RepeatingClosure, OnceClosure&&>::value,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!is_assignable<
- RepeatingClosure, OnceClosure&&>::value,
- "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
+ "OnceClosure should not be convertible to RepeatingClosure.");
// Copy constructor and assignment of OnceCallback.
static_assert(!std::is_constructible<
OnceClosure, const OnceClosure&>::value,
"OnceClosure should not be copyable.");
- static_assert(!is_assignable<
- OnceClosure, const OnceClosure&>::value,
- "OnceClosure should not be copy-assignable");
+ static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
+ "OnceClosure should not be copy-assignable");
// Move constructor and assignment of OnceCallback.
static_assert(std::is_constructible<
OnceClosure, OnceClosure&&>::value,
"OnceClosure should be movable.");
- static_assert(is_assignable<
- OnceClosure, OnceClosure&&>::value,
- "OnceClosure should be move-assignable.");
+ static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
+ "OnceClosure should be move-assignable.");
// Conversions from RepeatingCallback to OnceCallback.
static_assert(std::is_constructible<
OnceClosure, const RepeatingClosure&>::value,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(is_assignable<
- OnceClosure, const RepeatingClosure&>::value,
- "RepeatingClosure should be convertible to OnceClosure.");
+ static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
+ "RepeatingClosure should be convertible to OnceClosure.");
// Destructive conversions from RepeatingCallback to OnceCallback.
static_assert(std::is_constructible<
OnceClosure, RepeatingClosure&&>::value,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(is_assignable<
- OnceClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be covretible to OnceClosure.");
+ static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
+ "RepeatingClosure should be covretible to OnceClosure.");
OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
std::move(cb).Run();
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index 9bb0c0f6679..c91e1a88d38 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -21,71 +21,6 @@ namespace base {
namespace internal {
-template <typename CallbackType>
-struct IsOnceCallback : std::false_type {};
-
-template <typename Signature>
-struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
-
-// RunMixin provides different variants of `Run()` function to `Callback<>`
-// based on the type of callback.
-template <typename CallbackType>
-class RunMixin;
-
-// Specialization for OnceCallback.
-template <typename R, typename... Args>
-class RunMixin<OnceCallback<R(Args...)>> {
- private:
- using CallbackType = OnceCallback<R(Args...)>;
-
- public:
- using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
- R Run(Args... args) const & {
- // Note: even though this static_assert will trivially always fail, it
- // cannot be simply replaced with static_assert(false, ...) because:
- // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
- // argument does not evaluate to true.
- // - Per [temp.res]/p8, if no valid specialization can be generated for a
- // template definition, and that template is not instantiated, the
- // template definition is ill-formed, no diagnostic required.
- // These two clauses, taken together, would allow a conforming C++ compiler
- // to immediately reject static_assert(false, ...), even inside an
- // uninstantiated template.
- static_assert(!IsOnceCallback<CallbackType>::value,
- "OnceCallback::Run() may only be invoked on a non-const "
- "rvalue, i.e. std::move(callback).Run().");
- }
-
- R Run(Args... args) && {
- // Move the callback instance into a local variable before the invocation,
- // that ensures the internal state is cleared after the invocation.
- // It's not safe to touch |this| after the invocation, since running the
- // bound function may destroy |this|.
- CallbackType cb = static_cast<CallbackType&&>(*this);
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
- return f(cb.bind_state_.get(), std::forward<Args>(args)...);
- }
-};
-
-// Specialization for RepeatingCallback.
-template <typename R, typename... Args>
-class RunMixin<RepeatingCallback<R(Args...)>> {
- private:
- using CallbackType = RepeatingCallback<R(Args...)>;
-
- public:
- using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
- R Run(Args... args) const {
- const CallbackType& cb = static_cast<const CallbackType&>(*this);
- PolymorphicInvoke f =
- reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
- return f(cb.bind_state_.get(), std::forward<Args>(args)...);
- }
-};
-
template <typename From, typename To>
struct IsCallbackConvertible : std::false_type {};
@@ -100,14 +35,14 @@ template <typename R,
internal::CopyMode copy_mode,
internal::RepeatMode repeat_mode>
class Callback<R(Args...), copy_mode, repeat_mode>
- : public internal::CallbackBase<copy_mode>,
- public internal::RunMixin<Callback<R(Args...), copy_mode, repeat_mode>> {
+ : public internal::CallbackBase<copy_mode> {
public:
static_assert(repeat_mode != internal::RepeatMode::Once ||
copy_mode == internal::CopyMode::MoveOnly,
"OnceCallback must be MoveOnly.");
using RunType = R(Args...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
@@ -135,7 +70,26 @@ class Callback<R(Args...), copy_mode, repeat_mode>
return this->EqualsInternal(other);
}
- friend class internal::RunMixin<Callback>;
+ R Run(Args... args) const & {
+ static_assert(repeat_mode == internal::RepeatMode::Repeating,
+ "OnceCallback::Run() may only be invoked on a non-const "
+ "rvalue, i.e. std::move(callback).Run().");
+
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
+ return f(this->bind_state_.get(), std::forward<Args>(args)...);
+ }
+
+ R Run(Args... args) && {
+    // Move the callback instance into a local variable before the invocation;
+    // this ensures the internal state is cleared after the invocation.
+ // It's not safe to touch |this| after the invocation, since running the
+ // bound function may destroy |this|.
+ Callback cb = std::move(*this);
+ PolymorphicInvoke f =
+ reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+ return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+ }
};
} // namespace base
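The deleted RunMixin comment explains why a bare static_assert(false, ...) is unusable: a conforming compiler may reject it even inside a template that is never instantiated. The inlined Run() keeps its assert dependent on repeat_mode, the same trick as the classic dependent-false helper (sketch):

#include <type_traits>

template <typename T>
struct AlwaysFalse : std::false_type {};

template <typename T>
void MustNotBeInstantiated() {
  // Dependent on T, so [temp.res]/8 does not allow rejecting this template
  // until someone actually instantiates it.
  static_assert(AlwaysFalse<T>::value, "this overload must not be used");
}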
diff --git a/chromium/base/callback_helpers.h b/chromium/base/callback_helpers.h
index ec3d6cbf168..6e0aee88822 100644
--- a/chromium/base/callback_helpers.h
+++ b/chromium/base/callback_helpers.h
@@ -8,8 +8,8 @@
// generated). Instead, consider adding methods here.
//
// ResetAndReturn(&cb) is like cb.Reset() but allows executing a callback (via a
-// copy) after the original callback is Reset(). This can be handy if Run()
-// reads/writes the variable holding the Callback.
+// move or copy) after the original callback is Reset(). This can be handy if
+// Run() reads/writes the variable holding the Callback.
#ifndef BASE_CALLBACK_HELPERS_H_
#define BASE_CALLBACK_HELPERS_H_
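A minimal usage sketch of the documented behavior (class and member names hypothetical): the member callback is cleared before Run() executes, so code run by the callback can safely install a replacement.

#include "base/callback.h"
#include "base/callback_helpers.h"

class Consumer {
 public:
  void OnDone() {
    // |done_callback_| is already null when Run() executes; if Run()
    // re-enters and installs a new callback, it is not wiped afterwards.
    base::ResetAndReturn(&done_callback_).Run();
  }

 private:
  base::Closure done_callback_;
};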
diff --git a/chromium/base/callback_internal.cc b/chromium/base/callback_internal.cc
index 4afd567f0fe..a760f0664c2 100644
--- a/chromium/base/callback_internal.cc
+++ b/chromium/base/callback_internal.cc
@@ -17,6 +17,10 @@ bool ReturnFalse(const BindStateBase*) {
} // namespace
+void BindStateBaseRefCountTraits::Destruct(const BindStateBase* bind_state) {
+ bind_state->destructor_(bind_state);
+}
+
BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*))
: BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
@@ -26,19 +30,9 @@ BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*is_cancelled)(const BindStateBase*))
: polymorphic_invoke_(polymorphic_invoke),
- ref_count_(0),
destructor_(destructor),
is_cancelled_(is_cancelled) {}
-void BindStateBase::AddRef() const {
- AtomicRefCountInc(&ref_count_);
-}
-
-void BindStateBase::Release() const {
- if (!AtomicRefCountDec(&ref_count_))
- destructor_(this);
-}
-
CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c) = default;
CallbackBase<CopyMode::MoveOnly>&
@@ -54,6 +48,16 @@ CallbackBase<CopyMode::MoveOnly>& CallbackBase<CopyMode::MoveOnly>::operator=(
return *this;
}
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+ CallbackBase<CopyMode::Copyable>&& c)
+ : bind_state_(std::move(c.bind_state_)) {}
+
+CallbackBase<CopyMode::MoveOnly>& CallbackBase<CopyMode::MoveOnly>::operator=(
+ CallbackBase<CopyMode::Copyable>&& c) {
+ bind_state_ = std::move(c.bind_state_);
+ return *this;
+}
+
void CallbackBase<CopyMode::MoveOnly>::Reset() {
// NULL the bind_state_ last, since it may be holding the last ref to whatever
// object owns us, and we may be deleted after that.
@@ -70,10 +74,9 @@ bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
return bind_state_ == other.bind_state_;
}
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(
- BindStateBase* bind_state)
- : bind_state_(bind_state) {
- DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(BindStateBase* bind_state)
+ : bind_state_(bind_state ? AdoptRef(bind_state) : nullptr) {
+ DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
}
CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
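
The constructor change above moves BindStateBase to the adoption protocol: the object starts with one implicit reference, and the first scoped_refptr takes it over via AdoptRef() instead of incrementing the count. A sketch of the same protocol on an ordinary type (Foo is hypothetical; AdoptRef() and REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE come from base/memory/ref_counted.h, as used in this patch):

    #include "base/logging.h"
    #include "base/memory/ref_counted.h"

    class Foo : public base::RefCountedThreadSafe<Foo> {
     public:
      REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
      Foo() = default;

     private:
      friend class base::RefCountedThreadSafe<Foo>;
      ~Foo() = default;
    };

    void Use() {
      // AdoptRef() assumes ownership of the initial reference without
      // bumping the count, so HasOneRef() holds right after adoption.
      scoped_refptr<Foo> foo = base::AdoptRef(new Foo);
      DCHECK(foo->HasOneRef());
    }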
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index f7501f96c5d..29b07c23bd7 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -8,17 +8,29 @@
#ifndef BASE_CALLBACK_INTERNAL_H_
#define BASE_CALLBACK_INTERNAL_H_
-#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
namespace base {
+
+struct FakeBindState;
+
namespace internal {
+
template <CopyMode copy_mode>
class CallbackBase;
+class BindStateBase;
+
+template <typename Functor, typename... BoundArgs>
+struct BindState;
+
+struct BindStateBaseRefCountTraits {
+ static void Destruct(const BindStateBase*);
+};
+
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments. It
// behaves as an existential type that is used by a corresponding
@@ -30,38 +42,43 @@ class CallbackBase;
// Creating a vtable for every BindState template instantiation results in a lot
// of bloat. Its only task is to call the destructor which can be done with a
// function pointer.
-class BASE_EXPORT BindStateBase {
+class BASE_EXPORT BindStateBase
+ : public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
using InvokeFuncStorage = void(*)();
- protected:
+ private:
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*));
BindStateBase(InvokeFuncStorage polymorphic_invoke,
void (*destructor)(const BindStateBase*),
bool (*is_cancelled)(const BindStateBase*));
+
~BindStateBase() = default;
- private:
- friend class scoped_refptr<BindStateBase>;
+ friend struct BindStateBaseRefCountTraits;
+ friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
+
template <CopyMode copy_mode>
friend class CallbackBase;
+ // Whitelist subclasses that access the destructor of BindStateBase.
+ template <typename Functor, typename... BoundArgs>
+ friend struct BindState;
+ friend struct ::base::FakeBindState;
+
bool IsCancelled() const {
return is_cancelled_(this);
}
- void AddRef() const;
- void Release() const;
-
// In C++, it is safe to cast function pointers to function pointers of
  // another type. It is not okay to use void*. We create an InvokeFuncStorage
  // that can store our function pointer, and then cast it back to
// the original type on usage.
InvokeFuncStorage polymorphic_invoke_;
- mutable AtomicRefCount ref_count_;
-
// Pointer to a function that will properly destroy |this|.
void (*destructor_)(const BindStateBase*);
bool (*is_cancelled_)(const BindStateBase*);
@@ -82,8 +99,11 @@ class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
explicit CallbackBase(const CallbackBase<CopyMode::Copyable>& c);
CallbackBase& operator=(const CallbackBase<CopyMode::Copyable>& c);
+ explicit CallbackBase(CallbackBase<CopyMode::Copyable>&& c);
+ CallbackBase& operator=(CallbackBase<CopyMode::Copyable>&& c);
+
// Returns true if Callback is null (doesn't refer to anything).
- bool is_null() const { return bind_state_.get() == NULL; }
+ bool is_null() const { return !bind_state_; }
explicit operator bool() const { return !is_null(); }
  // Returns true if the callback invocation will be a no-op due to a
  // cancellation.
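
The RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> base above uses the custom-destruction hook of base's ref counting: when the count hits zero, Traits::Destruct() runs instead of a plain delete, which lets BindStateBase dispatch through its stored destructor_ function pointer and keep BindState subclasses vtable-free. A generic sketch of that traits pattern (Widget and WidgetTraits are hypothetical):

    #include "base/memory/ref_counted.h"

    class Widget;

    struct WidgetTraits {
      // Invoked by RefCountedThreadSafe when the last reference drops,
      // in place of the default "delete widget;".
      static void Destruct(const Widget* widget);
    };

    class Widget : public base::RefCountedThreadSafe<Widget, WidgetTraits> {
     private:
      friend struct WidgetTraits;
      friend class base::RefCountedThreadSafe<Widget, WidgetTraits>;
      ~Widget() = default;
    };

    void WidgetTraits::Destruct(const Widget* widget) {
      // A real Destruct() might hop to an owning thread first; the minimal
      // version just deletes.
      delete widget;
    }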
diff --git a/chromium/base/callback_unittest.cc b/chromium/base/callback_unittest.cc
index a41736946a1..f76adbcdd2c 100644
--- a/chromium/base/callback_unittest.cc
+++ b/chromium/base/callback_unittest.cc
@@ -21,24 +21,13 @@ void NopInvokeFunc() {}
// based on a type we declared in the anonymous namespace above to remove any
// chance of colliding with another instantiation and breaking the
// one-definition-rule.
-struct FakeBindState1 : internal::BindStateBase {
- FakeBindState1() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
- private:
- ~FakeBindState1() {}
- static void Destroy(const internal::BindStateBase* self) {
- delete static_cast<const FakeBindState1*>(self);
- }
- static bool IsCancelled(const internal::BindStateBase*) {
- return false;
- }
-};
+struct FakeBindState : internal::BindStateBase {
+ FakeBindState() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
-struct FakeBindState2 : internal::BindStateBase {
- FakeBindState2() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
private:
- ~FakeBindState2() {}
+ ~FakeBindState() {}
static void Destroy(const internal::BindStateBase* self) {
- delete static_cast<const FakeBindState2*>(self);
+ delete static_cast<const FakeBindState*>(self);
}
static bool IsCancelled(const internal::BindStateBase*) {
return false;
@@ -50,9 +39,7 @@ namespace {
class CallbackTest : public ::testing::Test {
public:
CallbackTest()
- : callback_a_(new FakeBindState1()),
- callback_b_(new FakeBindState2()) {
- }
+ : callback_a_(new FakeBindState()), callback_b_(new FakeBindState()) {}
~CallbackTest() override {}
@@ -94,7 +81,7 @@ TEST_F(CallbackTest, Equals) {
EXPECT_FALSE(callback_b_.Equals(callback_a_));
// We should compare based on instance, not type.
- Callback<void()> callback_c(new FakeBindState1());
+ Callback<void()> callback_c(new FakeBindState());
Callback<void()> callback_a2 = callback_a_;
EXPECT_TRUE(callback_a_.Equals(callback_a2));
EXPECT_FALSE(callback_a_.Equals(callback_c));
@@ -148,6 +135,23 @@ TEST_F(CallbackTest, ResetAndReturn) {
ASSERT_TRUE(tfr.cb_already_run);
}
+TEST_F(CallbackTest, NullAfterMoveRun) {
+ Closure cb = Bind([] {});
+ ASSERT_TRUE(cb);
+ std::move(cb).Run();
+ ASSERT_FALSE(cb);
+
+ const Closure cb2 = Bind([] {});
+ ASSERT_TRUE(cb2);
+ std::move(cb2).Run();
+ ASSERT_TRUE(cb2);
+
+ OnceClosure cb3 = BindOnce([] {});
+ ASSERT_TRUE(cb3);
+ std::move(cb3).Run();
+ ASSERT_FALSE(cb3);
+}
+
class CallbackOwner : public base::RefCounted<CallbackOwner> {
public:
explicit CallbackOwner(bool* deleted) {
diff --git a/chromium/base/command_line.cc b/chromium/base/command_line.cc
index 99ea2b00032..873da813483 100644
--- a/chromium/base/command_line.cc
+++ b/chromium/base/command_line.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
@@ -411,11 +412,15 @@ void CommandLine::AppendArguments(const CommandLine& other,
void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
if (wrapper.empty())
return;
- // The wrapper may have embedded arguments (like "gdb --args"). In this case,
- // we don't pretend to do anything fancy, we just split on spaces.
- StringVector wrapper_argv = SplitString(
- wrapper, FilePath::StringType(1, ' '), base::TRIM_WHITESPACE,
- base::SPLIT_WANT_ALL);
+ // Split the wrapper command based on whitespace (with quoting).
+ using CommandLineTokenizer =
+ StringTokenizerT<StringType, StringType::const_iterator>;
+ CommandLineTokenizer tokenizer(wrapper, FILE_PATH_LITERAL(" "));
+ tokenizer.set_quote_chars(FILE_PATH_LITERAL("'\""));
+ std::vector<StringType> wrapper_argv;
+ while (tokenizer.GetNext())
+ wrapper_argv.emplace_back(tokenizer.token());
+
// Prepend the wrapper and update the switches/arguments |begin_args_|.
argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
begin_args_ += wrapper_argv.size();
diff --git a/chromium/base/command_line_unittest.cc b/chromium/base/command_line_unittest.cc
index bcfc6c59c91..79c9aecc2a2 100644
--- a/chromium/base/command_line_unittest.cc
+++ b/chromium/base/command_line_unittest.cc
@@ -406,4 +406,35 @@ TEST(CommandLineTest, Copy) {
EXPECT_TRUE(assigned.HasSwitch(pair.first));
}
+TEST(CommandLineTest, PrependSimpleWrapper) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+ cl.AppendSwitch("a");
+ cl.AppendSwitch("b");
+ cl.PrependWrapper(FILE_PATH_LITERAL("wrapper --foo --bar"));
+
+ EXPECT_EQ(6u, cl.argv().size());
+ EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--foo"), cl.argv()[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--bar"), cl.argv()[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
+TEST(CommandLineTest, PrependComplexWrapper) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+ cl.AppendSwitch("a");
+ cl.AppendSwitch("b");
+ cl.PrependWrapper(
+ FILE_PATH_LITERAL("wrapper --foo='hello world' --bar=\"let's go\""));
+
+ EXPECT_EQ(6u, cl.argv().size());
+ EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--foo='hello world'"), cl.argv()[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--bar=\"let's go\""), cl.argv()[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
} // namespace base
diff --git a/chromium/base/containers/container_test_utils.h b/chromium/base/containers/container_test_utils.h
new file mode 100644
index 00000000000..e36b9f73125
--- /dev/null
+++ b/chromium/base/containers/container_test_utils.h
@@ -0,0 +1,39 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
+#define BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
+
+// This file contains some helper classes for testing container behavior.
+
+#include "base/macros.h"
+
+namespace base {
+
+// A move-only class that holds an integer.
+class MoveOnlyInt {
+ public:
+ explicit MoveOnlyInt(int data = 1) : data_(data) {}
+ MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; }
+ MoveOnlyInt& operator=(MoveOnlyInt&& other) {
+ data_ = other.data_;
+ other.data_ = 0;
+ return *this;
+ }
+
+ friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+ return lhs.data_ < rhs.data_;
+ }
+
+ int data() const { return data_; }
+
+ private:
+ int data_;
+
+ DISALLOW_COPY_AND_ASSIGN(MoveOnlyInt);
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
diff --git a/chromium/base/containers/flat_map.h b/chromium/base/containers/flat_map.h
new file mode 100644
index 00000000000..bd0b1265bc9
--- /dev/null
+++ b/chromium/base/containers/flat_map.h
@@ -0,0 +1,290 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_FLAT_MAP_H_
+#define BASE_CONTAINERS_FLAT_MAP_H_
+
+#include <utility>
+
+#include "base/containers/flat_tree.h"
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+// An implementation of the flat_tree GetKeyFromValue template parameter that
+// extracts the key as the first element of a pair.
+template <class Key, class Mapped>
+struct GetKeyFromValuePairFirst {
+ const Key& operator()(const std::pair<Key, Mapped>& p) const {
+ return p.first;
+ }
+};
+
+} // namespace internal
+
+// OVERVIEW
+//
+// This file implements the flat_map container. It is an alternative to
+// standard sorted containers that stores its elements in contiguous memory
+// (a vector).
+// sorted containers that stores its elements in contiguous memory (a vector).
+//
+// Additional documentation and usage advice is in flat_set.h.
+//
+// DOCUMENTATION
+//
+// Most of the core functionality is inherited from flat_tree. Please see
+// flat_tree.h for more details for most of these functions. As a quick
+// reference, the functions available are:
+//
+// Constructors (inputs need not be sorted):
+// flat_map(InputIterator first, InputIterator last,
+// FlatContainerDupes, const Compare& compare = Compare());
+// flat_map(const flat_map&);
+// flat_map(flat_map&&);
+// flat_map(std::vector<value_type>, FlatContainerDupes); // Re-use storage.
+// flat_map(std::initializer_list<value_type> ilist,
+// const Compare& comp = Compare());
+//
+// Assignment functions:
+// flat_map& operator=(const flat_map&);
+// flat_map& operator=(flat_map&&);
+// flat_map& operator=(initializer_list<pair<Key, Mapped>>);
+//
+// Memory management functions:
+// void reserve(size_t);
+// size_t capacity() const;
+// void shrink_to_fit();
+//
+// Size management functions:
+// void clear();
+// size_t size() const;
+// size_t max_size() const;
+// bool empty() const;
+//
+// Iterator functions:
+// iterator begin();
+// const_iterator begin() const;
+// const_iterator cbegin() const;
+// iterator end();
+// const_iterator end() const;
+// const_iterator cend() const;
+// reverse_iterator rbegin();
+//   const_reverse_iterator rbegin() const;
+// const_reverse_iterator crbegin() const;
+// reverse_iterator rend();
+// const_reverse_iterator rend() const;
+// const_reverse_iterator crend() const;
+//
+// Insert and accessor functions:
+// Mapped& operator[](const Key&);
+// Mapped& operator[](Key&&);
+// pair<iterator, bool> insert(const pair<Key, Mapped>&);
+// pair<iterator, bool> insert(pair<Key, Mapped>&&);
+// pair<iterator, bool> emplace(Args&&...);
+// iterator emplace_hint(const_iterator, Args&&...);
+//
+// Erase functions:
+// iterator erase(const_iterator);
+//   iterator erase(const_iterator first, const_iterator last);
+//   size_t erase(const Key& key);
+//
+// Comparators (see std::map documentation).
+// key_compare key_comp() const;
+// value_compare value_comp() const;
+//
+// Search functions:
+// size_t count(const Key&) const;
+// iterator find(const Key&);
+// const_iterator find(const Key&) const;
+//   pair<iterator, iterator> equal_range(const Key&);
+// iterator lower_bound(const Key&);
+// const_iterator lower_bound(const Key&) const;
+// iterator upper_bound(const Key&);
+// const_iterator upper_bound(const Key&) const;
+//
+// General functions:
+//   void swap(flat_map&);
+//
+// Non-member operators:
+//   bool operator==(const flat_map&, const flat_map&);
+//   bool operator!=(const flat_map&, const flat_map&);
+//   bool operator<(const flat_map&, const flat_map&);
+//   bool operator>(const flat_map&, const flat_map&);
+//   bool operator>=(const flat_map&, const flat_map&);
+//   bool operator<=(const flat_map&, const flat_map&);
+//
+template <class Key, class Mapped, class Compare = std::less<Key>>
+// Meets the requirements of Container, AssociativeContainer,
+// ReversibleContainer.
+// Requires: Key is Movable, Compare is a StrictWeakOrdering on Key.
+class flat_map : public ::base::internal::flat_tree<
+ Key,
+ std::pair<Key, Mapped>,
+ ::base::internal::GetKeyFromValuePairFirst<Key, Mapped>,
+ Compare> {
+ private:
+ using tree = typename ::base::internal::flat_tree<
+ Key,
+ std::pair<Key, Mapped>,
+ ::base::internal::GetKeyFromValuePairFirst<Key, Mapped>,
+ Compare>;
+
+ public:
+ using mapped_type = Mapped;
+ using value_type = typename tree::value_type;
+
+ // --------------------------------------------------------------------------
+ // Lifetime.
+ //
+  // Constructors that take a range guarantee O(N * log(N)) + O(N) complexity
+  // (N is the length of the range). The range constructors are NOT stable: if
+  // there are duplicates, an arbitrary one will be chosen.
+ //
+ // Assume that move constructors invalidate iterators and references.
+ //
+ // The constructors that take ranges, lists, and vectors do not require that
+ // the input be sorted.
+
+ flat_map();
+ explicit flat_map(const Compare& comp);
+
+ template <class InputIterator>
+ flat_map(InputIterator first,
+ InputIterator last,
+ FlatContainerDupes dupe_handling,
+ const Compare& comp = Compare());
+
+ flat_map(const flat_map&);
+ flat_map(flat_map&&);
+
+ flat_map(std::vector<value_type> items,
+ FlatContainerDupes dupe_handling,
+ const Compare& comp = Compare());
+
+ flat_map(std::initializer_list<value_type> ilist,
+ FlatContainerDupes dupe_handling,
+ const Compare& comp = Compare());
+
+ ~flat_map();
+
+ // --------------------------------------------------------------------------
+ // Assignments.
+ //
+ // Assume that move assignment invalidates iterators and references.
+
+ flat_map& operator=(const flat_map&);
+ flat_map& operator=(flat_map&&);
+ // Takes the first if there are duplicates in the initializer list.
+ flat_map& operator=(std::initializer_list<value_type> ilist);
+
+ // --------------------------------------------------------------------------
+ // Map-specific insert operations.
+ //
+ // Normal insert() functions are inherited from flat_tree.
+ //
+ // Assume that every operation invalidates iterators and references.
+ // Insertion of one element can take O(size).
+
+ mapped_type& operator[](const Key& key);
+ mapped_type& operator[](Key&& key);
+
+ // --------------------------------------------------------------------------
+ // General operations.
+ //
+ // Assume that swap invalidates iterators and references.
+
+ void swap(flat_map& other);
+
+ friend void swap(flat_map& lhs, flat_map& rhs) { lhs.swap(rhs); }
+};
+
+// ----------------------------------------------------------------------------
+// Lifetime.
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map() = default;
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(const Compare& comp) : tree(comp) {}
+
+template <class Key, class Mapped, class Compare>
+template <class InputIterator>
+flat_map<Key, Mapped, Compare>::flat_map(InputIterator first,
+ InputIterator last,
+ FlatContainerDupes dupe_handling,
+ const Compare& comp)
+ : tree(first, last, dupe_handling, comp) {}
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(const flat_map&) = default;
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(flat_map&&) = default;
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(std::vector<value_type> items,
+ FlatContainerDupes dupe_handling,
+ const Compare& comp)
+ : tree(std::move(items), dupe_handling, comp) {}
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(
+ std::initializer_list<value_type> ilist,
+ FlatContainerDupes dupe_handling,
+ const Compare& comp)
+ : flat_map(std::begin(ilist), std::end(ilist), dupe_handling, comp) {}
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::~flat_map() = default;
+
+// ----------------------------------------------------------------------------
+// Assignments.
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator=(const flat_map&)
+ -> flat_map& = default;
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator=(flat_map &&)
+ -> flat_map& = default;
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator=(
+ std::initializer_list<value_type> ilist) -> flat_map& {
+ tree::operator=(ilist);
+ return *this;
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator[](const Key& key) -> Mapped& {
+ typename tree::iterator found = tree::lower_bound(key);
+ if (found == tree::end() || tree::key_comp()(key, found->first))
+ found = tree::unsafe_emplace(found, key, Mapped());
+ return found->second;
+}
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator[](Key&& key) -> Mapped& {
+ const Key& key_ref = key;
+ typename tree::iterator found = tree::lower_bound(key_ref);
+ if (found == tree::end() || tree::key_comp()(key, found->first))
+ found = tree::unsafe_emplace(found, std::move(key), Mapped());
+ return found->second;
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+template <class Key, class Mapped, class Compare>
+void flat_map<Key, Mapped, Compare>::swap(flat_map& other) {
+ tree::swap(other);
+}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_FLAT_MAP_H_
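
A short usage sketch of the container above (DemoMap() is hypothetical; the dupe-handling enum comes from flat_tree.h):

    #include <utility>
    #include <vector>

    #include "base/containers/flat_map.h"

    void DemoMap() {
      // Build once from bulk data; KEEP_FIRST_OF_DUPES keeps {1, 10}.
      std::vector<std::pair<int, int>> items = {{1, 10}, {1, 20}, {2, 30}};
      base::flat_map<int, int> map(std::move(items), base::KEEP_FIRST_OF_DUPES);

      // operator[] default-constructs missing values like std::map does, but
      // each single insertion is O(size), so prefer bulk construction.
      map[3] += 5;  // Inserts {3, 5}.
    }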
diff --git a/chromium/base/containers/flat_map_unittest.cc b/chromium/base/containers/flat_map_unittest.cc
new file mode 100644
index 00000000000..0556527635a
--- /dev/null
+++ b/chromium/base/containers/flat_map_unittest.cc
@@ -0,0 +1,173 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/flat_map.h"
+
+#include <string>
+#include <vector>
+
+#include "base/containers/container_test_utils.h"
+#include "base/macros.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// A flat_map is basically an interface to flat_tree. So several basic
+// operations are tested to make sure things are set up properly, but the bulk
+// of the tests are in flat_tree_unittest.cc.
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+TEST(FlatMap, IncompleteType) {
+ struct A {
+ using Map = flat_map<A, A>;
+ int data;
+ Map set_with_incomplete_type;
+ Map::iterator it;
+ Map::const_iterator cit;
+
+ // We do not declare operator< because clang complains that it's unused.
+ };
+
+ A a;
+}
+
+TEST(FlatMap, RangeConstructor) {
+ flat_map<int, int>::value_type input_vals[] = {
+ {1, 1}, {1, 2}, {1, 3}, {2, 1}, {2, 2}, {2, 3}, {3, 1}, {3, 2}, {3, 3}};
+
+ flat_map<int, int> first(std::begin(input_vals), std::end(input_vals),
+ KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(first, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 1),
+ std::make_pair(3, 1)));
+
+ flat_map<int, int> last(std::begin(input_vals), std::end(input_vals),
+ KEEP_LAST_OF_DUPES);
+ EXPECT_THAT(last, ElementsAre(std::make_pair(1, 3), std::make_pair(2, 3),
+ std::make_pair(3, 3)));
+}
+
+TEST(FlatMap, MoveConstructor) {
+ using pair = std::pair<MoveOnlyInt, MoveOnlyInt>;
+
+ flat_map<MoveOnlyInt, MoveOnlyInt> original;
+ original.insert(pair(MoveOnlyInt(1), MoveOnlyInt(1)));
+ original.insert(pair(MoveOnlyInt(2), MoveOnlyInt(2)));
+ original.insert(pair(MoveOnlyInt(3), MoveOnlyInt(3)));
+ original.insert(pair(MoveOnlyInt(4), MoveOnlyInt(4)));
+
+ flat_map<MoveOnlyInt, MoveOnlyInt> moved(std::move(original));
+
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+TEST(FlatMap, VectorConstructor) {
+ using IntPair = std::pair<int, int>;
+ using IntMap = flat_map<int, int>;
+ {
+ std::vector<IntPair> vect{{1, 1}, {1, 2}, {2, 1}};
+ IntMap map(std::move(vect), KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(map, ElementsAre(IntPair(1, 1), IntPair(2, 1)));
+ }
+ {
+ std::vector<IntPair> vect{{1, 1}, {1, 2}, {2, 1}};
+ IntMap map(std::move(vect), KEEP_LAST_OF_DUPES);
+ EXPECT_THAT(map, ElementsAre(IntPair(1, 2), IntPair(2, 1)));
+ }
+}
+
+TEST(FlatMap, InitializerListConstructor) {
+ {
+ flat_map<int, int> cont(
+ {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {1, 2}, {10, 10}, {8, 8}},
+ KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2),
+ std::make_pair(3, 3), std::make_pair(4, 4),
+ std::make_pair(5, 5), std::make_pair(8, 8),
+ std::make_pair(10, 10)));
+ }
+ {
+ flat_map<int, int> cont(
+ {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {1, 2}, {10, 10}, {8, 8}},
+ KEEP_LAST_OF_DUPES);
+ EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 2), std::make_pair(2, 2),
+ std::make_pair(3, 3), std::make_pair(4, 4),
+ std::make_pair(5, 5), std::make_pair(8, 8),
+ std::make_pair(10, 10)));
+ }
+}
+
+TEST(FlatMap, InsertFindSize) {
+ base::flat_map<int, int> s;
+ s.insert(std::make_pair(1, 1));
+ s.insert(std::make_pair(1, 1));
+ s.insert(std::make_pair(2, 2));
+
+ EXPECT_EQ(2u, s.size());
+ EXPECT_EQ(std::make_pair(1, 1), *s.find(1));
+ EXPECT_EQ(std::make_pair(2, 2), *s.find(2));
+ EXPECT_EQ(s.end(), s.find(7));
+}
+
+TEST(FlatMap, CopySwap) {
+ base::flat_map<int, int> original;
+ original.insert({1, 1});
+ original.insert({2, 2});
+ EXPECT_THAT(original,
+ ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+
+ base::flat_map<int, int> copy(original);
+ EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+
+ copy.erase(copy.begin());
+ copy.insert({10, 10});
+ EXPECT_THAT(copy, ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10)));
+
+ original.swap(copy);
+ EXPECT_THAT(original,
+ ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10)));
+ EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+}
+
+// operator[](const Key&)
+TEST(FlatMap, SubscriptConstKey) {
+ base::flat_map<std::string, int> m;
+
+ // Default construct elements that don't exist yet.
+ int& s = m["a"];
+ EXPECT_EQ(0, s);
+ EXPECT_EQ(1u, m.size());
+
+ // The returned mapped reference should refer into the map.
+ s = 22;
+ EXPECT_EQ(22, m["a"]);
+
+ // Overwrite existing elements.
+ m["a"] = 44;
+ EXPECT_EQ(44, m["a"]);
+}
+
+// operator[](Key&&)
+TEST(FlatMap, SubscriptMoveOnlyKey) {
+ base::flat_map<MoveOnlyInt, int> m;
+
+ // Default construct elements that don't exist yet.
+ int& s = m[MoveOnlyInt(1)];
+ EXPECT_EQ(0, s);
+ EXPECT_EQ(1u, m.size());
+
+ // The returned mapped reference should refer into the map.
+ s = 22;
+ EXPECT_EQ(22, m[MoveOnlyInt(1)]);
+
+ // Overwrite existing elements.
+ m[MoveOnlyInt(1)] = 44;
+ EXPECT_EQ(44, m[MoveOnlyInt(1)]);
+}
+
+} // namespace base
diff --git a/chromium/base/containers/flat_set.h b/chromium/base/containers/flat_set.h
index ae444c2c8e2..da19034f32c 100644
--- a/chromium/base/containers/flat_set.h
+++ b/chromium/base/containers/flat_set.h
@@ -5,17 +5,15 @@
#ifndef BASE_CONTAINERS_FLAT_SET_H_
#define BASE_CONTAINERS_FLAT_SET_H_
-#include <algorithm>
-#include <functional>
-#include <utility>
-#include <vector>
+#include "base/containers/flat_tree.h"
namespace base {
// Overview:
// This file implements flat_set container. It is an alternative to standard
-// sorted containers that stores it's elements in contiguous memory (current
-// version uses sorted std::vector).
+// sorted containers that stores its elements in contiguous memory using a
+// std::vector.
+//
// Discussion that preceded introduction of this container can be found here:
// https://groups.google.com/a/chromium.org/forum/#!searchin/chromium-dev/vector$20based/chromium-dev/4uQMma9vj9w/HaQ-WvMOAwAJ
//
@@ -27,23 +25,25 @@ namespace base {
// Usage guidance:
// Prefer base::flat_set for:
// * Very small sets, something that is an easy fit for cache. Consider
-// "very small" to be under a 100 32bit integers.
+// "very small" to be under 100 32bit integers.
// * Sets that are built once (using flat_set::flat_set(first, last)). Consider
// collecting all data in a vector and then building flat_set out of it.
-// TODO(dyaroshev): improve the interface to better support this pattern
-// (crbug.com/682254).
-// * Sets where mutating happens in big bulks: use erase(std::remove()) idiom
-// for erasing many elements. Insertion is harder - consider set operations
-// or building a new vector. Set operations can be slow if one of the sets
-// is considerably bigger. Also be aware that beating performance of
-// sort + unique (implementation of flat_set's constructor) is hard, clever
-// merge of many sets might not win. Generally avoid inserting into flat set
-// without benchmarks.
+// Using the constructor that takes a moved vector allows you to re-use
+// storage.
+// * Sets where mutating happens in big bulks: to erase multiple elements, use
+//   base::EraseIf() rather than repeated single-element removal. Insertion is
+//   harder - consider set operations or building a new vector. Set operations
+//   can be slow if one of the sets is considerably bigger. Also be aware that
+//   beating the performance of sort + unique (the implementation of flat_set's
+//   constructor) is hard; a clever merge of many sets might not win. Generally
+//   avoid inserting into a flat_set without benchmarks.
// * Copying and iterating.
// * Set operations (union/intersect etc).
//
// Prefer to build a new flat_set from a std::vector (or similar) instead of
-// calling insert() repeatedly, which would have O(size^2) complexity.
+// calling insert() repeatedly, which would have O(size^2) complexity. The
+// constructor that can accept a moved vector (not required to be sorted) is
+// the most efficient.
//
// TODO(dyaroshev): develop standalone benchmarks to find performance boundaries
// for different types of sets crbug.com/682215.
@@ -60,12 +60,6 @@ namespace base {
// - we ask (for now) to assume that move operations invalidate iterators.
// TODO(dyaroshev): Research the possibility of using a small buffer
// optimization crbug.com/682240.
-// * Constructor sorts elements in a non-stable manner (unlike std::set). So
-// among equivalent (with respect to provided compare) elements passed to
-// the constructor it is unspecified with one will end up in the set.
-// However insert()/emplace() methods are stable with respect to already
-// inserted elements - an element that is already in the set will not be
-// replaced.
// * allocator support is not implemented.
// * insert(first, last) and insert(std::initializer_list) are not
// implemented (see Notes section).
@@ -79,584 +73,94 @@ namespace base {
// method.
// TODO(dyaroshev): research an algorithm for range insertion crbug.com/682249.
-template <class Key, class Compare = std::less<Key>>
-// Meets the requirements of Container, AssociativeContainer,
-// ReversibleContainer.
-// Requires: Key is Movable, Compare is a StrictWeakOrdering on Key.
-class flat_set {
- private:
- using underlying_type = std::vector<Key>;
-
- public:
- // --------------------------------------------------------------------------
- // Types.
- //
- using key_type = Key;
- using key_compare = Compare;
- using value_type = Key;
- using value_compare = Compare;
-
- using pointer = typename underlying_type::pointer;
- using const_pointer = typename underlying_type::const_pointer;
- using reference = typename underlying_type::reference;
- using const_reference = typename underlying_type::const_reference;
- using size_type = typename underlying_type::size_type;
- using difference_type = typename underlying_type::difference_type;
- using iterator = typename underlying_type::iterator;
- using const_iterator = typename underlying_type::const_iterator;
- using reverse_iterator = typename underlying_type::reverse_iterator;
- using const_reverse_iterator =
- typename underlying_type::const_reverse_iterator;
-
- // --------------------------------------------------------------------------
- // Lifetime.
- //
- // Constructors that take range guarantee O(N * log^2(N)) + O(N) complexity
- // and take O(N * log(N)) + O(N) if extra memory is available (N is a range
- // length).
- //
- // Assume that move constructors invalidate iterators and references.
-
- flat_set();
- explicit flat_set(const Compare& comp);
-
- template <class InputIterator>
- flat_set(InputIterator first,
- InputIterator last,
- const Compare& comp = Compare());
-
- flat_set(const flat_set&);
- flat_set(flat_set&&);
-
- flat_set(std::initializer_list<value_type> ilist,
- const Compare& comp = Compare());
-
- ~flat_set();
-
- // --------------------------------------------------------------------------
- // Assignments.
- //
- // Assume that move assignment invalidates iterators and references.
-
- flat_set& operator=(const flat_set&);
- flat_set& operator=(flat_set&&);
- flat_set& operator=(std::initializer_list<value_type> ilist);
-
- // --------------------------------------------------------------------------
- // Memory management.
- //
- // Beware that shrink_to_fit() simply forwards the request to the
- // underlying_type and its implementation is free to optimize otherwise and
- // leave capacity() to be greater that its size.
- //
- // reserve() and shrink_to_fit() invalidate iterators and references.
-
- void reserve(size_type new_capacity);
- size_type capacity() const;
- void shrink_to_fit();
-
- // --------------------------------------------------------------------------
- // Size management.
- //
- // clear() leaves the capacity() of the flat_set unchanged.
-
- void clear();
-
- size_type size() const;
- size_type max_size() const;
- bool empty() const;
-
- // --------------------------------------------------------------------------
- // Iterators.
-
- iterator begin();
- const_iterator begin() const;
- const_iterator cbegin() const;
-
- iterator end();
- const_iterator end() const;
- const_iterator cend() const;
-
- reverse_iterator rbegin();
- const_reverse_iterator rbegin() const;
- const_reverse_iterator crbegin() const;
-
- reverse_iterator rend();
- const_reverse_iterator rend() const;
- const_reverse_iterator crend() const;
-
- // --------------------------------------------------------------------------
- // Insert operations.
- //
- // Assume that every operation invalidates iterators and references.
- // Insertion of one element can take O(size). See the Notes section in the
- // class comments on why we do not currently implement range insertion.
- // Capacity of flat_set grows in an implementation-defined manner.
- //
- // NOTE: Prefer to build a new flat_set from a std::vector (or similar)
- // instead of calling insert() repeatedly.
-
- std::pair<iterator, bool> insert(const value_type& val);
- std::pair<iterator, bool> insert(value_type&& val);
-
- iterator insert(const_iterator position_hint, const value_type& x);
- iterator insert(const_iterator position_hint, value_type&& x);
-
- template <class... Args>
- std::pair<iterator, bool> emplace(Args&&... args);
-
- template <class... Args>
- iterator emplace_hint(const_iterator position_hint, Args&&... args);
-
- // --------------------------------------------------------------------------
- // Erase operations.
- //
- // Assume that every operation invalidates iterators and references.
- //
- // erase(position), erase(first, last) can take O(size).
- // erase(key) may take O(size) + O(log(size)).
- //
- // Prefer the erase(std::remove(), end()) idiom for deleting multiple
- // elements.
-
- iterator erase(const_iterator position);
- iterator erase(const_iterator first, const_iterator last);
- size_type erase(const key_type& key);
-
- // --------------------------------------------------------------------------
- // Comparators.
-
- key_compare key_comp() const;
- value_compare value_comp() const;
-
- // --------------------------------------------------------------------------
- // Search operations.
- //
- // Search operations have O(log(size)) complexity.
-
- size_type count(const key_type& key) const;
-
- iterator find(const key_type& key);
- const_iterator find(const key_type& key) const;
-
- std::pair<iterator, iterator> equal_range(const key_type& ket);
- std::pair<const_iterator, const_iterator> equal_range(
- const key_type& key) const;
-
- iterator lower_bound(const key_type& key);
- const_iterator lower_bound(const key_type& key) const;
-
- iterator upper_bound(const key_type& key);
- const_iterator upper_bound(const key_type& key) const;
-
- // --------------------------------------------------------------------------
- // General operations.
- //
- // Assume that swap invalidates iterators and references.
- //
- // As with std::set, equality and ordering operations for the whole flat_set
- // are equivalent to using equal() and lexicographical_compare() on the key
- // types, rather than using element-wise key_comp() as e.g. lower_bound()
- // does. Implementation note: currently we use operator==() and operator<() on
- // std::vector, because they have the same contract we need, so we use them
- // directly for brevity and in case it is more optimal than calling equal()
- // and lexicograhpical_compare(). If the underlying container type is changed,
- // this code may need to be modified.
-
- void swap(flat_set& other);
-
- friend bool operator==(const flat_set& lhs, const flat_set& rhs) {
- return lhs.impl_.body_ == rhs.impl_.body_;
- }
-
- friend bool operator!=(const flat_set& lhs, const flat_set& rhs) {
- return !(lhs == rhs);
- }
-
- friend bool operator<(const flat_set& lhs, const flat_set& rhs) {
- return lhs.impl_.body_ < rhs.impl_.body_;
- }
-
- friend bool operator>(const flat_set& lhs, const flat_set& rhs) {
- return rhs < lhs;
- }
-
- friend bool operator>=(const flat_set& lhs, const flat_set& rhs) {
- return !(lhs < rhs);
- }
-
- friend bool operator<=(const flat_set& lhs, const flat_set& rhs) {
- return !(lhs > rhs);
- }
-
- friend void swap(flat_set& lhs, flat_set& rhs) { lhs.swap(rhs); }
-
- private:
- const flat_set& as_const() { return *this; }
-
- iterator const_cast_it(const_iterator c_it) {
- auto distance = std::distance(cbegin(), c_it);
- return std::next(begin(), distance);
- }
-
- void sort_and_unique() {
- // std::set sorts elements preserving stability because it doesn't have any
- // performance wins in not doing that. We do, so we use an unstable sort.
- std::sort(begin(), end(), value_comp());
- erase(std::unique(begin(), end(),
- [this](const value_type& lhs, const value_type& rhs) {
- // lhs is already <= rhs due to sort, therefore
- // !(lhs < rhs) <=> lhs == rhs.
- return !value_comp()(lhs, rhs);
- }),
- end());
- }
-
- // To support comparators that may not be possible to default-construct, we
- // have to store an instance of Compare. Using this to store all internal
- // state of flat_set and using private inheritance to store compare lets us
- // take advantage of an empty base class optimization to avoid extra space in
- // the common case when Compare has no state.
- struct Impl : private Compare {
- Impl() = default;
-
- template <class Cmp, class... Body>
- explicit Impl(Cmp&& compare_arg, Body&&... underlying_type_args)
- : Compare(std::forward<Cmp>(compare_arg)),
- body_(std::forward<Body>(underlying_type_args)...) {}
-
- Compare compare() const { return *this; }
-
- underlying_type body_;
- } impl_;
-};
-
-// ----------------------------------------------------------------------------
-// Lifetime.
-
-template <class Key, class Compare>
-flat_set<Key, Compare>::flat_set() = default;
-
-template <class Key, class Compare>
-flat_set<Key, Compare>::flat_set(const Compare& comp) : impl_(comp) {}
-
-template <class Key, class Compare>
-template <class InputIterator>
-flat_set<Key, Compare>::flat_set(InputIterator first,
- InputIterator last,
- const Compare& comp)
- : impl_(comp, first, last) {
- sort_and_unique();
-}
-
-template <class Key, class Compare>
-flat_set<Key, Compare>::flat_set(const flat_set&) = default;
-
-template <class Key, class Compare>
-flat_set<Key, Compare>::flat_set(flat_set&&) = default;
-
-template <class Key, class Compare>
-flat_set<Key, Compare>::flat_set(std::initializer_list<value_type> ilist,
- const Compare& comp)
- : flat_set(std::begin(ilist), std::end(ilist), comp) {}
-
-template <class Key, class Compare>
-flat_set<Key, Compare>::~flat_set() = default;
-
-// ----------------------------------------------------------------------------
-// Assignments.
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::operator=(const flat_set&) -> flat_set& = default;
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::operator=(flat_set &&) -> flat_set& = default;
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::operator=(std::initializer_list<value_type> ilist)
- -> flat_set& {
- impl_.body_ = ilist;
- sort_and_unique();
- return *this;
-}
-
-// ----------------------------------------------------------------------------
-// Memory management.
-
-template <class Key, class Compare>
-void flat_set<Key, Compare>::reserve(size_type new_capacity) {
- impl_.body_.reserve(new_capacity);
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::capacity() const -> size_type {
- return impl_.body_.capacity();
-}
-
-template <class Key, class Compare>
-void flat_set<Key, Compare>::shrink_to_fit() {
- impl_.body_.shrink_to_fit();
-}
-
-// ----------------------------------------------------------------------------
-// Size management.
-
-template <class Key, class Compare>
-void flat_set<Key, Compare>::clear() {
- impl_.body_.clear();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::size() const -> size_type {
- return impl_.body_.size();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::max_size() const -> size_type {
- return impl_.body_.max_size();
-}
-
-template <class Key, class Compare>
-bool flat_set<Key, Compare>::empty() const {
- return impl_.body_.empty();
-}
-
-// ----------------------------------------------------------------------------
-// Iterators.
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::begin() -> iterator {
- return impl_.body_.begin();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::begin() const -> const_iterator {
- return impl_.body_.begin();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::cbegin() const -> const_iterator {
- return impl_.body_.cbegin();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::end() -> iterator {
- return impl_.body_.end();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::end() const -> const_iterator {
- return impl_.body_.end();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::cend() const -> const_iterator {
- return impl_.body_.cend();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::rbegin() -> reverse_iterator {
- return impl_.body_.rbegin();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::rbegin() const -> const_reverse_iterator {
- return impl_.body_.rbegin();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::crbegin() const -> const_reverse_iterator {
- return impl_.body_.crbegin();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::rend() -> reverse_iterator {
- return impl_.body_.rend();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::rend() const -> const_reverse_iterator {
- return impl_.body_.rend();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::crend() const -> const_reverse_iterator {
- return impl_.body_.crend();
-}
-
-// ----------------------------------------------------------------------------
-// Insert operations.
+// QUICK REFERENCE
//
-// Currently we use position_hint the same way as eastl or boost:
-// https://github.com/electronicarts/EASTL/blob/master/include/EASTL/vector_set.h#L493
+// Most of the core functionality is inherited from flat_tree. Please see
+// flat_tree.h for more details for most of these functions. As a quick
+// reference, the functions available are:
//
-// We duplicate code between copy and move version so that we can avoid
-// creating a temporary value.
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::insert(const value_type& val)
- -> std::pair<iterator, bool> {
- auto position = lower_bound(val);
-
- if (position == end() || value_comp()(val, *position))
- return {impl_.body_.insert(position, val), true};
-
- return {position, false};
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::insert(value_type&& val)
- -> std::pair<iterator, bool> {
- auto position = lower_bound(val);
-
- if (position == end() || value_comp()(val, *position))
- return {impl_.body_.insert(position, std::move(val)), true};
-
- return {position, false};
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::insert(const_iterator position_hint,
- const value_type& val) -> iterator {
- if (position_hint == end() || value_comp()(val, *position_hint)) {
- if (position_hint == begin() || value_comp()(*(position_hint - 1), val))
- // We have to cast away const because of crbug.com/677044.
- return impl_.body_.insert(const_cast_it(position_hint), val);
- }
- return insert(val).first;
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::insert(const_iterator position_hint,
- value_type&& val) -> iterator {
- if (position_hint == end() || value_comp()(val, *position_hint)) {
- if (position_hint == begin() || value_comp()(*(position_hint - 1), val))
- // We have to cast away const because of crbug.com/677044.
- return impl_.body_.insert(const_cast_it(position_hint), std::move(val));
- }
- return insert(std::move(val)).first;
-}
-
-template <class Key, class Compare>
-template <class... Args>
-auto flat_set<Key, Compare>::emplace(Args&&... args)
- -> std::pair<iterator, bool> {
- return insert(value_type(std::forward<Args>(args)...));
-}
-
-template <class Key, class Compare>
-template <class... Args>
-auto flat_set<Key, Compare>::emplace_hint(const_iterator position_hint,
- Args&&... args) -> iterator {
- return insert(position_hint, value_type(std::forward<Args>(args)...));
-}
-
-// ----------------------------------------------------------------------------
-// Erase operations.
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::erase(const_iterator position) -> iterator {
- // We have to cast away const because of crbug.com/677044.
- return impl_.body_.erase(const_cast_it(position));
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::erase(const key_type& val) -> size_type {
- auto eq_range = equal_range(val);
- auto res = std::distance(eq_range.first, eq_range.second);
- // We have to cast away const because of crbug.com/677044.
- erase(const_cast_it(eq_range.first), const_cast_it(eq_range.second));
- return res;
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::erase(const_iterator first, const_iterator last)
- -> iterator {
- // We have to cast away const because of crbug.com/677044.
- return impl_.body_.erase(const_cast_it(first), const_cast_it(last));
-}
-
-// ----------------------------------------------------------------------------
-// Comparators.
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::key_comp() const -> key_compare {
- return impl_.compare();
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::value_comp() const -> value_compare {
- return impl_.compare();
-}
-
-// ----------------------------------------------------------------------------
-// Search operations.
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::count(const key_type& key) const -> size_type {
- auto eq_range = equal_range(key);
- return std::distance(eq_range.first, eq_range.second);
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::find(const key_type& key) -> iterator {
- return const_cast_it(as_const().find(key));
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::find(const key_type& key) const -> const_iterator {
- auto eq_range = equal_range(key);
- return (eq_range.first == eq_range.second) ? end() : eq_range.first;
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::equal_range(const key_type& key)
- -> std::pair<iterator, iterator> {
- auto res = as_const().equal_range(key);
- return {const_cast_it(res.first), const_cast_it(res.second)};
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::equal_range(const key_type& key) const
- -> std::pair<const_iterator, const_iterator> {
- auto lower = lower_bound(key);
-
- if (lower == end() || key_comp()(key, *lower))
- return {lower, lower};
-
- return {lower, std::next(lower)};
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::lower_bound(const key_type& key) -> iterator {
- return const_cast_it(as_const().lower_bound(key));
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::lower_bound(const key_type& key) const
- -> const_iterator {
- return std::lower_bound(begin(), end(), key, key_comp());
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::upper_bound(const key_type& key) -> iterator {
- return const_cast_it(as_const().upper_bound(key));
-}
-
-template <class Key, class Compare>
-auto flat_set<Key, Compare>::upper_bound(const key_type& key) const
- -> const_iterator {
- return std::upper_bound(begin(), end(), key, key_comp());
-}
-
-// ----------------------------------------------------------------------------
-// General operations.
-
-template <class Key, class Compare>
-void flat_set<Key, Compare>::swap(flat_set& other) {
- std::swap(impl_, other.impl_);
-}
+// Constructors (inputs need not be sorted):
+// flat_set(InputIterator first, InputIterator last,
+// FlatContainerDupes, const Compare& compare = Compare());
+// flat_set(const flat_set&);
+// flat_set(flat_set&&);
+// flat_set(std::vector<Key>, FlatContainerDupes); // Re-use storage.
+// flat_set(std::initializer_list<value_type> ilist,
+// const Compare& comp = Compare());
+//
+// Assignment functions:
+// flat_set& operator=(const flat_set&);
+// flat_set& operator=(flat_set&&);
+// flat_set& operator=(initializer_list<Key>);
+//
+// Memory management functions:
+// void reserve(size_t);
+// size_t capacity() const;
+// void shrink_to_fit();
+//
+// Size management functions:
+// void clear();
+// size_t size() const;
+// size_t max_size() const;
+// bool empty() const;
+//
+// Iterator functions:
+// iterator begin();
+// const_iterator begin() const;
+// const_iterator cbegin() const;
+// iterator end();
+// const_iterator end() const;
+// const_iterator cend() const;
+// reverse_iterator rbegin();
+//   const_reverse_iterator rbegin() const;
+// const_reverse_iterator crbegin() const;
+// reverse_iterator rend();
+// const_reverse_iterator rend() const;
+// const_reverse_iterator crend() const;
+//
+// Insert and accessor functions:
+// pair<iterator, bool> insert(const Key&);
+// pair<iterator, bool> insert(Key&&);
+// pair<iterator, bool> emplace(Args&&...);
+// iterator emplace_hint(const_iterator, Args&&...);
+//
+// Erase functions:
+// iterator erase(const_iterator);
+//   iterator erase(const_iterator first, const_iterator last);
+//   size_t erase(const Key& key);
+//
+// Comparators (see std::set documentation).
+// key_compare key_comp() const;
+// value_compare value_comp() const;
+//
+// Search functions:
+// size_t count(const Key&) const;
+// iterator find(const Key&);
+// const_iterator find(const Key&) const;
+//   pair<iterator, iterator> equal_range(const Key&);
+// iterator lower_bound(const Key&);
+// const_iterator lower_bound(const Key&) const;
+// iterator upper_bound(const Key&);
+// const_iterator upper_bound(const Key&) const;
+//
+// General functions:
+//   void swap(flat_set&);
+//
+// Non-member operators:
+//   bool operator==(const flat_set&, const flat_set&);
+//   bool operator!=(const flat_set&, const flat_set&);
+//   bool operator<(const flat_set&, const flat_set&);
+//   bool operator>(const flat_set&, const flat_set&);
+//   bool operator>=(const flat_set&, const flat_set&);
+//   bool operator<=(const flat_set&, const flat_set&);
+//
+template <class Key, class Compare = std::less<Key>>
+using flat_set = typename ::base::internal::flat_tree<
+ Key,
+ Key,
+ ::base::internal::GetKeyFromValueIdentity<Key>,
+ Compare>;
-} // namespace base
+} // namespace base
#endif // BASE_CONTAINERS_FLAT_SET_H_
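
A sketch of the usage guidance above: collect values first, build the set from the moved vector, and erase in bulk with a single range-erase (DemoSet() is hypothetical; base::EraseIf() mentioned above wraps this same erase-remove idiom):

    #include <algorithm>
    #include <utility>
    #include <vector>

    #include "base/containers/flat_set.h"

    void DemoSet() {
      // The moved vector re-uses its storage and need not be pre-sorted.
      std::vector<int> values = {3, 1, 4, 1, 5, 9, 2, 6};
      base::flat_set<int> odds(std::move(values), base::KEEP_FIRST_OF_DUPES);

      // One remove_if pass plus one range-erase, instead of O(size) work per
      // erased element. The kept elements stay sorted.
      odds.erase(std::remove_if(odds.begin(), odds.end(),
                                [](int v) { return v % 2 == 0; }),
                 odds.end());
    }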
diff --git a/chromium/base/containers/flat_set_unittest.cc b/chromium/base/containers/flat_set_unittest.cc
index 1c7a46a026f..dc024fcf0a9 100644
--- a/chromium/base/containers/flat_set_unittest.cc
+++ b/chromium/base/containers/flat_set_unittest.cc
@@ -4,176 +4,25 @@
#include "base/containers/flat_set.h"
-// Following tests are ported and extended tests from libcpp for std::set.
-// They can be found here:
-// https://github.com/llvm-mirror/libcxx/tree/master/test/std/containers/associative/set
-//
-// Not ported tests:
-// * No tests with PrivateConstructor and std::less<> changed to std::less<T>
-// These tests have to do with C++14 std::less<>
-// http://en.cppreference.com/w/cpp/utility/functional/less_void
-// and add support for templated versions of lookup functions.
-// Current implementation of flat containers doesn't support it.
-// * No tests with TemplateConstructor.
-// Library working group issue: LWG #2059
-// http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#2059
-// There is an ambiguity between erase with an iterator and erase with a key,
-// if key has a templated constructor. We have to fix this.
-// * No tests for max_size()
-// Has to do with allocator support.
-// * No tests with DefaultOnly.
-// Standard containers allocate each element in the separate node on the heap
-// and then manipulate these nodes. Flat containers store their elements in
-// contiguous memory and move them around, type is required to be movable.
-// * No tests for N3644.
-// This proposal suggests that all default constructed iterators compare
-// equal. Currently we use std::vector iterators and they don't implement
-// this.
-// * No tests with min_allocator and no tests counting allocations.
-// Flat sets currently don't support allocators.
-// * No tests for range insertion. Flat sets currently do not support this
-// functionality.
-
#include <string>
#include <vector>
+#include "base/containers/container_test_utils.h"
#include "base/macros.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-namespace {
-
-class MoveOnly {
- public:
- explicit MoveOnly(int data = 1) : data_(data) {}
- MoveOnly(MoveOnly&& other) : data_(other.data_) { other.data_ = 0; }
- MoveOnly& operator=(MoveOnly&& other) {
- data_ = other.data_;
- other.data_ = 0;
- return *this;
- }
-
- friend bool operator<(const MoveOnly& lhs, const MoveOnly& rhs) {
- return lhs.data_ < rhs.data_;
- }
-
- int data() const { return data_; }
-
- private:
- int data_;
-
- DISALLOW_COPY_AND_ASSIGN(MoveOnly);
-};
-
-template <class It>
-class InputIterator {
- public:
- using iterator_category = std::input_iterator_tag;
- using value_type = typename std::iterator_traits<It>::value_type;
- using difference_type = typename std::iterator_traits<It>::difference_type;
- using pointer = It;
- using reference = typename std::iterator_traits<It>::reference;
-
- InputIterator() : it_() {}
- explicit InputIterator(It it) : it_(it) {}
-
- reference operator*() const { return *it_; }
- pointer operator->() const { return it_; }
-
- InputIterator& operator++() {
- ++it_;
- return *this;
- }
- InputIterator operator++(int) {
- InputIterator tmp(*this);
- ++(*this);
- return tmp;
- }
-
- friend bool operator==(const InputIterator& lhs, const InputIterator& rhs) {
- return lhs.it_ == rhs.it_;
- }
- friend bool operator!=(const InputIterator& lhs, const InputIterator& rhs) {
- return !(lhs == rhs);
- }
-
- private:
- It it_;
-};
-
-template <typename It>
-InputIterator<It> MakeInputIterator(It it) {
- return InputIterator<It>(it);
-}
-
-class Emplaceable {
- public:
- Emplaceable() : Emplaceable(0, 0.0) {}
- Emplaceable(int i, double d) : int_(i), double_(d) {}
- Emplaceable(Emplaceable&& other) : int_(other.int_), double_(other.double_) {
- other.int_ = 0;
- other.double_ = 0.0;
- }
-
- Emplaceable& operator=(Emplaceable&& other) {
- int_ = other.int_;
- other.int_ = 0;
- double_ = other.double_;
- other.double_ = 0.0;
- return *this;
- }
-
- friend bool operator==(const Emplaceable& lhs, const Emplaceable& rhs) {
- return std::tie(lhs.int_, lhs.double_) == std::tie(rhs.int_, rhs.double_);
- }
-
- friend bool operator<(const Emplaceable& lhs, const Emplaceable& rhs) {
- return std::tie(lhs.int_, lhs.double_) < std::tie(rhs.int_, rhs.double_);
- }
-
- private:
- int int_;
- double double_;
-
- DISALLOW_COPY_AND_ASSIGN(Emplaceable);
-};
-
-class NonDefaultConstructibleCompare {
- public:
- explicit NonDefaultConstructibleCompare(int) {}
-
- template <typename T>
- bool operator()(const T& lhs, const T& rhs) {
- return std::less<T>()(lhs, rhs);
- }
-};
-
-// Common test sets.
-using IntSet = base::flat_set<int>;
-using MoveOnlySet = base::flat_set<MoveOnly>;
-using EmplaceableSet = base::flat_set<Emplaceable>;
-using ReversedSet = base::flat_set<int, std::greater<int>>;
-
-// TODO(dyaroshev): replace less<int> with less<>, once we have it
-// crbug.com/682254.
-using SetWithLess = base::flat_set<int, std::less<int>>;
-
-using SetWithStrangeCompare =
- base::flat_set<int, NonDefaultConstructibleCompare>;
+// A flat_set is basically an interface to flat_tree. So several basic
+// operations are tested to make sure things are set up properly, but the bulk
+// of the tests are in flat_tree_unittest.cc.
using ::testing::ElementsAre;
-} // namespace
-
-// ----------------------------------------------------------------------------
-// Class.
-
-// Check that base::flat_set and its iterators can be instantiated with an
-// incomplete type.
+namespace base {
TEST(FlatSet, IncompleteType) {
struct A {
- using Set = base::flat_set<A>;
+ using Set = flat_set<A>;
int data;
Set set_with_incomplete_type;
Set::iterator it;
@@ -185,1060 +34,60 @@ TEST(FlatSet, IncompleteType) {
A a;
}
-TEST(FlatSet, Stability) {
- using Pair = std::pair<int, int>;
-
- struct LessByFirst {
- bool operator()(const Pair& lhs, const Pair& rhs) {
- return lhs.first < rhs.first;
- }
- };
-
- using Set = base::flat_set<Pair, LessByFirst>;
-
- // Constructors are not stable.
- Set cont{{0, 0}, {1, 0}, {0, 1}, {2, 0}, {0, 2}, {1, 1}};
-
- auto NoneOfSecondsAreTwo = [&cont] {
- return std::none_of(cont.begin(), cont.end(),
- [](const Pair& elem) { return elem.second == 2; });
- };
-
- // Should not replace existing.
- cont.insert(Pair(0, 2));
- cont.insert(Pair(1, 2));
- cont.insert(Pair(2, 2));
-
- EXPECT_TRUE(NoneOfSecondsAreTwo())
- << "insert should be stable with respect to constructor";
-
- cont.insert(Pair(3, 0));
- cont.insert(Pair(3, 2));
-
- EXPECT_TRUE(NoneOfSecondsAreTwo())
- << "insert should be stable with respect to previous insert";
-}
-
-// ----------------------------------------------------------------------------
-// Types.
-
-// key_type
-// key_compare
-// value_type
-// value_compare
-// pointer
-// const_pointer
-// reference
-// const_reference
-// size_type
-// difference_type
-// iterator
-// const_iterator
-// reverse_iterator
-// const_reverse_iterator
-
-TEST(FlatSet, Types) {
- // These are guaranteed to be portable.
- static_assert((std::is_same<int, IntSet::key_type>::value), "");
- static_assert((std::is_same<int, IntSet::value_type>::value), "");
- static_assert((std::is_same<std::less<int>, IntSet::key_compare>::value), "");
- static_assert((std::is_same<std::less<int>, IntSet::value_compare>::value),
- "");
- static_assert((std::is_same<int&, IntSet::reference>::value), "");
- static_assert((std::is_same<const int&, IntSet::const_reference>::value), "");
- static_assert((std::is_same<int*, IntSet::pointer>::value), "");
- static_assert((std::is_same<const int*, IntSet::const_pointer>::value), "");
-}
-
-// ----------------------------------------------------------------------------
-// Lifetime.
-
-// flat_set()
-// flat_set(const Compare& comp)
-
-TEST(FlatSet, DefaultConstructor) {
- {
- IntSet cont;
- EXPECT_THAT(cont, ElementsAre());
- }
-
- {
- SetWithStrangeCompare cont(NonDefaultConstructibleCompare(0));
- EXPECT_THAT(cont, ElementsAre());
- }
-}
-
-// flat_set(InputIterator first,
-// InputIterator last,
-// const Compare& comp = Compare())
-
TEST(FlatSet, RangeConstructor) {
- {
- IntSet::value_type input_vals[] = {1, 1, 1, 2, 2, 2, 3, 3, 3};
-
- IntSet cont(MakeInputIterator(std::begin(input_vals)),
- MakeInputIterator(std::end(input_vals)));
- EXPECT_THAT(cont, ElementsAre(1, 2, 3));
- }
- {
- SetWithStrangeCompare::value_type input_vals[] = {1, 1, 1, 2, 2,
- 2, 3, 3, 3};
-
- SetWithStrangeCompare cont(MakeInputIterator(std::begin(input_vals)),
- MakeInputIterator(std::end(input_vals)),
- NonDefaultConstructibleCompare(0));
- EXPECT_THAT(cont, ElementsAre(1, 2, 3));
- }
-}
-
-// flat_set(const flat_set& x)
-
-TEST(FlatSet, CopyConstructor) {
- IntSet original{1, 2, 3, 4};
- IntSet copied(original);
-
- EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
+ flat_set<int>::value_type input_vals[] = {1, 1, 1, 2, 2, 2, 3, 3, 3};
- EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
- EXPECT_THAT(original, ElementsAre(1, 2, 3, 4));
- EXPECT_EQ(original, copied);
+ flat_set<int> cont(std::begin(input_vals), std::end(input_vals),
+ base::KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3));
}
-// flat_set(flat_set&& x)
-
TEST(FlatSet, MoveConstructor) {
int input_range[] = {1, 2, 3, 4};
- MoveOnlySet original(std::begin(input_range), std::end(input_range));
- MoveOnlySet moved(std::move(original));
+ flat_set<MoveOnlyInt> original(std::begin(input_range), std::end(input_range),
+ base::KEEP_FIRST_OF_DUPES);
+ flat_set<MoveOnlyInt> moved(std::move(original));
- EXPECT_EQ(1U, moved.count(MoveOnly(1)));
- EXPECT_EQ(1U, moved.count(MoveOnly(2)));
- EXPECT_EQ(1U, moved.count(MoveOnly(3)));
- EXPECT_EQ(1U, moved.count(MoveOnly(4)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
}
-// flat_set(std::initializer_list<value_type> ilist,
-// const Compare& comp = Compare())
-
TEST(FlatSet, InitializerListConstructor) {
- {
- IntSet cont{1, 2, 3, 4, 5, 6, 10, 8};
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
- }
- {
- SetWithStrangeCompare cont({1, 2, 3, 4, 5, 6, 10, 8},
- NonDefaultConstructibleCompare(0));
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
- }
-}
-
-// ----------------------------------------------------------------------------
-// Assignments.
-
-// flat_set& operator=(const flat_set&)
-
-TEST(FlatSet, CopyAssignable) {
- IntSet original{1, 2, 3, 4};
- IntSet copied;
- copied = original;
-
- EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
- EXPECT_THAT(original, ElementsAre(1, 2, 3, 4));
- EXPECT_EQ(original, copied);
-}
-
-// flat_set& operator=(flat_set&&)
-
-TEST(FlatSet, MoveAssignable) {
- int input_range[] = {1, 2, 3, 4};
-
- MoveOnlySet original(std::begin(input_range), std::end(input_range));
- MoveOnlySet moved;
- moved = std::move(original);
-
- EXPECT_EQ(1U, moved.count(MoveOnly(1)));
- EXPECT_EQ(1U, moved.count(MoveOnly(2)));
- EXPECT_EQ(1U, moved.count(MoveOnly(3)));
- EXPECT_EQ(1U, moved.count(MoveOnly(4)));
-}
-
-// flat_set& operator=(std::initializer_list<value_type> ilist)
-
-TEST(FlatSet, InitializerListAssignable) {
- IntSet cont{0};
- cont = {1, 2, 3, 4, 5, 6, 10, 8};
-
- EXPECT_EQ(0U, cont.count(0));
+ flat_set<int> cont({1, 2, 3, 4, 5, 6, 10, 8}, KEEP_FIRST_OF_DUPES);
EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
}
-// --------------------------------------------------------------------------
-// Memory management.
-
-// void reserve(size_type new_capacity)
-
-TEST(FlatSet, Reserve) {
- IntSet cont{1, 2, 3};
-
- cont.reserve(5);
- EXPECT_LE(5U, cont.capacity());
-}
-
-// size_type capacity() const
-
-TEST(FlatSet, Capacity) {
- IntSet cont{1, 2, 3};
-
- EXPECT_LE(cont.size(), cont.capacity());
- cont.reserve(5);
- EXPECT_LE(cont.size(), cont.capacity());
-}
-
-// void shrink_to_fit()
-
-TEST(FlatSet, ShrinkToFit) {
- IntSet cont{1, 2, 3};
-
- IntSet::size_type capacity_before = cont.capacity();
- cont.shrink_to_fit();
- EXPECT_GE(capacity_before, cont.capacity());
-}
-
-// ----------------------------------------------------------------------------
-// Size management.
-
-// void clear()
-
-TEST(FlatSet, Clear) {
- IntSet cont{1, 2, 3, 4, 5, 6, 7, 8};
- cont.clear();
- EXPECT_THAT(cont, ElementsAre());
-}
-
-// size_type size() const
-
-TEST(FlatSet, Size) {
- IntSet cont;
-
- EXPECT_EQ(0U, cont.size());
- cont.insert(2);
- EXPECT_EQ(1U, cont.size());
- cont.insert(1);
- EXPECT_EQ(2U, cont.size());
- cont.insert(3);
- EXPECT_EQ(3U, cont.size());
- cont.erase(cont.begin());
- EXPECT_EQ(2U, cont.size());
- cont.erase(cont.begin());
- EXPECT_EQ(1U, cont.size());
- cont.erase(cont.begin());
- EXPECT_EQ(0U, cont.size());
-}
-
-// bool empty() const
-
-TEST(FlatSet, Empty) {
- IntSet cont;
-
- EXPECT_TRUE(cont.empty());
- cont.insert(1);
- EXPECT_FALSE(cont.empty());
- cont.clear();
- EXPECT_TRUE(cont.empty());
-}
-
-// ----------------------------------------------------------------------------
-// Iterators.
-
-// iterator begin()
-// const_iterator begin() const
-// iterator end()
-// const_iterator end() const
-//
-// reverse_iterator rbegin()
-// const_reverse_iterator rbegin() const
-// reverse_iterator rend()
-// const_reverse_iterator rend() const
-//
-// const_iterator cbegin() const
-// const_iterator cend() const
-// const_reverse_iterator crbegin() const
-// const_reverse_iterator crend() const
-
-TEST(FlatSet, Iterators) {
- IntSet cont{1, 2, 3, 4, 5, 6, 7, 8};
-
- auto size = static_cast<IntSet::difference_type>(cont.size());
-
- EXPECT_EQ(size, std::distance(cont.begin(), cont.end()));
- EXPECT_EQ(size, std::distance(cont.cbegin(), cont.cend()));
- EXPECT_EQ(size, std::distance(cont.rbegin(), cont.rend()));
- EXPECT_EQ(size, std::distance(cont.crbegin(), cont.crend()));
-
- {
- IntSet::iterator it = cont.begin();
- IntSet::const_iterator c_it = cont.cbegin();
- EXPECT_EQ(it, c_it);
- for (int j = 1; it != cont.end(); ++it, ++c_it, ++j) {
- EXPECT_EQ(j, *it);
- EXPECT_EQ(j, *c_it);
- }
- }
- {
- IntSet::reverse_iterator rit = cont.rbegin();
- IntSet::const_reverse_iterator c_rit = cont.crbegin();
- EXPECT_EQ(rit, c_rit);
- for (int j = static_cast<int>(size); rit != cont.rend();
- ++rit, ++c_rit, --j) {
- EXPECT_EQ(j, *rit);
- EXPECT_EQ(j, *c_rit);
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-// Insert operations.
-
-// pair<iterator, bool> insert(const value_type& val)
-
-TEST(FlatSet, InsertLValue) {
- IntSet cont;
-
- int value = 2;
- std::pair<IntSet::iterator, bool> result = cont.insert(value);
- EXPECT_TRUE(result.second);
- EXPECT_EQ(cont.begin(), result.first);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(2, *result.first);
-
- value = 1;
- result = cont.insert(value);
- EXPECT_TRUE(result.second);
- EXPECT_EQ(cont.begin(), result.first);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(1, *result.first);
-
- value = 3;
- result = cont.insert(value);
- EXPECT_TRUE(result.second);
- EXPECT_EQ(std::prev(cont.end()), result.first);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, *result.first);
-
- value = 3;
- result = cont.insert(value);
- EXPECT_FALSE(result.second);
- EXPECT_EQ(std::prev(cont.end()), result.first);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, *result.first);
-}
-
-// pair<iterator, bool> insert(value_type&& val)
-
-TEST(FlatSet, InsertRValue) {
- MoveOnlySet cont;
-
- std::pair<MoveOnlySet::iterator, bool> result = cont.insert(MoveOnly(2));
- EXPECT_TRUE(result.second);
- EXPECT_EQ(cont.begin(), result.first);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(2, result.first->data());
-
- result = cont.insert(MoveOnly(1));
- EXPECT_TRUE(result.second);
- EXPECT_EQ(cont.begin(), result.first);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(1, result.first->data());
-
- result = cont.insert(MoveOnly(3));
- EXPECT_TRUE(result.second);
- EXPECT_EQ(std::prev(cont.end()), result.first);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, result.first->data());
-
- result = cont.insert(MoveOnly(3));
- EXPECT_FALSE(result.second);
- EXPECT_EQ(std::prev(cont.end()), result.first);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, result.first->data());
-}
-
-// iterator insert(const_iterator position_hint, const value_type& val)
-
-TEST(FlatSet, InsertPositionLValue) {
- IntSet cont;
-
- IntSet::iterator result = cont.insert(cont.cend(), 2);
- EXPECT_EQ(cont.begin(), result);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(2, *result);
-
- result = cont.insert(cont.cend(), 1);
- EXPECT_EQ(cont.begin(), result);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(1, *result);
-
- result = cont.insert(cont.cend(), 3);
- EXPECT_EQ(std::prev(cont.end()), result);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, *result);
-
- result = cont.insert(cont.cend(), 3);
- EXPECT_EQ(std::prev(cont.end()), result);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, *result);
-}
-
-// iterator insert(const_iterator position_hint, value_type&& val)
-
-TEST(FlatSet, InsertPositionRValue) {
- MoveOnlySet cont;
-
- MoveOnlySet::iterator result = cont.insert(cont.cend(), MoveOnly(2));
- EXPECT_EQ(cont.begin(), result);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(2, result->data());
-
- result = cont.insert(cont.cend(), MoveOnly(1));
- EXPECT_EQ(cont.begin(), result);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(1, result->data());
-
- result = cont.insert(cont.cend(), MoveOnly(3));
- EXPECT_EQ(std::prev(cont.end()), result);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, result->data());
+TEST(FlatSet, InsertFindSize) {
+ base::flat_set<int> s;
+ s.insert(1);
+ s.insert(1);
+ s.insert(2);
- result = cont.insert(cont.cend(), MoveOnly(3));
- EXPECT_EQ(std::prev(cont.end()), result);
- EXPECT_EQ(3U, cont.size());
- EXPECT_EQ(3, result->data());
+ EXPECT_EQ(2u, s.size());
+ EXPECT_EQ(1, *s.find(1));
+ EXPECT_EQ(2, *s.find(2));
+ EXPECT_EQ(s.end(), s.find(7));
}
-// template <class... Args>
-// pair<iterator, bool> emplace(Args&&... args)
+TEST(FlatSet, CopySwap) {
+ base::flat_set<int> original;
+ original.insert(1);
+ original.insert(2);
+ EXPECT_THAT(original, ElementsAre(1, 2));
-TEST(FlatSet, Emplace) {
- {
- EmplaceableSet cont;
+ base::flat_set<int> copy(original);
+ EXPECT_THAT(copy, ElementsAre(1, 2));
- std::pair<EmplaceableSet::iterator, bool> result = cont.emplace();
- EXPECT_TRUE(result.second);
- EXPECT_EQ(cont.begin(), result.first);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(Emplaceable(), *cont.begin());
+ copy.erase(copy.begin());
+ copy.insert(10);
+ EXPECT_THAT(copy, ElementsAre(2, 10));
- result = cont.emplace(2, 3.5);
- EXPECT_TRUE(result.second);
- EXPECT_EQ(std::next(cont.begin()), result.first);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
-
- result = cont.emplace(2, 3.5);
- EXPECT_FALSE(result.second);
- EXPECT_EQ(std::next(cont.begin()), result.first);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
- }
- {
- IntSet cont;
-
- std::pair<IntSet::iterator, bool> result = cont.emplace(2);
- EXPECT_TRUE(result.second);
- EXPECT_EQ(cont.begin(), result.first);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(2, *result.first);
- }
+ original.swap(copy);
+ EXPECT_THAT(original, ElementsAre(2, 10));
+ EXPECT_THAT(copy, ElementsAre(1, 2));
}
-// template <class... Args>
-// iterator emplace_hint(const_iterator position_hint, Args&&... args)
-
-TEST(FlatSet, EmplacePosition) {
- {
- EmplaceableSet cont;
-
- EmplaceableSet::iterator result = cont.emplace_hint(cont.cend());
- EXPECT_EQ(cont.begin(), result);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(Emplaceable(), *cont.begin());
-
- result = cont.emplace_hint(cont.cend(), 2, 3.5);
- EXPECT_EQ(std::next(cont.begin()), result);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(Emplaceable(2, 3.5), *result);
-
- result = cont.emplace_hint(cont.cbegin(), 2, 3.5);
- EXPECT_EQ(std::next(cont.begin()), result);
- EXPECT_EQ(2U, cont.size());
- EXPECT_EQ(Emplaceable(2, 3.5), *result);
- }
- {
- IntSet cont;
-
- IntSet::iterator result = cont.emplace_hint(cont.cend(), 2);
- EXPECT_EQ(cont.begin(), result);
- EXPECT_EQ(1U, cont.size());
- EXPECT_EQ(2, *result);
- }
-}
-
-// ----------------------------------------------------------------------------
-// Erase operations.
-
-// iterator erase(const_iterator position_hint)
-
-TEST(FlatSet, ErasePosition) {
- IntSet cont{1, 2, 3, 4, 5, 6, 7, 8};
-
- IntSet::iterator it = cont.erase(std::next(cont.cbegin(), 3));
- EXPECT_EQ(std::next(cont.begin(), 3), it);
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
-
- it = cont.erase(std::next(cont.cbegin(), 0));
- EXPECT_EQ(cont.begin(), it);
- EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8));
-
- it = cont.erase(std::next(cont.cbegin(), 5));
- EXPECT_EQ(cont.end(), it);
- EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7));
-
- it = cont.erase(std::next(cont.cbegin(), 1));
- EXPECT_EQ(std::next(cont.begin()), it);
- EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7));
-
- it = cont.erase(std::next(cont.cbegin(), 2));
- EXPECT_EQ(std::next(cont.begin(), 2), it);
- EXPECT_THAT(cont, ElementsAre(2, 5, 7));
-
- it = cont.erase(std::next(cont.cbegin(), 2));
- EXPECT_EQ(std::next(cont.begin(), 2), it);
- EXPECT_THAT(cont, ElementsAre(2, 5));
-
- it = cont.erase(std::next(cont.cbegin(), 0));
- EXPECT_EQ(std::next(cont.begin(), 0), it);
- EXPECT_THAT(cont, ElementsAre(5));
-
- it = cont.erase(cont.cbegin());
- EXPECT_EQ(cont.begin(), it);
- EXPECT_EQ(cont.end(), it);
-}
-
-// iterator erase(const_iterator first, const_iterator last)
-
-TEST(FlatSet, EraseRange) {
- IntSet cont{1, 2, 3, 4, 5, 6, 7, 8};
-
- IntSet::iterator it =
- cont.erase(std::next(cont.cbegin(), 5), std::next(cont.cbegin(), 5));
- EXPECT_EQ(std::next(cont.begin(), 5), it);
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
-
- it = cont.erase(std::next(cont.cbegin(), 3), std::next(cont.cbegin(), 4));
- EXPECT_EQ(std::next(cont.begin(), 3), it);
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
-
- it = cont.erase(std::next(cont.cbegin(), 2), std::next(cont.cbegin(), 5));
- EXPECT_EQ(std::next(cont.begin(), 2), it);
- EXPECT_THAT(cont, ElementsAre(1, 2, 7, 8));
-
- it = cont.erase(std::next(cont.cbegin(), 0), std::next(cont.cbegin(), 2));
- EXPECT_EQ(std::next(cont.begin(), 0), it);
- EXPECT_THAT(cont, ElementsAre(7, 8));
-
- it = cont.erase(cont.cbegin(), cont.cend());
- EXPECT_EQ(cont.begin(), it);
- EXPECT_EQ(cont.end(), it);
-}
-
-// size_type erase(const key_type& key)
-
-TEST(FlatSet, EraseKey) {
- IntSet cont{1, 2, 3, 4, 5, 6, 7, 8};
-
- EXPECT_EQ(0U, cont.erase(9));
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
-
- EXPECT_EQ(1U, cont.erase(4));
- EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
-
- EXPECT_EQ(1U, cont.erase(1));
- EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8));
-
- EXPECT_EQ(1U, cont.erase(8));
- EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7));
-
- EXPECT_EQ(1U, cont.erase(3));
- EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7));
-
- EXPECT_EQ(1U, cont.erase(6));
- EXPECT_THAT(cont, ElementsAre(2, 5, 7));
-
- EXPECT_EQ(1U, cont.erase(7));
- EXPECT_THAT(cont, ElementsAre(2, 5));
-
- EXPECT_EQ(1U, cont.erase(2));
- EXPECT_THAT(cont, ElementsAre(5));
-
- EXPECT_EQ(1U, cont.erase(5));
- EXPECT_THAT(cont, ElementsAre());
-}
-
-// ----------------------------------------------------------------------------
-// Comparators.
-
-// key_compare key_comp() const
-
-TEST(FlatSet, KeyComp) {
- ReversedSet cont{1, 2, 3, 4, 5};
-
- EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.key_comp()));
- int new_elements[] = {6, 7, 8, 9, 10};
- std::copy(std::begin(new_elements), std::end(new_elements),
- std::inserter(cont, cont.end()));
- EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.key_comp()));
-}
-
-// value_compare value_comp() const
-
-TEST(FlatSet, ValueComp) {
- ReversedSet cont{1, 2, 3, 4, 5};
-
- EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.value_comp()));
- int new_elements[] = {6, 7, 8, 9, 10};
- std::copy(std::begin(new_elements), std::end(new_elements),
- std::inserter(cont, cont.end()));
- EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.value_comp()));
-}
-
-// ----------------------------------------------------------------------------
-// Search operations.
-
-// size_type count(const key_type& key) const
-
-TEST(FlatSet, Count) {
- {
- const IntSet cont{5, 6, 7, 8, 9, 10, 11, 12};
-
- EXPECT_EQ(1U, cont.count(5));
- EXPECT_EQ(1U, cont.count(6));
- EXPECT_EQ(1U, cont.count(7));
- EXPECT_EQ(1U, cont.count(8));
- EXPECT_EQ(1U, cont.count(9));
- EXPECT_EQ(1U, cont.count(10));
- EXPECT_EQ(1U, cont.count(11));
- EXPECT_EQ(1U, cont.count(12));
- EXPECT_EQ(0U, cont.count(4));
- }
- {
- const SetWithLess cont{5, 6, 7, 8, 9, 10, 11, 12};
-
- EXPECT_EQ(1U, cont.count(5));
- EXPECT_EQ(1U, cont.count(6));
- EXPECT_EQ(1U, cont.count(7));
- EXPECT_EQ(1U, cont.count(8));
- EXPECT_EQ(1U, cont.count(9));
- EXPECT_EQ(1U, cont.count(10));
- EXPECT_EQ(1U, cont.count(11));
- EXPECT_EQ(1U, cont.count(12));
- EXPECT_EQ(0U, cont.count(4));
- }
-}
-
-// iterator find(const key_type& key)
-// const_iterator find(const key_type& key) const
-
-TEST(FlatSet, Find) {
- {
- IntSet cont{5, 6, 7, 8, 9, 10, 11, 12};
-
- EXPECT_EQ(cont.begin(), cont.find(5));
- EXPECT_EQ(std::next(cont.begin()), cont.find(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
- }
- {
- const IntSet cont{5, 6, 7, 8, 9, 10, 11, 12};
-
- EXPECT_EQ(cont.begin(), cont.find(5));
- EXPECT_EQ(std::next(cont.begin()), cont.find(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
- }
- {
- SetWithLess cont{5, 6, 7, 8, 9, 10, 11, 12};
-
- EXPECT_EQ(cont.begin(), cont.find(5));
- EXPECT_EQ(std::next(cont.begin()), cont.find(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
- }
-}
-
-// pair<iterator, iterator> equal_range(const key_type& key)
-// pair<const_iterator, const_iterator> equal_range(const key_type& key) const
-
-TEST(FlatSet, EqualRange) {
- {
- IntSet cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- std::pair<IntSet::iterator, IntSet::iterator> result = cont.equal_range(5);
- EXPECT_EQ(std::next(cont.begin(), 0), result.first);
- EXPECT_EQ(std::next(cont.begin(), 1), result.second);
- result = cont.equal_range(7);
- EXPECT_EQ(std::next(cont.begin(), 1), result.first);
- EXPECT_EQ(std::next(cont.begin(), 2), result.second);
- result = cont.equal_range(9);
- EXPECT_EQ(std::next(cont.begin(), 2), result.first);
- EXPECT_EQ(std::next(cont.begin(), 3), result.second);
- result = cont.equal_range(11);
- EXPECT_EQ(std::next(cont.begin(), 3), result.first);
- EXPECT_EQ(std::next(cont.begin(), 4), result.second);
- result = cont.equal_range(13);
- EXPECT_EQ(std::next(cont.begin(), 4), result.first);
- EXPECT_EQ(std::next(cont.begin(), 5), result.second);
- result = cont.equal_range(15);
- EXPECT_EQ(std::next(cont.begin(), 5), result.first);
- EXPECT_EQ(std::next(cont.begin(), 6), result.second);
- result = cont.equal_range(17);
- EXPECT_EQ(std::next(cont.begin(), 6), result.first);
- EXPECT_EQ(std::next(cont.begin(), 7), result.second);
- result = cont.equal_range(19);
- EXPECT_EQ(std::next(cont.begin(), 7), result.first);
- EXPECT_EQ(std::next(cont.begin(), 8), result.second);
- result = cont.equal_range(4);
- EXPECT_EQ(std::next(cont.begin(), 0), result.first);
- EXPECT_EQ(std::next(cont.begin(), 0), result.second);
- result = cont.equal_range(6);
- EXPECT_EQ(std::next(cont.begin(), 1), result.first);
- EXPECT_EQ(std::next(cont.begin(), 1), result.second);
- result = cont.equal_range(8);
- EXPECT_EQ(std::next(cont.begin(), 2), result.first);
- EXPECT_EQ(std::next(cont.begin(), 2), result.second);
- result = cont.equal_range(10);
- EXPECT_EQ(std::next(cont.begin(), 3), result.first);
- EXPECT_EQ(std::next(cont.begin(), 3), result.second);
- result = cont.equal_range(12);
- EXPECT_EQ(std::next(cont.begin(), 4), result.first);
- EXPECT_EQ(std::next(cont.begin(), 4), result.second);
- result = cont.equal_range(14);
- EXPECT_EQ(std::next(cont.begin(), 5), result.first);
- EXPECT_EQ(std::next(cont.begin(), 5), result.second);
- result = cont.equal_range(16);
- EXPECT_EQ(std::next(cont.begin(), 6), result.first);
- EXPECT_EQ(std::next(cont.begin(), 6), result.second);
- result = cont.equal_range(18);
- EXPECT_EQ(std::next(cont.begin(), 7), result.first);
- EXPECT_EQ(std::next(cont.begin(), 7), result.second);
- result = cont.equal_range(20);
- EXPECT_EQ(std::next(cont.begin(), 8), result.first);
- EXPECT_EQ(std::next(cont.begin(), 8), result.second);
- }
- {
- const IntSet cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- std::pair<IntSet::const_iterator, IntSet::const_iterator> result =
- cont.equal_range(5);
- EXPECT_EQ(std::next(cont.begin(), 0), result.first);
- EXPECT_EQ(std::next(cont.begin(), 1), result.second);
- result = cont.equal_range(7);
- EXPECT_EQ(std::next(cont.begin(), 1), result.first);
- EXPECT_EQ(std::next(cont.begin(), 2), result.second);
- result = cont.equal_range(9);
- EXPECT_EQ(std::next(cont.begin(), 2), result.first);
- EXPECT_EQ(std::next(cont.begin(), 3), result.second);
- result = cont.equal_range(11);
- EXPECT_EQ(std::next(cont.begin(), 3), result.first);
- EXPECT_EQ(std::next(cont.begin(), 4), result.second);
- result = cont.equal_range(13);
- EXPECT_EQ(std::next(cont.begin(), 4), result.first);
- EXPECT_EQ(std::next(cont.begin(), 5), result.second);
- result = cont.equal_range(15);
- EXPECT_EQ(std::next(cont.begin(), 5), result.first);
- EXPECT_EQ(std::next(cont.begin(), 6), result.second);
- result = cont.equal_range(17);
- EXPECT_EQ(std::next(cont.begin(), 6), result.first);
- EXPECT_EQ(std::next(cont.begin(), 7), result.second);
- result = cont.equal_range(19);
- EXPECT_EQ(std::next(cont.begin(), 7), result.first);
- EXPECT_EQ(std::next(cont.begin(), 8), result.second);
- result = cont.equal_range(4);
- EXPECT_EQ(std::next(cont.begin(), 0), result.first);
- EXPECT_EQ(std::next(cont.begin(), 0), result.second);
- result = cont.equal_range(6);
- EXPECT_EQ(std::next(cont.begin(), 1), result.first);
- EXPECT_EQ(std::next(cont.begin(), 1), result.second);
- result = cont.equal_range(8);
- EXPECT_EQ(std::next(cont.begin(), 2), result.first);
- EXPECT_EQ(std::next(cont.begin(), 2), result.second);
- result = cont.equal_range(10);
- EXPECT_EQ(std::next(cont.begin(), 3), result.first);
- EXPECT_EQ(std::next(cont.begin(), 3), result.second);
- result = cont.equal_range(12);
- EXPECT_EQ(std::next(cont.begin(), 4), result.first);
- EXPECT_EQ(std::next(cont.begin(), 4), result.second);
- result = cont.equal_range(14);
- EXPECT_EQ(std::next(cont.begin(), 5), result.first);
- EXPECT_EQ(std::next(cont.begin(), 5), result.second);
- result = cont.equal_range(16);
- EXPECT_EQ(std::next(cont.begin(), 6), result.first);
- EXPECT_EQ(std::next(cont.begin(), 6), result.second);
- result = cont.equal_range(18);
- EXPECT_EQ(std::next(cont.begin(), 7), result.first);
- EXPECT_EQ(std::next(cont.begin(), 7), result.second);
- result = cont.equal_range(20);
- EXPECT_EQ(std::next(cont.begin(), 8), result.first);
- EXPECT_EQ(std::next(cont.begin(), 8), result.second);
- }
- {
- SetWithLess cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- std::pair<SetWithLess::iterator, SetWithLess::iterator> result =
- cont.equal_range(5);
- EXPECT_EQ(std::next(cont.begin(), 0), result.first);
- EXPECT_EQ(std::next(cont.begin(), 1), result.second);
- result = cont.equal_range(7);
- EXPECT_EQ(std::next(cont.begin(), 1), result.first);
- EXPECT_EQ(std::next(cont.begin(), 2), result.second);
- result = cont.equal_range(9);
- EXPECT_EQ(std::next(cont.begin(), 2), result.first);
- EXPECT_EQ(std::next(cont.begin(), 3), result.second);
- result = cont.equal_range(11);
- EXPECT_EQ(std::next(cont.begin(), 3), result.first);
- EXPECT_EQ(std::next(cont.begin(), 4), result.second);
- result = cont.equal_range(13);
- EXPECT_EQ(std::next(cont.begin(), 4), result.first);
- EXPECT_EQ(std::next(cont.begin(), 5), result.second);
- result = cont.equal_range(15);
- EXPECT_EQ(std::next(cont.begin(), 5), result.first);
- EXPECT_EQ(std::next(cont.begin(), 6), result.second);
- result = cont.equal_range(17);
- EXPECT_EQ(std::next(cont.begin(), 6), result.first);
- EXPECT_EQ(std::next(cont.begin(), 7), result.second);
- result = cont.equal_range(19);
- EXPECT_EQ(std::next(cont.begin(), 7), result.first);
- EXPECT_EQ(std::next(cont.begin(), 8), result.second);
- result = cont.equal_range(4);
- EXPECT_EQ(std::next(cont.begin(), 0), result.first);
- EXPECT_EQ(std::next(cont.begin(), 0), result.second);
- result = cont.equal_range(6);
- EXPECT_EQ(std::next(cont.begin(), 1), result.first);
- EXPECT_EQ(std::next(cont.begin(), 1), result.second);
- result = cont.equal_range(8);
- EXPECT_EQ(std::next(cont.begin(), 2), result.first);
- EXPECT_EQ(std::next(cont.begin(), 2), result.second);
- result = cont.equal_range(10);
- EXPECT_EQ(std::next(cont.begin(), 3), result.first);
- EXPECT_EQ(std::next(cont.begin(), 3), result.second);
- result = cont.equal_range(12);
- EXPECT_EQ(std::next(cont.begin(), 4), result.first);
- EXPECT_EQ(std::next(cont.begin(), 4), result.second);
- result = cont.equal_range(14);
- EXPECT_EQ(std::next(cont.begin(), 5), result.first);
- EXPECT_EQ(std::next(cont.begin(), 5), result.second);
- result = cont.equal_range(16);
- EXPECT_EQ(std::next(cont.begin(), 6), result.first);
- EXPECT_EQ(std::next(cont.begin(), 6), result.second);
- result = cont.equal_range(18);
- EXPECT_EQ(std::next(cont.begin(), 7), result.first);
- EXPECT_EQ(std::next(cont.begin(), 7), result.second);
- result = cont.equal_range(20);
- EXPECT_EQ(std::next(cont.begin(), 8), result.first);
- EXPECT_EQ(std::next(cont.begin(), 8), result.second);
- }
-}
-
-// iterator lower_bound(const key_type& key);
-// const_iterator lower_bound(const key_type& key) const;
-
-TEST(FlatSet, LowerBound) {
- {
- IntSet cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- EXPECT_EQ(cont.begin(), cont.lower_bound(5));
- EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
- EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
- EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
- }
- {
- const IntSet cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- EXPECT_EQ(cont.begin(), cont.lower_bound(5));
- EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
- EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
- EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
- }
- {
- SetWithLess cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- EXPECT_EQ(cont.begin(), cont.lower_bound(5));
- EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
- EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
- EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
- }
-}
-
-// iterator upper_bound(const key_type& key)
-// const_iterator upper_bound(const key_type& key) const
-
-TEST(FlatSet, UpperBound) {
- {
- IntSet cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
- EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
- EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
- }
- {
- const IntSet cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
- EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
- EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
- }
- {
- SetWithLess cont{5, 7, 9, 11, 13, 15, 17, 19};
-
- EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
- EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
- EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
- EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
- EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
- EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
- EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
- EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
- EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
- EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
- }
-}
-
-// ----------------------------------------------------------------------------
-// General operations.
-
-// void swap(flat_set& other)
-// void swap(flat_set& lhs, flat_set& rhs)
-
-TEST(FlatSetOurs, Swap) {
- IntSet x{1, 2, 3};
- IntSet y{4};
- swap(x, y);
- EXPECT_THAT(x, ElementsAre(4));
- EXPECT_THAT(y, ElementsAre(1, 2, 3));
-
- y.swap(x);
- EXPECT_THAT(x, ElementsAre(1, 2, 3));
- EXPECT_THAT(y, ElementsAre(4));
-}
-
-// bool operator==(const flat_set& lhs, const flat_set& rhs)
-// bool operator!=(const flat_set& lhs, const flat_set& rhs)
-// bool operator<(const flat_set& lhs, const flat_set& rhs)
-// bool operator>(const flat_set& lhs, const flat_set& rhs)
-// bool operator<=(const flat_set& lhs, const flat_set& rhs)
-// bool operator>=(const flat_set& lhs, const flat_set& rhs)
-
-TEST(FlatSet, Comparison) {
- // Provided comparator does not participate in comparison.
- ReversedSet biggest{3};
- ReversedSet smallest{1};
- ReversedSet middle{1, 2};
-
- EXPECT_EQ(biggest, biggest);
- EXPECT_NE(biggest, smallest);
- EXPECT_LT(smallest, middle);
- EXPECT_LE(smallest, middle);
- EXPECT_LE(middle, middle);
- EXPECT_GT(biggest, middle);
- EXPECT_GE(biggest, middle);
- EXPECT_GE(biggest, biggest);
-}
+} // namespace base
diff --git a/chromium/base/containers/flat_tree.h b/chromium/base/containers/flat_tree.h
new file mode 100644
index 00000000000..c3a234fa784
--- /dev/null
+++ b/chromium/base/containers/flat_tree.h
@@ -0,0 +1,774 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_FLAT_TREE_H_
+#define BASE_CONTAINERS_FLAT_TREE_H_
+
+#include <algorithm>
+#include <vector>
+
+namespace base {
+
+enum FlatContainerDupes {
+ KEEP_FIRST_OF_DUPES,
+ KEEP_LAST_OF_DUPES,
+};
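+
+// For example (editorial): constructing a flat container from
+// {{1, 'a'}, {1, 'b'}} with a comparator that only inspects the int keeps
+// {1, 'a'} under KEEP_FIRST_OF_DUPES and {1, 'b'} under KEEP_LAST_OF_DUPES.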
+
+namespace internal {
+
+// This algorithm is like unique() from the standard library except that it
+// selects the last element of each run of consecutive equal values instead
+// of the first.
+template <class Iterator, class BinaryPredicate>
+Iterator LastUnique(Iterator first, Iterator last, BinaryPredicate compare) {
+ if (first == last)
+ return last;
+
+ Iterator dest = first;
+ Iterator cur = first;
+ Iterator prev = cur;
+ while (++cur != last) {
+ if (!compare(*prev, *cur)) {
+ // *prev is the last element of its run of equal values; keep it.
+ if (dest != prev)
+ *dest = std::move(*prev);
+ ++dest;
+ }
+ prev = cur;
+ }
+
+ if (dest != prev)
+ *dest = std::move(*prev);
+ return ++dest;
+}
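+
+// Worked example (editorial): for {1a, 1b, 2a, 3} with "equal ints" as the
+// predicate, std::unique() would keep the first of each run ({1a, 2a, 3});
+// LastUnique() moves the last of each run forward instead, yielding
+// {1b, 2a, 3}.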
+
+// Implementation of a sorted vector for backing flat_set and flat_map. Do not
+// use directly.
+//
+// The use of "value" here matches std::map's usage, meaning it's the thing
+// contained (in the case of a map it's a <Key, Mapped> pair). The Key is how
+// things are looked up. In the case of a set, Key == Value. In the case of
+// a map, the Key is a component of a Value.
+//
+// The helper class GetKeyFromValue provides the means to extract a key from a
+// value for comparison purposes. It should implement:
+// const Key& operator()(const Value&).
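+//
+// For a set-like container an identity extractor suffices; a sketch of the
+// shape (matching the GetKeyFromValueIdentity referenced by flat_set):
+//   template <class Key>
+//   struct GetKeyFromValueIdentity {
+//     const Key& operator()(const Key& k) const { return k; }
+//   };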
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+class flat_tree {
+ private:
+ using underlying_type = std::vector<Value>;
+
+ public:
+ // --------------------------------------------------------------------------
+ // Types.
+ //
+ using key_type = Key;
+ using key_compare = KeyCompare;
+ using value_type = Value;
+
+ // Wraps the templated key comparison to compare values.
+ class value_compare : public key_compare {
+ public:
+ value_compare() = default;
+
+ template <class Cmp>
+ explicit value_compare(Cmp&& compare_arg)
+ : KeyCompare(std::forward<Cmp>(compare_arg)) {}
+
+ bool operator()(const value_type& left, const value_type& right) const {
+ GetKeyFromValue extractor;
+ return key_compare::operator()(extractor(left), extractor(right));
+ }
+ };
+
+ using pointer = typename underlying_type::pointer;
+ using const_pointer = typename underlying_type::const_pointer;
+ using reference = typename underlying_type::reference;
+ using const_reference = typename underlying_type::const_reference;
+ using size_type = typename underlying_type::size_type;
+ using difference_type = typename underlying_type::difference_type;
+ using iterator = typename underlying_type::iterator;
+ using const_iterator = typename underlying_type::const_iterator;
+ using reverse_iterator = typename underlying_type::reverse_iterator;
+ using const_reverse_iterator =
+ typename underlying_type::const_reverse_iterator;
+
+ // --------------------------------------------------------------------------
+ // Lifetime.
+ //
+ // Constructors that take a range guarantee O(N * log^2(N)) + O(N)
+ // complexity, and O(N * log(N)) + O(N) if extra memory is available (N is
+ // the length of the range).
+ //
+ // Assume that move constructors invalidate iterators and references.
+ //
+ // The constructors that take ranges, lists, and vectors do not require that
+ // the input be sorted.
+
+ flat_tree();
+ explicit flat_tree(const key_compare& comp);
+
+ template <class InputIterator>
+ flat_tree(InputIterator first,
+ InputIterator last,
+ FlatContainerDupes dupe_handling,
+ const key_compare& comp = key_compare());
+
+ flat_tree(const flat_tree&);
+ flat_tree(flat_tree&&);
+
+ flat_tree(std::vector<value_type> items,
+ FlatContainerDupes dupe_handling,
+ const key_compare& comp = key_compare());
+
+ flat_tree(std::initializer_list<value_type> ilist,
+ FlatContainerDupes dupe_handling,
+ const key_compare& comp = key_compare());
+
+ ~flat_tree();
+
+ // --------------------------------------------------------------------------
+ // Assignments.
+ //
+ // Assume that move assignment invalidates iterators and references.
+
+ flat_tree& operator=(const flat_tree&);
+ flat_tree& operator=(flat_tree&&);
+ // Takes the first if there are duplicates in the initializer list.
+ flat_tree& operator=(std::initializer_list<value_type> ilist);
+
+ // --------------------------------------------------------------------------
+ // Memory management.
+ //
+ // Beware that shrink_to_fit() simply forwards the request to the
+ // underlying_type, and its implementation is free to optimize otherwise and
+ // leave capacity() greater than its size.
+ //
+ // reserve() and shrink_to_fit() invalidate iterators and references.
+
+ void reserve(size_type new_capacity);
+ size_type capacity() const;
+ void shrink_to_fit();
+
+ // --------------------------------------------------------------------------
+ // Size management.
+ //
+ // clear() leaves the capacity() of the flat_tree unchanged.
+
+ void clear();
+
+ size_type size() const;
+ size_type max_size() const;
+ bool empty() const;
+
+ // --------------------------------------------------------------------------
+ // Iterators.
+
+ iterator begin();
+ const_iterator begin() const;
+ const_iterator cbegin() const;
+
+ iterator end();
+ const_iterator end() const;
+ const_iterator cend() const;
+
+ reverse_iterator rbegin();
+ const_reverse_iterator rbegin() const;
+ const_reverse_iterator crbegin() const;
+
+ reverse_iterator rend();
+ const_reverse_iterator rend() const;
+ const_reverse_iterator crend() const;
+
+ // --------------------------------------------------------------------------
+ // Insert operations.
+ //
+ // Assume that every operation invalidates iterators and references.
+ // Insertion of one element can take O(size). See the Notes section in the
+ // class comments on why we do not currently implement range insertion.
+ // Capacity of flat_tree grows in an implementation-defined manner.
+ //
+ // NOTE: Prefer to build a new flat_tree from a std::vector (or similar)
+ // instead of calling insert() repeatedly.
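+ //
+ // e.g. (editorial sketch; GatherItems() is a hypothetical source):
+ //   std::vector<int> items = GatherItems();
+ //   base::flat_set<int> s(std::move(items), base::KEEP_FIRST_OF_DUPES);
+ // This sorts once instead of paying up to O(size) per insert() call.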
+
+ std::pair<iterator, bool> insert(const value_type& val);
+ std::pair<iterator, bool> insert(value_type&& val);
+
+ iterator insert(const_iterator position_hint, const value_type& x);
+ iterator insert(const_iterator position_hint, value_type&& x);
+
+ template <class... Args>
+ std::pair<iterator, bool> emplace(Args&&... args);
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator position_hint, Args&&... args);
+
+ // --------------------------------------------------------------------------
+ // Erase operations.
+ //
+ // Assume that every operation invalidates iterators and references.
+ //
+ // erase(position), erase(first, last) can take O(size).
+ // erase(key) may take O(size) + O(log(size)).
+ //
+ // Prefer base::EraseIf() or some other variation on the
+ // erase(remove(), end()) idiom when deleting multiple non-consecutive
+ // elements (see the sketch below).
+
+ iterator erase(const_iterator position);
+ iterator erase(const_iterator first, const_iterator last);
+ size_type erase(const key_type& key);
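+
+ // e.g. (editorial; assumes a base::EraseIf overload exists for this
+ // container):
+ //   base::EraseIf(my_set, [](int v) { return v % 2 == 0; });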
+
+ // --------------------------------------------------------------------------
+ // Comparators.
+
+ key_compare key_comp() const;
+ value_compare value_comp() const;
+
+ // --------------------------------------------------------------------------
+ // Search operations.
+ //
+ // Search operations have O(log(size)) complexity.
+
+ size_type count(const key_type& key) const;
+
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ std::pair<iterator, iterator> equal_range(const key_type& key);
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_type& key) const;
+
+ iterator lower_bound(const key_type& key);
+ const_iterator lower_bound(const key_type& key) const;
+
+ iterator upper_bound(const key_type& key);
+ const_iterator upper_bound(const key_type& key) const;
+
+ // --------------------------------------------------------------------------
+ // General operations.
+ //
+ // Assume that swap invalidates iterators and references.
+ //
+ // Implementation note: currently we use operator==() and operator<() on
+ // std::vector, because they have the same contract we need, so we use them
+ // directly for brevity and in case it is more optimal than calling equal()
+ // and lexicographical_compare(). If the underlying container type is
+ // changed, this code may need to be modified.
+
+ void swap(flat_tree& other);
+
+ friend bool operator==(const flat_tree& lhs, const flat_tree& rhs) {
+ return lhs.impl_.body_ == rhs.impl_.body_;
+ }
+
+ friend bool operator!=(const flat_tree& lhs, const flat_tree& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend bool operator<(const flat_tree& lhs, const flat_tree& rhs) {
+ return lhs.impl_.body_ < rhs.impl_.body_;
+ }
+
+ friend bool operator>(const flat_tree& lhs, const flat_tree& rhs) {
+ return rhs < lhs;
+ }
+
+ friend bool operator>=(const flat_tree& lhs, const flat_tree& rhs) {
+ return !(lhs < rhs);
+ }
+
+ friend bool operator<=(const flat_tree& lhs, const flat_tree& rhs) {
+ return !(lhs > rhs);
+ }
+
+ friend void swap(flat_tree& lhs, flat_tree& rhs) { lhs.swap(rhs); }
+
+ protected:
+ // Emplaces a new item into the tree that is known not to be in it. This
+ // is for implementing map operator[].
+ template <class... Args>
+ iterator unsafe_emplace(const_iterator position, Args&&... args);
+
+ private:
+ // Helper class for e.g. lower_bound that can compare a value on the left
+ // to a key on the right.
+ struct KeyValueCompare {
+ // The key comparison object must outlive this class.
+ explicit KeyValueCompare(const key_compare& key_comp)
+ : key_comp_(key_comp) {}
+
+ bool operator()(const value_type& left, const key_type& right) const {
+ GetKeyFromValue extractor;
+ return key_comp_(extractor(left), right);
+ }
+
+ private:
+ const key_compare& key_comp_;
+ };
+
+ const flat_tree& as_const() { return *this; }
+
+ iterator const_cast_it(const_iterator c_it) {
+ auto distance = std::distance(cbegin(), c_it);
+ return std::next(begin(), distance);
+ }
+
+ void sort_and_unique(FlatContainerDupes dupes) {
+ // Preserve stability for the unique code below.
+ std::stable_sort(begin(), end(), impl_.get_value_comp());
+
+ auto comparator = [this](const value_type& lhs, const value_type& rhs) {
+ // lhs is already <= rhs due to sort, therefore
+ // !(lhs < rhs) <=> lhs == rhs.
+ return !impl_.get_value_comp()(lhs, rhs);
+ };
+
+ iterator erase_after;
+ switch (dupes) {
+ case KEEP_FIRST_OF_DUPES:
+ erase_after = std::unique(begin(), end(), comparator);
+ break;
+ case KEEP_LAST_OF_DUPES:
+ erase_after = LastUnique(begin(), end(), comparator);
+ break;
+ }
+ erase(erase_after, end());
+ }
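+
+ // Note (editorial): std::stable_sort above preserves insertion order within
+ // runs of equal keys, which is what makes "first" and "last" well defined
+ // for the two FlatContainerDupes policies.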
+
+ // To support comparators that may not be default-constructible, we have to
+ // store an instance of Compare. Using this struct to store all internal
+ // state of flat_tree and using private inheritance to store compare lets us
+ // take advantage of the empty base class optimization to avoid extra space
+ // in the common case when Compare has no state (see the sketch after this
+ // class).
+ struct Impl : private value_compare {
+ Impl() = default;
+
+ template <class Cmp, class... Body>
+ explicit Impl(Cmp&& compare_arg, Body&&... underlying_type_args)
+ : value_compare(std::forward<Cmp>(compare_arg)),
+ body_(std::forward<Body>(underlying_type_args)...) {}
+
+ const value_compare& get_value_comp() const { return *this; }
+ const key_compare& get_key_comp() const { return *this; }
+
+ underlying_type body_;
+ } impl_;
+};
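+
+// A minimal sketch (editorial; relies on typical, though not guaranteed,
+// compiler behavior) of the empty base optimization Impl uses:
+//   struct Stateless { bool operator()(int a, int b) const { return a < b; } };
+//   struct Packed : private Stateless { std::vector<int> body_; };
+// On common implementations sizeof(Packed) == sizeof(std::vector<int>): the
+// stateless comparator contributes no storage.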
+
+// ----------------------------------------------------------------------------
+// Lifetime.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree() = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+ const KeyCompare& comp)
+ : impl_(comp) {}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class InputIterator>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+ InputIterator first,
+ InputIterator last,
+ FlatContainerDupes dupe_handling,
+ const KeyCompare& comp)
+ : impl_(comp, first, last) {
+ sort_and_unique(dupe_handling);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+ const flat_tree&) = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(flat_tree&&) =
+ default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+ std::vector<value_type> items,
+ FlatContainerDupes dupe_handling,
+ const KeyCompare& comp)
+ : impl_(comp, std::move(items)) {
+ sort_and_unique(dupe_handling);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+ std::initializer_list<value_type> ilist,
+ FlatContainerDupes dupe_handling,
+ const KeyCompare& comp)
+ : flat_tree(std::begin(ilist), std::end(ilist), dupe_handling, comp) {}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::~flat_tree() = default;
+
+// ----------------------------------------------------------------------------
+// Assignments.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(
+ const flat_tree&) -> flat_tree& = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(flat_tree &&)
+ -> flat_tree& = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(
+ std::initializer_list<value_type> ilist) -> flat_tree& {
+ impl_.body_ = ilist;
+ sort_and_unique(KEEP_FIRST_OF_DUPES);
+ return *this;
+}
+
+// ----------------------------------------------------------------------------
+// Memory management.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::reserve(
+ size_type new_capacity) {
+ impl_.body_.reserve(new_capacity);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::capacity() const
+ -> size_type {
+ return impl_.body_.capacity();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::shrink_to_fit() {
+ impl_.body_.shrink_to_fit();
+}
+
+// ----------------------------------------------------------------------------
+// Size management.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::clear() {
+ impl_.body_.clear();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::size() const
+ -> size_type {
+ return impl_.body_.size();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::max_size() const
+ -> size_type {
+ return impl_.body_.max_size();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+bool flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::empty() const {
+ return impl_.body_.empty();
+}
+
+// ----------------------------------------------------------------------------
+// Iterators.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::begin() -> iterator {
+ return impl_.body_.begin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::begin() const
+ -> const_iterator {
+ return impl_.body_.begin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::cbegin() const
+ -> const_iterator {
+ return impl_.body_.cbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::end() -> iterator {
+ return impl_.body_.end();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::end() const
+ -> const_iterator {
+ return impl_.body_.end();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::cend() const
+ -> const_iterator {
+ return impl_.body_.cend();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rbegin()
+ -> reverse_iterator {
+ return impl_.body_.rbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rbegin() const
+ -> const_reverse_iterator {
+ return impl_.body_.rbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::crbegin() const
+ -> const_reverse_iterator {
+ return impl_.body_.crbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rend()
+ -> reverse_iterator {
+ return impl_.body_.rend();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rend() const
+ -> const_reverse_iterator {
+ return impl_.body_.rend();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::crend() const
+ -> const_reverse_iterator {
+ return impl_.body_.crend();
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+//
+// Currently we use position_hint the same way as EASTL or Boost:
+// https://github.com/electronicarts/EASTL/blob/master/include/EASTL/vector_set.h#L493
+//
+// We duplicate code between the copy and move versions so that we can avoid
+// creating a temporary value.
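+//
+// For illustration (a sketch, not part of the interface; assume a set-like
+// instantiation over int named |tree|):
+//
+//   tree.insert(10);
+//   auto it = tree.insert(tree.end(), 20);  // Correct hint: one comparison,
+//                                           // then a plain tail insertion.
+//   tree.insert(it, 5);  // Wrong hint: falls back to the hintless insert.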
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+ const value_type& val) -> std::pair<iterator, bool> {
+ auto position = lower_bound(val);
+
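+  // |position| is the first element that is not less than |val|. If |val| is
+  // strictly less than *position (or we are at the end), |val| is absent and
+  // belongs exactly at |position|.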
+ if (position == end() || impl_.get_value_comp()(val, *position))
+ return {impl_.body_.insert(position, val), true};
+
+ return {position, false};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+ value_type&& val) -> std::pair<iterator, bool> {
+ GetKeyFromValue extractor;
+ auto position = lower_bound(extractor(val));
+
+ if (position == end() || impl_.get_value_comp()(val, *position))
+ return {impl_.body_.insert(position, std::move(val)), true};
+
+ return {position, false};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+ const_iterator position_hint,
+ const value_type& val) -> iterator {
+ if (position_hint == end() || impl_.get_value_comp()(val, *position_hint)) {
+ if (position_hint == begin() ||
+ impl_.get_value_comp()(*(position_hint - 1), val))
+ // We have to cast away const because of crbug.com/677044.
+ return impl_.body_.insert(const_cast_it(position_hint), val);
+ }
+ return insert(val).first;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+ const_iterator position_hint,
+ value_type&& val) -> iterator {
+ if (position_hint == end() || impl_.get_value_comp()(val, *position_hint)) {
+ if (position_hint == begin() ||
+ impl_.get_value_comp()(*(position_hint - 1), val))
+ // We have to cast away const because of crbug.com/677044.
+ return impl_.body_.insert(const_cast_it(position_hint), std::move(val));
+ }
+ return insert(std::move(val)).first;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::emplace(Args&&... args)
+ -> std::pair<iterator, bool> {
+ return insert(value_type(std::forward<Args>(args)...));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::emplace_hint(
+ const_iterator position_hint,
+ Args&&... args) -> iterator {
+ return insert(position_hint, value_type(std::forward<Args>(args)...));
+}
+
+// ----------------------------------------------------------------------------
+// Erase operations.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
+ const_iterator position) -> iterator {
+ // We have to cast away const because of crbug.com/677044.
+ return impl_.body_.erase(const_cast_it(position));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
+ const key_type& val) -> size_type {
+ auto eq_range = equal_range(val);
+ auto res = std::distance(eq_range.first, eq_range.second);
+ // We have to cast away const because of crbug.com/677044.
+ erase(const_cast_it(eq_range.first), const_cast_it(eq_range.second));
+ return res;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
+ const_iterator first,
+ const_iterator last) -> iterator {
+ // We have to cast away const because of crbug.com/677044.
+ return impl_.body_.erase(const_cast_it(first), const_cast_it(last));
+}
+
+// ----------------------------------------------------------------------------
+// Comparators.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::key_comp() const
+ -> key_compare {
+ return impl_.get_key_comp();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::value_comp() const
+ -> value_compare {
+ return impl_.get_value_comp();
+}
+
+// ----------------------------------------------------------------------------
+// Search operations.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::count(
+ const key_type& key) const -> size_type {
+ auto eq_range = equal_range(key);
+ return std::distance(eq_range.first, eq_range.second);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::find(
+ const key_type& key) -> iterator {
+ return const_cast_it(as_const().find(key));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::find(
+ const key_type& key) const -> const_iterator {
+ auto eq_range = equal_range(key);
+ return (eq_range.first == eq_range.second) ? end() : eq_range.first;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::equal_range(
+ const key_type& key) -> std::pair<iterator, iterator> {
+ auto res = as_const().equal_range(key);
+ return {const_cast_it(res.first), const_cast_it(res.second)};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::equal_range(
+ const key_type& key) const -> std::pair<const_iterator, const_iterator> {
+ auto lower = lower_bound(key);
+
+ GetKeyFromValue extractor;
+ if (lower == end() || impl_.get_key_comp()(key, extractor(*lower)))
+ return {lower, lower};
+
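+  // The key is present. Keys in a flat_tree are unique, so the matching range
+  // is exactly one element wide.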
+ return {lower, std::next(lower)};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::lower_bound(
+ const key_type& key) -> iterator {
+ return const_cast_it(as_const().lower_bound(key));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::lower_bound(
+ const key_type& key) const -> const_iterator {
+ KeyValueCompare key_value(impl_.get_key_comp());
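+  // KeyValueCompare lets std::lower_bound compare the bare |key| against the
+  // stored values.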
+ return std::lower_bound(begin(), end(), key, key_value);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::upper_bound(
+ const key_type& key) -> iterator {
+ return const_cast_it(as_const().upper_bound(key));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::upper_bound(
+ const key_type& key) const -> const_iterator {
+ KeyValueCompare key_value(impl_.get_key_comp());
+ return std::upper_bound(begin(), end(), key, key_value);
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::swap(
+ flat_tree& other) {
+ std::swap(impl_, other.impl_);
+}
+
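+// Emplaces directly at |position| without checking the ordering invariant;
+// the caller is responsible for supplying a position that keeps the container
+// sorted (hence "unsafe").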
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::unsafe_emplace(
+ const_iterator position,
+ Args&&... args) -> iterator {
+ // We have to cast away const because of crbug.com/677044.
+ return impl_.body_.emplace(const_cast_it(position),
+ std::forward<Args>(args)...);
+}
+
+// For containers like sets, the key is the same as the value. This implements
+// the GetKeyFromValue template parameter to flat_tree for this case.
+template <class Key>
+struct GetKeyFromValueIdentity {
+ const Key& operator()(const Key& k) const { return k; }
+};
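+
+// A map-like instantiation would instead extract the key from the stored
+// pair, e.g. (a hypothetical sketch; GetFirst is not defined in this file):
+//
+//   struct GetFirst {
+//     const int& operator()(const std::pair<int, std::string>& p) const {
+//       return p.first;
+//     }
+//   };
+//   using IntStringMap =
+//       flat_tree<int, std::pair<int, std::string>, GetFirst, std::less<int>>;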
+
+} // namespace internal
+
+// ----------------------------------------------------------------------------
+// Free functions.
+
+// Erases all elements that match the predicate. This has O(size) complexity.
+template <class Key,
+ class Value,
+ class GetKeyFromValue,
+ class KeyCompare,
+ typename Predicate>
+void EraseIf(base::internal::flat_tree<Key, Value, GetKeyFromValue, KeyCompare>&
+ container,
+ Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
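+
+// For example, assuming a set-like flat_tree over int named |tree|, this
+// removes every even element in one pass:
+//
+//   EraseIf(tree, [](int v) { return v % 2 == 0; });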
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_FLAT_TREE_H_
diff --git a/chromium/base/containers/flat_tree_unittest.cc b/chromium/base/containers/flat_tree_unittest.cc
new file mode 100644
index 00000000000..e3a8f879fcf
--- /dev/null
+++ b/chromium/base/containers/flat_tree_unittest.cc
@@ -0,0 +1,1385 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/flat_tree.h"
+
+// The following tests are ported and extended from the libc++ std::set tests.
+// They can be found here:
+// https://github.com/llvm-mirror/libcxx/tree/master/test/std/containers/associative/set
+//
+// Tests that were not ported:
+// * No tests with PrivateConstructor and std::less<> changed to std::less<T>.
+// These tests have to do with C++14 std::less<>
+// http://en.cppreference.com/w/cpp/utility/functional/less_void
+// and add support for templated versions of lookup functions.
+// The current implementation of flat containers doesn't support this.
+// * No tests with TemplateConstructor.
+// Library working group issue: LWG #2059
+// http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#2059
+// There is an ambiguity between erase with an iterator and erase with a key
+// if the key has a templated constructor. We have to fix this.
+// * No tests for max_size().
+// This has to do with allocator support.
+// * No tests with DefaultOnly.
+// Standard containers allocate each element in a separate node on the heap
+// and then manipulate these nodes. Flat containers store their elements in
+// contiguous memory and move them around, so the type must be movable.
+// * No tests for N3644.
+// This proposal suggests that all default-constructed iterators compare
+// equal. Currently we use std::vector iterators, and they don't implement
+// this.
+// * No tests with min_allocator and no tests counting allocations.
+// Flat sets currently don't support allocators.
+// * No tests for range insertion. Flat sets currently do not support this
+// functionality.
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "base/containers/container_test_utils.h"
+#include "base/macros.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+template <class It>
+class InputIterator {
+ public:
+ using iterator_category = std::input_iterator_tag;
+ using value_type = typename std::iterator_traits<It>::value_type;
+ using difference_type = typename std::iterator_traits<It>::difference_type;
+ using pointer = It;
+ using reference = typename std::iterator_traits<It>::reference;
+
+ InputIterator() : it_() {}
+ explicit InputIterator(It it) : it_(it) {}
+
+ reference operator*() const { return *it_; }
+ pointer operator->() const { return it_; }
+
+ InputIterator& operator++() {
+ ++it_;
+ return *this;
+ }
+ InputIterator operator++(int) {
+ InputIterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(const InputIterator& lhs, const InputIterator& rhs) {
+ return lhs.it_ == rhs.it_;
+ }
+ friend bool operator!=(const InputIterator& lhs, const InputIterator& rhs) {
+ return !(lhs == rhs);
+ }
+
+ private:
+ It it_;
+};
+
+template <typename It>
+InputIterator<It> MakeInputIterator(It it) {
+ return InputIterator<It>(it);
+}
+
+class Emplaceable {
+ public:
+ Emplaceable() : Emplaceable(0, 0.0) {}
+ Emplaceable(int i, double d) : int_(i), double_(d) {}
+ Emplaceable(Emplaceable&& other) : int_(other.int_), double_(other.double_) {
+ other.int_ = 0;
+ other.double_ = 0.0;
+ }
+
+ Emplaceable& operator=(Emplaceable&& other) {
+ int_ = other.int_;
+ other.int_ = 0;
+ double_ = other.double_;
+ other.double_ = 0.0;
+ return *this;
+ }
+
+ friend bool operator==(const Emplaceable& lhs, const Emplaceable& rhs) {
+ return std::tie(lhs.int_, lhs.double_) == std::tie(rhs.int_, rhs.double_);
+ }
+
+ friend bool operator<(const Emplaceable& lhs, const Emplaceable& rhs) {
+ return std::tie(lhs.int_, lhs.double_) < std::tie(rhs.int_, rhs.double_);
+ }
+
+ private:
+ int int_;
+ double double_;
+
+ DISALLOW_COPY_AND_ASSIGN(Emplaceable);
+};
+
+class NonDefaultConstructibleCompare {
+ public:
+ explicit NonDefaultConstructibleCompare(int) {}
+
+ template <typename T>
+ bool operator()(const T& lhs, const T& rhs) const {
+ return std::less<T>()(lhs, rhs);
+ }
+};
+
+template <class PairType>
+struct LessByFirst {
+ bool operator()(const PairType& lhs, const PairType& rhs) const {
+ return lhs.first < rhs.first;
+ }
+};
+
+// Common test trees.
+
+// TODO(dyaroshev): replace less<int> with less<> once we have it
+// (crbug.com/682254). This will make it different from IntTree.
+using IntTreeWithLess =
+ flat_tree<int, int, GetKeyFromValueIdentity<int>, std::less<int>>;
+using IntTree =
+ flat_tree<int, int, GetKeyFromValueIdentity<int>, std::less<int>>;
+using IntPair = std::pair<int, int>;
+using IntPairTree = flat_tree<IntPair,
+ IntPair,
+ GetKeyFromValueIdentity<IntPair>,
+ LessByFirst<IntPair>>;
+using MoveOnlyTree = flat_tree<MoveOnlyInt,
+ MoveOnlyInt,
+ GetKeyFromValueIdentity<MoveOnlyInt>,
+ std::less<MoveOnlyInt>>;
+using EmplaceableTree = flat_tree<Emplaceable,
+ Emplaceable,
+ GetKeyFromValueIdentity<Emplaceable>,
+ std::less<Emplaceable>>;
+using ReversedTree =
+ flat_tree<int, int, GetKeyFromValueIdentity<int>, std::greater<int>>;
+
+using TreeWithStrangeCompare = flat_tree<int,
+ int,
+ GetKeyFromValueIdentity<int>,
+ NonDefaultConstructibleCompare>;
+
+using ::testing::ElementsAre;
+
+} // namespace
+
+TEST(FlatTree, LastUnique) {
+ using Pair = std::pair<int, int>;
+ using Vect = std::vector<Pair>;
+
+ auto cmp = [](const Pair& lhs, const Pair& rhs) {
+ return lhs.first == rhs.first;
+ };
+
+ // Empty case.
+ Vect empty;
+ EXPECT_EQ(empty.end(), LastUnique(empty.begin(), empty.end(), cmp));
+
+ // Single element.
+ Vect one;
+ one.push_back(Pair(1, 1));
+ EXPECT_EQ(one.end(), LastUnique(one.begin(), one.end(), cmp));
+ ASSERT_EQ(1u, one.size());
+ EXPECT_THAT(one, ElementsAre(Pair(1, 1)));
+
+ // Two elements, already unique.
+ Vect two_u;
+ two_u.push_back(Pair(1, 1));
+ two_u.push_back(Pair(2, 2));
+ EXPECT_EQ(two_u.end(), LastUnique(two_u.begin(), two_u.end(), cmp));
+ EXPECT_THAT(two_u, ElementsAre(Pair(1, 1), Pair(2, 2)));
+
+ // Two elements, dupes.
+ Vect two_d;
+ two_d.push_back(Pair(1, 1));
+ two_d.push_back(Pair(1, 2));
+ auto last = LastUnique(two_d.begin(), two_d.end(), cmp);
+ EXPECT_EQ(two_d.begin() + 1, last);
+ two_d.erase(last, two_d.end());
+ EXPECT_THAT(two_d, ElementsAre(Pair(1, 2)));
+
+ // Non-dupes, dupes, non-dupes.
+ Vect ndn;
+ ndn.push_back(Pair(1, 1));
+ ndn.push_back(Pair(2, 1));
+ ndn.push_back(Pair(2, 2));
+ ndn.push_back(Pair(2, 3));
+ ndn.push_back(Pair(3, 1));
+ last = LastUnique(ndn.begin(), ndn.end(), cmp);
+ EXPECT_EQ(ndn.begin() + 3, last);
+ ndn.erase(last, ndn.end());
+ EXPECT_THAT(ndn, ElementsAre(Pair(1, 1), Pair(2, 3), Pair(3, 1)));
+
+ // Dupes, non-dupes, dupes.
+ Vect dnd;
+ dnd.push_back(Pair(1, 1));
+ dnd.push_back(Pair(1, 2));
+ dnd.push_back(Pair(1, 3));
+ dnd.push_back(Pair(2, 1));
+ dnd.push_back(Pair(3, 1));
+ dnd.push_back(Pair(3, 2));
+ dnd.push_back(Pair(3, 3));
+ last = LastUnique(dnd.begin(), dnd.end(), cmp);
+ EXPECT_EQ(dnd.begin() + 3, last);
+ dnd.erase(last, dnd.end());
+ EXPECT_THAT(dnd, ElementsAre(Pair(1, 3), Pair(2, 1), Pair(3, 3)));
+}
+
+// ----------------------------------------------------------------------------
+// Class.
+
+// Check that flat_tree and its iterators can be instantiated with an
+// incomplete type.
+
+TEST(FlatTree, IncompleteType) {
+ struct A {
+ using Tree = flat_tree<A, A, GetKeyFromValueIdentity<A>, std::less<A>>;
+ int data;
+ Tree set_with_incomplete_type;
+ Tree::iterator it;
+ Tree::const_iterator cit;
+
+ // We do not declare operator< because clang complains that it's unused.
+ };
+
+ A a;
+}
+
+TEST(FlatTree, Stability) {
+ using Pair = std::pair<int, int>;
+
+ using Tree =
+ flat_tree<Pair, Pair, GetKeyFromValueIdentity<Pair>, LessByFirst<Pair>>;
+
+ // Constructors are stable.
+ Tree cont({{0, 0}, {1, 0}, {0, 1}, {2, 0}, {0, 2}, {1, 1}},
+ KEEP_FIRST_OF_DUPES);
+
+ auto AllOfSecondsAreZero = [&cont] {
+ return std::all_of(cont.begin(), cont.end(),
+ [](const Pair& elem) { return elem.second == 0; });
+ };
+
+ EXPECT_TRUE(AllOfSecondsAreZero()) << "constructor should be stable";
+
+ // Should not replace existing.
+ cont.insert(Pair(0, 2));
+ cont.insert(Pair(1, 2));
+ cont.insert(Pair(2, 2));
+
+ EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable";
+
+ cont.insert(Pair(3, 0));
+ cont.insert(Pair(3, 2));
+
+ EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable";
+}
+
+// ----------------------------------------------------------------------------
+// Types.
+
+// key_type
+// key_compare
+// value_type
+// value_compare
+// pointer
+// const_pointer
+// reference
+// const_reference
+// size_type
+// difference_type
+// iterator
+// const_iterator
+// reverse_iterator
+// const_reverse_iterator
+
+TEST(FlatTree, Types) {
+ // These are guaranteed to be portable.
+ static_assert((std::is_same<int, IntTree::key_type>::value), "");
+ static_assert((std::is_same<int, IntTree::value_type>::value), "");
+ static_assert((std::is_same<std::less<int>, IntTree::key_compare>::value),
+ "");
+ static_assert((std::is_same<int&, IntTree::reference>::value), "");
+ static_assert((std::is_same<const int&, IntTree::const_reference>::value),
+ "");
+ static_assert((std::is_same<int*, IntTree::pointer>::value), "");
+ static_assert((std::is_same<const int*, IntTree::const_pointer>::value), "");
+}
+
+// ----------------------------------------------------------------------------
+// Lifetime.
+
+// flat_tree()
+// flat_tree(const Compare& comp)
+
+TEST(FlatTree, DefaultConstructor) {
+ {
+ IntTree cont;
+ EXPECT_THAT(cont, ElementsAre());
+ }
+
+ {
+ TreeWithStrangeCompare cont(NonDefaultConstructibleCompare(0));
+ EXPECT_THAT(cont, ElementsAre());
+ }
+}
+
+// flat_tree(InputIterator first,
+// InputIterator last,
+// FlatContainerDupes dupe_handling,
+// const Compare& comp = Compare())
+
+TEST(FlatTree, RangeConstructor) {
+ {
+ IntPair input_vals[] = {{1, 1}, {1, 2}, {2, 1}, {2, 2}, {1, 3},
+ {2, 3}, {3, 1}, {3, 2}, {3, 3}};
+
+ IntPairTree first_of(MakeInputIterator(std::begin(input_vals)),
+ MakeInputIterator(std::end(input_vals)),
+ KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(first_of,
+ ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1)));
+
+ IntPairTree last_of(MakeInputIterator(std::begin(input_vals)),
+ MakeInputIterator(std::end(input_vals)),
+ KEEP_LAST_OF_DUPES);
+ EXPECT_THAT(last_of,
+ ElementsAre(IntPair(1, 3), IntPair(2, 3), IntPair(3, 3)));
+ }
+ {
+ TreeWithStrangeCompare::value_type input_vals[] = {1, 1, 1, 2, 2,
+ 2, 3, 3, 3};
+
+ TreeWithStrangeCompare cont(MakeInputIterator(std::begin(input_vals)),
+ MakeInputIterator(std::end(input_vals)),
+ KEEP_FIRST_OF_DUPES,
+ NonDefaultConstructibleCompare(0));
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3));
+ }
+}
+
+// flat_tree(const flat_tree& x)
+
+TEST(FlatTree, CopyConstructor) {
+ IntTree original({1, 2, 3, 4}, KEEP_FIRST_OF_DUPES);
+ IntTree copied(original);
+
+ EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
+ EXPECT_THAT(original, ElementsAre(1, 2, 3, 4));
+ EXPECT_EQ(original, copied);
+}
+
+// flat_tree(flat_tree&& x)
+
+TEST(FlatTree, MoveConstructor) {
+ int input_range[] = {1, 2, 3, 4};
+
+ MoveOnlyTree original(std::begin(input_range), std::end(input_range),
+ KEEP_FIRST_OF_DUPES);
+ MoveOnlyTree moved(std::move(original));
+
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+// flat_tree(std::vector<value_type>, FlatContainerDupes dupe_handling)
+
+TEST(FlatTree, VectorConstructor) {
+ using Pair = std::pair<int, MoveOnlyInt>;
+
+  // Construct an unsorted vector with a duplicate item in it. Sorted by the
+  // first item, the second item allows us to test for stability. A move-only
+  // type is used to ensure the vector is not copied.
+ std::vector<Pair> storage;
+ storage.push_back(Pair(2, MoveOnlyInt(0)));
+ storage.push_back(Pair(1, MoveOnlyInt(0)));
+ storage.push_back(Pair(2, MoveOnlyInt(1)));
+
+ using Tree =
+ flat_tree<Pair, Pair, GetKeyFromValueIdentity<Pair>, LessByFirst<Pair>>;
+ Tree tree(std::move(storage), KEEP_FIRST_OF_DUPES);
+
+ // The list should be two items long, with only the first "2" saved.
+ ASSERT_EQ(2u, tree.size());
+ const Pair& zeroth = *tree.begin();
+ ASSERT_EQ(1, zeroth.first);
+ ASSERT_EQ(0, zeroth.second.data());
+
+ const Pair& first = *(tree.begin() + 1);
+ ASSERT_EQ(2, first.first);
+ ASSERT_EQ(0, first.second.data());
+
+ // Test KEEP_LAST_OF_DUPES with a simple vector constructor.
+ std::vector<IntPair> int_storage{{1, 1}, {1, 2}, {2, 1}};
+ IntPairTree int_tree(std::move(int_storage), KEEP_LAST_OF_DUPES);
+ EXPECT_THAT(int_tree, ElementsAre(IntPair(1, 2), IntPair(2, 1)));
+}
+
+// flat_tree(std::initializer_list<value_type> ilist,
+// FlatContainerDupes dupe_handling,
+// const Compare& comp = Compare())
+
+TEST(FlatTree, InitializerListConstructor) {
+ {
+ IntTree cont({1, 2, 3, 4, 5, 6, 10, 8}, KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+ }
+ {
+ TreeWithStrangeCompare cont({1, 2, 3, 4, 5, 6, 10, 8}, KEEP_FIRST_OF_DUPES,
+ NonDefaultConstructibleCompare(0));
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+ }
+ {
+ IntPairTree first_of({{1, 1}, {2, 1}, {1, 2}}, KEEP_FIRST_OF_DUPES);
+ EXPECT_THAT(first_of, ElementsAre(IntPair(1, 1), IntPair(2, 1)));
+ }
+ {
+ IntPairTree last_of({{1, 1}, {2, 1}, {1, 2}}, KEEP_LAST_OF_DUPES);
+ EXPECT_THAT(last_of, ElementsAre(IntPair(1, 2), IntPair(2, 1)));
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Assignments.
+
+// flat_tree& operator=(const flat_tree&)
+
+TEST(FlatTree, CopyAssignable) {
+ IntTree original({1, 2, 3, 4}, KEEP_FIRST_OF_DUPES);
+ IntTree copied;
+ copied = original;
+
+ EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
+ EXPECT_THAT(original, ElementsAre(1, 2, 3, 4));
+ EXPECT_EQ(original, copied);
+}
+
+// flat_tree& operator=(flat_tree&&)
+
+TEST(FlatTree, MoveAssignable) {
+ int input_range[] = {1, 2, 3, 4};
+
+ MoveOnlyTree original(std::begin(input_range), std::end(input_range),
+ KEEP_FIRST_OF_DUPES);
+ MoveOnlyTree moved;
+ moved = std::move(original);
+
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+ EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+// flat_tree& operator=(std::initializer_list<value_type> ilist)
+
+TEST(FlatTree, InitializerListAssignable) {
+ IntTree cont({0}, KEEP_FIRST_OF_DUPES);
+ cont = {1, 2, 3, 4, 5, 6, 10, 8};
+
+ EXPECT_EQ(0U, cont.count(0));
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+}
+
+// ----------------------------------------------------------------------------
+// Memory management.
+
+// void reserve(size_type new_capacity)
+
+TEST(FlatTree, Reserve) {
+ IntTree cont({1, 2, 3}, KEEP_FIRST_OF_DUPES);
+
+ cont.reserve(5);
+ EXPECT_LE(5U, cont.capacity());
+}
+
+// size_type capacity() const
+
+TEST(FlatTree, Capacity) {
+ IntTree cont({1, 2, 3}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_LE(cont.size(), cont.capacity());
+ cont.reserve(5);
+ EXPECT_LE(cont.size(), cont.capacity());
+}
+
+// void shrink_to_fit()
+
+TEST(FlatTree, ShrinkToFit) {
+ IntTree cont({1, 2, 3}, KEEP_FIRST_OF_DUPES);
+
+ IntTree::size_type capacity_before = cont.capacity();
+ cont.shrink_to_fit();
+ EXPECT_GE(capacity_before, cont.capacity());
+}
+
+// ----------------------------------------------------------------------------
+// Size management.
+
+// void clear()
+
+TEST(FlatTree, Clear) {
+ IntTree cont({1, 2, 3, 4, 5, 6, 7, 8}, KEEP_FIRST_OF_DUPES);
+ cont.clear();
+ EXPECT_THAT(cont, ElementsAre());
+}
+
+// size_type size() const
+
+TEST(FlatTree, Size) {
+ IntTree cont;
+
+ EXPECT_EQ(0U, cont.size());
+ cont.insert(2);
+ EXPECT_EQ(1U, cont.size());
+ cont.insert(1);
+ EXPECT_EQ(2U, cont.size());
+ cont.insert(3);
+ EXPECT_EQ(3U, cont.size());
+ cont.erase(cont.begin());
+ EXPECT_EQ(2U, cont.size());
+ cont.erase(cont.begin());
+ EXPECT_EQ(1U, cont.size());
+ cont.erase(cont.begin());
+ EXPECT_EQ(0U, cont.size());
+}
+
+// bool empty() const
+
+TEST(FlatTree, Empty) {
+ IntTree cont;
+
+ EXPECT_TRUE(cont.empty());
+ cont.insert(1);
+ EXPECT_FALSE(cont.empty());
+ cont.clear();
+ EXPECT_TRUE(cont.empty());
+}
+
+// ----------------------------------------------------------------------------
+// Iterators.
+
+// iterator begin()
+// const_iterator begin() const
+// iterator end()
+// const_iterator end() const
+//
+// reverse_iterator rbegin()
+// const_reverse_iterator rbegin() const
+// reverse_iterator rend()
+// const_reverse_iterator rend() const
+//
+// const_iterator cbegin() const
+// const_iterator cend() const
+// const_reverse_iterator crbegin() const
+// const_reverse_iterator crend() const
+
+TEST(FlatTree, Iterators) {
+ IntTree cont({1, 2, 3, 4, 5, 6, 7, 8}, KEEP_FIRST_OF_DUPES);
+
+ auto size = static_cast<IntTree::difference_type>(cont.size());
+
+ EXPECT_EQ(size, std::distance(cont.begin(), cont.end()));
+ EXPECT_EQ(size, std::distance(cont.cbegin(), cont.cend()));
+ EXPECT_EQ(size, std::distance(cont.rbegin(), cont.rend()));
+ EXPECT_EQ(size, std::distance(cont.crbegin(), cont.crend()));
+
+ {
+ IntTree::iterator it = cont.begin();
+ IntTree::const_iterator c_it = cont.cbegin();
+ EXPECT_EQ(it, c_it);
+ for (int j = 1; it != cont.end(); ++it, ++c_it, ++j) {
+ EXPECT_EQ(j, *it);
+ EXPECT_EQ(j, *c_it);
+ }
+ }
+ {
+ IntTree::reverse_iterator rit = cont.rbegin();
+ IntTree::const_reverse_iterator c_rit = cont.crbegin();
+ EXPECT_EQ(rit, c_rit);
+ for (int j = static_cast<int>(size); rit != cont.rend();
+ ++rit, ++c_rit, --j) {
+ EXPECT_EQ(j, *rit);
+ EXPECT_EQ(j, *c_rit);
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+
+// pair<iterator, bool> insert(const value_type& val)
+
+TEST(FlatTree, InsertLValue) {
+ IntTree cont;
+
+ int value = 2;
+ std::pair<IntTree::iterator, bool> result = cont.insert(value);
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(cont.begin(), result.first);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(2, *result.first);
+
+ value = 1;
+ result = cont.insert(value);
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(cont.begin(), result.first);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(1, *result.first);
+
+ value = 3;
+ result = cont.insert(value);
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(std::prev(cont.end()), result.first);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, *result.first);
+
+ value = 3;
+ result = cont.insert(value);
+ EXPECT_FALSE(result.second);
+ EXPECT_EQ(std::prev(cont.end()), result.first);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, *result.first);
+}
+
+// pair<iterator, bool> insert(value_type&& val)
+
+TEST(FlatTree, InsertRValue) {
+ MoveOnlyTree cont;
+
+ std::pair<MoveOnlyTree::iterator, bool> result = cont.insert(MoveOnlyInt(2));
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(cont.begin(), result.first);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(2, result.first->data());
+
+ result = cont.insert(MoveOnlyInt(1));
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(cont.begin(), result.first);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(1, result.first->data());
+
+ result = cont.insert(MoveOnlyInt(3));
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(std::prev(cont.end()), result.first);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, result.first->data());
+
+ result = cont.insert(MoveOnlyInt(3));
+ EXPECT_FALSE(result.second);
+ EXPECT_EQ(std::prev(cont.end()), result.first);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, result.first->data());
+}
+
+// iterator insert(const_iterator position_hint, const value_type& val)
+
+TEST(FlatTree, InsertPositionLValue) {
+ IntTree cont;
+
+ IntTree::iterator result = cont.insert(cont.cend(), 2);
+ EXPECT_EQ(cont.begin(), result);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(2, *result);
+
+ result = cont.insert(cont.cend(), 1);
+ EXPECT_EQ(cont.begin(), result);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(1, *result);
+
+ result = cont.insert(cont.cend(), 3);
+ EXPECT_EQ(std::prev(cont.end()), result);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, *result);
+
+ result = cont.insert(cont.cend(), 3);
+ EXPECT_EQ(std::prev(cont.end()), result);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, *result);
+}
+
+// iterator insert(const_iterator position_hint, value_type&& val)
+
+TEST(FlatTree, InsertPositionRValue) {
+ MoveOnlyTree cont;
+
+ MoveOnlyTree::iterator result = cont.insert(cont.cend(), MoveOnlyInt(2));
+ EXPECT_EQ(cont.begin(), result);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(2, result->data());
+
+ result = cont.insert(cont.cend(), MoveOnlyInt(1));
+ EXPECT_EQ(cont.begin(), result);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(1, result->data());
+
+ result = cont.insert(cont.cend(), MoveOnlyInt(3));
+ EXPECT_EQ(std::prev(cont.end()), result);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, result->data());
+
+ result = cont.insert(cont.cend(), MoveOnlyInt(3));
+ EXPECT_EQ(std::prev(cont.end()), result);
+ EXPECT_EQ(3U, cont.size());
+ EXPECT_EQ(3, result->data());
+}
+
+// template <class... Args>
+// pair<iterator, bool> emplace(Args&&... args)
+
+TEST(FlatTree, Emplace) {
+ {
+ EmplaceableTree cont;
+
+ std::pair<EmplaceableTree::iterator, bool> result = cont.emplace();
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(cont.begin(), result.first);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(Emplaceable(), *cont.begin());
+
+ result = cont.emplace(2, 3.5);
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(std::next(cont.begin()), result.first);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
+
+ result = cont.emplace(2, 3.5);
+ EXPECT_FALSE(result.second);
+ EXPECT_EQ(std::next(cont.begin()), result.first);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
+ }
+ {
+ IntTree cont;
+
+ std::pair<IntTree::iterator, bool> result = cont.emplace(2);
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(cont.begin(), result.first);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(2, *result.first);
+ }
+}
+
+// template <class... Args>
+// iterator emplace_hint(const_iterator position_hint, Args&&... args)
+
+TEST(FlatTree, EmplacePosition) {
+ {
+ EmplaceableTree cont;
+
+ EmplaceableTree::iterator result = cont.emplace_hint(cont.cend());
+ EXPECT_EQ(cont.begin(), result);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(Emplaceable(), *cont.begin());
+
+ result = cont.emplace_hint(cont.cend(), 2, 3.5);
+ EXPECT_EQ(std::next(cont.begin()), result);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(Emplaceable(2, 3.5), *result);
+
+ result = cont.emplace_hint(cont.cbegin(), 2, 3.5);
+ EXPECT_EQ(std::next(cont.begin()), result);
+ EXPECT_EQ(2U, cont.size());
+ EXPECT_EQ(Emplaceable(2, 3.5), *result);
+ }
+ {
+ IntTree cont;
+
+ IntTree::iterator result = cont.emplace_hint(cont.cend(), 2);
+ EXPECT_EQ(cont.begin(), result);
+ EXPECT_EQ(1U, cont.size());
+ EXPECT_EQ(2, *result);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Erase operations.
+
+// iterator erase(const_iterator position_hint)
+
+TEST(FlatTree, ErasePosition) {
+ IntTree cont({1, 2, 3, 4, 5, 6, 7, 8}, KEEP_FIRST_OF_DUPES);
+
+ IntTree::iterator it = cont.erase(std::next(cont.cbegin(), 3));
+ EXPECT_EQ(std::next(cont.begin(), 3), it);
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
+
+ it = cont.erase(std::next(cont.cbegin(), 0));
+ EXPECT_EQ(cont.begin(), it);
+ EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8));
+
+ it = cont.erase(std::next(cont.cbegin(), 5));
+ EXPECT_EQ(cont.end(), it);
+ EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7));
+
+ it = cont.erase(std::next(cont.cbegin(), 1));
+ EXPECT_EQ(std::next(cont.begin()), it);
+ EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7));
+
+ it = cont.erase(std::next(cont.cbegin(), 2));
+ EXPECT_EQ(std::next(cont.begin(), 2), it);
+ EXPECT_THAT(cont, ElementsAre(2, 5, 7));
+
+ it = cont.erase(std::next(cont.cbegin(), 2));
+ EXPECT_EQ(std::next(cont.begin(), 2), it);
+ EXPECT_THAT(cont, ElementsAre(2, 5));
+
+ it = cont.erase(std::next(cont.cbegin(), 0));
+ EXPECT_EQ(std::next(cont.begin(), 0), it);
+ EXPECT_THAT(cont, ElementsAre(5));
+
+ it = cont.erase(cont.cbegin());
+ EXPECT_EQ(cont.begin(), it);
+ EXPECT_EQ(cont.end(), it);
+}
+
+// iterator erase(const_iterator first, const_iterator last)
+
+TEST(FlatTree, EraseRange) {
+ IntTree cont({1, 2, 3, 4, 5, 6, 7, 8}, KEEP_FIRST_OF_DUPES);
+
+ IntTree::iterator it =
+ cont.erase(std::next(cont.cbegin(), 5), std::next(cont.cbegin(), 5));
+ EXPECT_EQ(std::next(cont.begin(), 5), it);
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+
+ it = cont.erase(std::next(cont.cbegin(), 3), std::next(cont.cbegin(), 4));
+ EXPECT_EQ(std::next(cont.begin(), 3), it);
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
+
+ it = cont.erase(std::next(cont.cbegin(), 2), std::next(cont.cbegin(), 5));
+ EXPECT_EQ(std::next(cont.begin(), 2), it);
+ EXPECT_THAT(cont, ElementsAre(1, 2, 7, 8));
+
+ it = cont.erase(std::next(cont.cbegin(), 0), std::next(cont.cbegin(), 2));
+ EXPECT_EQ(std::next(cont.begin(), 0), it);
+ EXPECT_THAT(cont, ElementsAre(7, 8));
+
+ it = cont.erase(cont.cbegin(), cont.cend());
+ EXPECT_EQ(cont.begin(), it);
+ EXPECT_EQ(cont.end(), it);
+}
+
+// size_type erase(const key_type& key)
+
+TEST(FlatTree, EraseKey) {
+ IntTree cont({1, 2, 3, 4, 5, 6, 7, 8}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(0U, cont.erase(9));
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+
+ EXPECT_EQ(1U, cont.erase(4));
+ EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
+
+ EXPECT_EQ(1U, cont.erase(1));
+ EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8));
+
+ EXPECT_EQ(1U, cont.erase(8));
+ EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7));
+
+ EXPECT_EQ(1U, cont.erase(3));
+ EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7));
+
+ EXPECT_EQ(1U, cont.erase(6));
+ EXPECT_THAT(cont, ElementsAre(2, 5, 7));
+
+ EXPECT_EQ(1U, cont.erase(7));
+ EXPECT_THAT(cont, ElementsAre(2, 5));
+
+ EXPECT_EQ(1U, cont.erase(2));
+ EXPECT_THAT(cont, ElementsAre(5));
+
+ EXPECT_EQ(1U, cont.erase(5));
+ EXPECT_THAT(cont, ElementsAre());
+}
+
+// ----------------------------------------------------------------------------
+// Comparators.
+
+// key_compare key_comp() const
+
+TEST(FlatTree, KeyComp) {
+ ReversedTree cont({1, 2, 3, 4, 5}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.key_comp()));
+ int new_elements[] = {6, 7, 8, 9, 10};
+ std::copy(std::begin(new_elements), std::end(new_elements),
+ std::inserter(cont, cont.end()));
+ EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.key_comp()));
+}
+
+// value_compare value_comp() const
+
+TEST(FlatTree, ValueComp) {
+ ReversedTree cont({1, 2, 3, 4, 5}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.value_comp()));
+ int new_elements[] = {6, 7, 8, 9, 10};
+ std::copy(std::begin(new_elements), std::end(new_elements),
+ std::inserter(cont, cont.end()));
+ EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.value_comp()));
+}
+
+// ----------------------------------------------------------------------------
+// Search operations.
+
+// size_type count(const key_type& key) const
+
+TEST(FlatTree, Count) {
+ {
+ const IntTree cont({5, 6, 7, 8, 9, 10, 11, 12}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(1U, cont.count(5));
+ EXPECT_EQ(1U, cont.count(6));
+ EXPECT_EQ(1U, cont.count(7));
+ EXPECT_EQ(1U, cont.count(8));
+ EXPECT_EQ(1U, cont.count(9));
+ EXPECT_EQ(1U, cont.count(10));
+ EXPECT_EQ(1U, cont.count(11));
+ EXPECT_EQ(1U, cont.count(12));
+ EXPECT_EQ(0U, cont.count(4));
+ }
+ {
+ const IntTreeWithLess cont({5, 6, 7, 8, 9, 10, 11, 12},
+ KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(1U, cont.count(5));
+ EXPECT_EQ(1U, cont.count(6));
+ EXPECT_EQ(1U, cont.count(7));
+ EXPECT_EQ(1U, cont.count(8));
+ EXPECT_EQ(1U, cont.count(9));
+ EXPECT_EQ(1U, cont.count(10));
+ EXPECT_EQ(1U, cont.count(11));
+ EXPECT_EQ(1U, cont.count(12));
+ EXPECT_EQ(0U, cont.count(4));
+ }
+}
+
+// iterator find(const key_type& key)
+// const_iterator find(const key_type& key) const
+
+TEST(FlatTree, Find) {
+ {
+ IntTree cont({5, 6, 7, 8, 9, 10, 11, 12}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(cont.begin(), cont.find(5));
+ EXPECT_EQ(std::next(cont.begin()), cont.find(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
+ }
+ {
+ const IntTree cont({5, 6, 7, 8, 9, 10, 11, 12}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(cont.begin(), cont.find(5));
+ EXPECT_EQ(std::next(cont.begin()), cont.find(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
+ }
+ {
+ IntTreeWithLess cont({5, 6, 7, 8, 9, 10, 11, 12}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(cont.begin(), cont.find(5));
+ EXPECT_EQ(std::next(cont.begin()), cont.find(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
+ }
+}
+
+// pair<iterator, iterator> equal_range(const key_type& key)
+// pair<const_iterator, const_iterator> equal_range(const key_type& key) const
+
+TEST(FlatTree, EqualRange) {
+ {
+ IntTree cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ std::pair<IntTree::iterator, IntTree::iterator> result =
+ cont.equal_range(5);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+ result = cont.equal_range(7);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+ result = cont.equal_range(9);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+ result = cont.equal_range(11);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+ result = cont.equal_range(13);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+ result = cont.equal_range(15);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+ result = cont.equal_range(17);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+ result = cont.equal_range(19);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+ result = cont.equal_range(4);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.second);
+ result = cont.equal_range(6);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+ result = cont.equal_range(8);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+ result = cont.equal_range(10);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+ result = cont.equal_range(12);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+ result = cont.equal_range(14);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+ result = cont.equal_range(16);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+ result = cont.equal_range(18);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+ result = cont.equal_range(20);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+ }
+ {
+ const IntTree cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ std::pair<IntTree::const_iterator, IntTree::const_iterator> result =
+ cont.equal_range(5);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+ result = cont.equal_range(7);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+ result = cont.equal_range(9);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+ result = cont.equal_range(11);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+ result = cont.equal_range(13);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+ result = cont.equal_range(15);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+ result = cont.equal_range(17);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+ result = cont.equal_range(19);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+ result = cont.equal_range(4);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.second);
+ result = cont.equal_range(6);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+ result = cont.equal_range(8);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+ result = cont.equal_range(10);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+ result = cont.equal_range(12);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+ result = cont.equal_range(14);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+ result = cont.equal_range(16);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+ result = cont.equal_range(18);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+ result = cont.equal_range(20);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+ }
+ {
+ IntTreeWithLess cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ std::pair<IntTree::iterator, IntTree::iterator> result =
+ cont.equal_range(5);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+ result = cont.equal_range(7);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+ result = cont.equal_range(9);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+ result = cont.equal_range(11);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+ result = cont.equal_range(13);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+ result = cont.equal_range(15);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+ result = cont.equal_range(17);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+ result = cont.equal_range(19);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+ result = cont.equal_range(4);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 0), result.second);
+ result = cont.equal_range(6);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+ result = cont.equal_range(8);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+ result = cont.equal_range(10);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+ result = cont.equal_range(12);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+ result = cont.equal_range(14);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+ result = cont.equal_range(16);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+ result = cont.equal_range(18);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+ result = cont.equal_range(20);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.first);
+ EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+ }
+}
+
+// iterator lower_bound(const key_type& key);
+// const_iterator lower_bound(const key_type& key) const;
+
+TEST(FlatTree, LowerBound) {
+ {
+ IntTree cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(cont.begin(), cont.lower_bound(5));
+ EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
+ EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
+ }
+ {
+ const IntTree cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(cont.begin(), cont.lower_bound(5));
+ EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
+ EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
+ }
+ {
+ IntTreeWithLess cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(cont.begin(), cont.lower_bound(5));
+ EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
+ EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
+ }
+}
+
+// iterator upper_bound(const key_type& key)
+// const_iterator upper_bound(const key_type& key) const
+
+TEST(FlatTree, UpperBound) {
+ {
+ IntTree cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
+ EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
+ }
+ {
+ const IntTree cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
+ EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
+ }
+ {
+ IntTreeWithLess cont({5, 7, 9, 11, 13, 15, 17, 19}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
+ EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
+ EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
+ EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
+ EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
+ EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
+ EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
+ EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
+ EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
+ EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
+ }
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+// void swap(flat_tree& other)
+// void swap(flat_tree& lhs, flat_tree& rhs)
+
+TEST(FlatTree, Swap) {
+ IntTree x({1, 2, 3}, KEEP_FIRST_OF_DUPES);
+ IntTree y({4}, KEEP_FIRST_OF_DUPES);
+ swap(x, y);
+ EXPECT_THAT(x, ElementsAre(4));
+ EXPECT_THAT(y, ElementsAre(1, 2, 3));
+
+ y.swap(x);
+ EXPECT_THAT(x, ElementsAre(1, 2, 3));
+ EXPECT_THAT(y, ElementsAre(4));
+}
+
+// bool operator==(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator!=(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator<(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator>(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator<=(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator>=(const flat_tree& lhs, const flat_tree& rhs)
+
+TEST(FlatTree, Comparison) {
+ // Provided comparator does not participate in comparison.
+ ReversedTree biggest({3}, KEEP_FIRST_OF_DUPES);
+ ReversedTree smallest({1}, KEEP_FIRST_OF_DUPES);
+ ReversedTree middle({1, 2}, KEEP_FIRST_OF_DUPES);
+
+ EXPECT_EQ(biggest, biggest);
+ EXPECT_NE(biggest, smallest);
+ EXPECT_LT(smallest, middle);
+ EXPECT_LE(smallest, middle);
+ EXPECT_LE(middle, middle);
+ EXPECT_GT(biggest, middle);
+ EXPECT_GE(biggest, middle);
+ EXPECT_GE(biggest, biggest);
+}
+
+TEST(FlatTree, EraseIf) {
+ IntTree x;
+ EraseIf(x, [](int) { return false; });
+ EXPECT_THAT(x, ElementsAre());
+
+ x = {1, 2, 3};
+ EraseIf(x, [](int elem) { return !(elem & 1); });
+ EXPECT_THAT(x, ElementsAre(1, 3));
+
+ x = {1, 2, 3, 4};
+ EraseIf(x, [](int elem) { return elem & 1; });
+ EXPECT_THAT(x, ElementsAre(2, 4));
+}
+
+} // namespace internal
+} // namespace base
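
The upper_bound() expectations above follow the standard associative-container contract: the first element strictly greater than the key is returned, or end() when no such element exists. A minimal stand-alone sketch of that contract (plain std::set, not part of this patch):

    #include <cassert>
    #include <set>

    int main() {
      std::set<int> s = {5, 7, 9};
      assert(*s.upper_bound(7) == 9);       // First element strictly greater than 7.
      assert(*s.upper_bound(4) == 5);       // Key below the range -> begin().
      assert(s.upper_bound(9) == s.end());  // Key at/above the max -> end().
      return 0;
    }
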
diff --git a/chromium/base/containers/mru_cache.h b/chromium/base/containers/mru_cache.h
index 4005489d4b2..7c684a9690d 100644
--- a/chromium/base/containers/mru_cache.h
+++ b/chromium/base/containers/mru_cache.h
@@ -105,8 +105,6 @@ class MRUCacheBase {
// Retrieves the contents of the given key, or end() if not found. This method
// has the side effect of moving the requested item to the front of the
// recency list.
- //
- // TODO(brettw) We may want a const version of this function in the future.
iterator Get(const KeyType& key) {
typename KeyIndex::iterator index_iter = index_.find(key);
if (index_iter == index_.end())
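
The surviving comment above states the key contract: Get() is a lookup with a side effect on recency. A short usage sketch, assuming base::MRUCache's Put()/Get()/end() interface from this header (illustrative, not part of this patch):

    // Illustrative sketch; assumes base::MRUCache<K, V> from this header.
    base::MRUCache<int, std::string> cache(2);  // Caps the cache at two entries.
    cache.Put(1, "one");
    cache.Put(2, "two");     // Recency order is now {2, 1}.
    auto it = cache.Get(1);  // Side effect: moves key 1 to the front -> {1, 2}.
    if (it != cache.end())
      DCHECK_EQ("one", it->second);
    cache.Put(3, "three");   // Evicts key 2, the least recently used entry.
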
diff --git a/chromium/base/critical_closure.h b/chromium/base/critical_closure.h
index 1b10cde7ce6..94c618dfbb1 100644
--- a/chromium/base/critical_closure.h
+++ b/chromium/base/critical_closure.h
@@ -5,6 +5,8 @@
#ifndef BASE_CRITICAL_CLOSURE_H_
#define BASE_CRITICAL_CLOSURE_H_
+#include <utility>
+
#include "base/callback.h"
#include "base/macros.h"
#include "build/build_config.h"
@@ -27,13 +29,13 @@ bool IsMultiTaskingSupported();
// |ios::ScopedCriticalAction|.
class CriticalClosure {
public:
- explicit CriticalClosure(const Closure& closure);
+ explicit CriticalClosure(OnceClosure closure);
~CriticalClosure();
void Run();
private:
ios::ScopedCriticalAction critical_action_;
- Closure closure_;
+ OnceClosure closure_;
DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
};
@@ -55,13 +57,14 @@ class CriticalClosure {
// background running time, |MakeCriticalClosure| should be applied on them
// before posting.
#if defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
DCHECK(internal::IsMultiTaskingSupported());
- return base::Bind(&internal::CriticalClosure::Run,
- Owned(new internal::CriticalClosure(closure)));
+ return base::BindOnce(
+ &internal::CriticalClosure::Run,
+ Owned(new internal::CriticalClosure(std::move(closure))));
}
#else // defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
// No-op for platforms where the application does not need to acquire
// background time for closures to finish when it goes into the background.
return closure;
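
After this change the wrapper consumes and produces a OnceClosure on both platforms, so call sites bind with BindOnce. A sketch of the expected call pattern (SaveState and PostCriticalSave are hypothetical; assumes a OnceClosure-accepting PostTask overload):

    void SaveState();  // Hypothetical critical work.

    void PostCriticalSave(base::TaskRunner* runner) {
      // On iOS this acquires background running time for the task; on other
      // platforms MakeCriticalClosure() returns the closure unchanged.
      runner->PostTask(FROM_HERE,
                       base::MakeCriticalClosure(base::BindOnce(&SaveState)));
    }
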
diff --git a/chromium/base/critical_closure_internal_ios.mm b/chromium/base/critical_closure_internal_ios.mm
index 063d3dd9a24..e35eca0c7e0 100644
--- a/chromium/base/critical_closure_internal_ios.mm
+++ b/chromium/base/critical_closure_internal_ios.mm
@@ -13,12 +13,13 @@ bool IsMultiTaskingSupported() {
return [[UIDevice currentDevice] isMultitaskingSupported];
}
-CriticalClosure::CriticalClosure(const Closure& closure) : closure_(closure) {}
+CriticalClosure::CriticalClosure(OnceClosure closure)
+ : closure_(std::move(closure)) {}
CriticalClosure::~CriticalClosure() {}
void CriticalClosure::Run() {
- closure_.Run();
+ std::move(closure_).Run();
}
} // namespace internal
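
The std::move(closure_).Run() form is required because OnceCallback::Run() is rvalue-reference-qualified: it may only be invoked on an rvalue and it consumes the callback. A minimal sketch:

    base::OnceClosure closure = base::BindOnce([] { /* critical work */ });
    // Run() is &&-qualified, so the callback must be moved; it is null after.
    std::move(closure).Run();
    DCHECK(closure.is_null());
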
diff --git a/chromium/base/debug/activity_analyzer.cc b/chromium/base/debug/activity_analyzer.cc
index 7c421e96309..3c672341dd0 100644
--- a/chromium/base/debug/activity_analyzer.cc
+++ b/chromium/base/debug/activity_analyzer.cc
@@ -4,9 +4,12 @@
#include "base/debug/activity_analyzer.h"
+#include <algorithm>
+
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/stl_util.h"
@@ -15,6 +18,11 @@
namespace base {
namespace debug {
+namespace {
+// An empty snapshot that can be returned when there otherwise is none.
+LazyInstance<ActivityUserData::Snapshot>::Leaky g_empty_user_data_snapshot;
+} // namespace
+
ThreadActivityAnalyzer::Snapshot::Snapshot() {}
ThreadActivityAnalyzer::Snapshot::~Snapshot() {}
@@ -48,7 +56,8 @@ void ThreadActivityAnalyzer::AddGlobalInformation(
// The global GetUserDataSnapshot will return an empty snapshot if the ref
// or id is not valid.
activity_snapshot_.user_data_stack.push_back(global->GetUserDataSnapshot(
- activity.user_data_ref, activity.user_data_id));
+ activity_snapshot_.process_id, activity.user_data_ref,
+ activity.user_data_id));
}
}
@@ -78,19 +87,42 @@ std::unique_ptr<GlobalActivityAnalyzer> GlobalActivityAnalyzer::CreateWithFile(
}
#endif // !defined(OS_NACL)
-ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetFirstAnalyzer() {
+int64_t GlobalActivityAnalyzer::GetFirstProcess() {
PrepareAllAnalyzers();
+ return GetNextProcess();
+}
+
+int64_t GlobalActivityAnalyzer::GetNextProcess() {
+ if (process_ids_.empty())
+ return 0;
+ int64_t pid = process_ids_.back();
+ process_ids_.pop_back();
+ return pid;
+}
+
+ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetFirstAnalyzer(int64_t pid) {
analyzers_iterator_ = analyzers_.begin();
+ analyzers_iterator_pid_ = pid;
if (analyzers_iterator_ == analyzers_.end())
return nullptr;
- return analyzers_iterator_->second.get();
+ int64_t create_stamp;
+ if (analyzers_iterator_->second->GetProcessId(&create_stamp) == pid &&
+ create_stamp <= analysis_stamp_) {
+ return analyzers_iterator_->second.get();
+ }
+ return GetNextAnalyzer();
}
ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetNextAnalyzer() {
DCHECK(analyzers_iterator_ != analyzers_.end());
- ++analyzers_iterator_;
- if (analyzers_iterator_ == analyzers_.end())
- return nullptr;
+ int64_t create_stamp;
+ do {
+ ++analyzers_iterator_;
+ if (analyzers_iterator_ == analyzers_.end())
+ return nullptr;
+ } while (analyzers_iterator_->second->GetProcessId(&create_stamp) !=
+ analyzers_iterator_pid_ ||
+ create_stamp > analysis_stamp_);
return analyzers_iterator_->second.get();
}
@@ -103,6 +135,7 @@ ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
}
ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
+ int64_t pid,
uint32_t ref,
uint32_t id) {
ActivityUserData::Snapshot snapshot;
@@ -114,7 +147,11 @@ ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
size_t size = allocator_->GetAllocSize(ref);
const ActivityUserData user_data(memory, size);
user_data.CreateSnapshot(&snapshot);
- if (user_data.id() != id) {
+ int64_t process_id;
+ int64_t create_stamp;
+ if (!ActivityUserData::GetOwningProcessId(memory, &process_id,
+ &create_stamp) ||
+ process_id != pid || user_data.id() != id) {
// This allocation has been overwritten since it was created. Return an
// empty snapshot because whatever was captured is incorrect.
snapshot.clear();
@@ -124,22 +161,15 @@ ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
return snapshot;
}
-ActivityUserData::Snapshot GlobalActivityAnalyzer::GetGlobalUserDataSnapshot() {
- ActivityUserData::Snapshot snapshot;
-
- PersistentMemoryAllocator::Reference ref =
- PersistentMemoryAllocator::Iterator(allocator_.get())
- .GetNextOfType(GlobalActivityTracker::kTypeIdGlobalDataRecord);
- void* memory = allocator_->GetAsArray<char>(
- ref, GlobalActivityTracker::kTypeIdGlobalDataRecord,
- PersistentMemoryAllocator::kSizeAny);
- if (memory) {
- size_t size = allocator_->GetAllocSize(ref);
- const ActivityUserData global_data(memory, size);
- global_data.CreateSnapshot(&snapshot);
- }
-
- return snapshot;
+const ActivityUserData::Snapshot&
+GlobalActivityAnalyzer::GetProcessDataSnapshot(int64_t pid) {
+ auto iter = process_data_.find(pid);
+ if (iter == process_data_.end())
+ return g_empty_user_data_snapshot.Get();
+ if (iter->second.create_stamp > analysis_stamp_)
+ return g_empty_user_data_snapshot.Get();
+ DCHECK_EQ(pid, iter->second.process_id);
+ return iter->second.data;
}
std::vector<std::string> GlobalActivityAnalyzer::GetLogMessages() {
@@ -185,7 +215,17 @@ GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
return { 0, 0 };
}
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() {}
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
+ const UserDataSnapshot& rhs) = default;
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
+ UserDataSnapshot&& rhs) = default;
+GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() {}
+
void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
+ // Record the time when analysis started.
+ analysis_stamp_ = base::Time::Now().ToInternalValue();
+
// Fetch all the records. This will retrieve only ones created since the
// last run since the PMA iterator will continue from where it left off.
uint32_t type;
@@ -194,39 +234,95 @@ void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
switch (type) {
case GlobalActivityTracker::kTypeIdActivityTracker:
case GlobalActivityTracker::kTypeIdActivityTrackerFree:
- // Free or not, add it to the list of references for later analysis.
- tracker_references_.insert(ref);
+ case GlobalActivityTracker::kTypeIdProcessDataRecord:
+ case GlobalActivityTracker::kTypeIdProcessDataRecordFree:
+ case PersistentMemoryAllocator::kTypeIdTransitioning:
+ // Active, free, or transitioning: add it to the list of references
+ // for later analysis.
+ memory_references_.insert(ref);
break;
}
}
- // Go through all the known references and create analyzers for them with
- // snapshots of the current state.
+ // Clear out any old information.
analyzers_.clear();
- for (PersistentMemoryAllocator::Reference tracker_ref : tracker_references_) {
- // Get the actual data segment for the tracker. This can fail if the
- // record has been marked "free" since the type will not match.
- void* base = allocator_->GetAsArray<char>(
- tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
+ process_data_.clear();
+ process_ids_.clear();
+ std::set<int64_t> seen_pids;
+
+ // Go through all the known references and create objects for them with
+ // snapshots of the current state.
+ for (PersistentMemoryAllocator::Reference memory_ref : memory_references_) {
+    // Get the actual data segment for the record. Any type will do since it
+    // is checked below.
+ void* const base = allocator_->GetAsArray<char>(
+ memory_ref, PersistentMemoryAllocator::kTypeIdAny,
PersistentMemoryAllocator::kSizeAny);
+ const size_t size = allocator_->GetAllocSize(memory_ref);
if (!base)
continue;
- // Create the analyzer on the data. This will capture a snapshot of the
- // tracker state. This can fail if the tracker is somehow corrupted or is
- // in the process of shutting down.
- std::unique_ptr<ThreadActivityAnalyzer> analyzer(new ThreadActivityAnalyzer(
- base, allocator_->GetAllocSize(tracker_ref)));
- if (!analyzer->IsValid())
- continue;
- analyzer->AddGlobalInformation(this);
-
- // Add this analyzer to the map of known ones, indexed by a unique thread
- // identifier.
- DCHECK(!base::ContainsKey(analyzers_, analyzer->GetThreadKey()));
- analyzer->allocator_reference_ = ref;
- analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
+ switch (allocator_->GetType(memory_ref)) {
+ case GlobalActivityTracker::kTypeIdActivityTracker: {
+ // Create the analyzer on the data. This will capture a snapshot of the
+ // tracker state. This can fail if the tracker is somehow corrupted or
+ // is in the process of shutting down.
+ std::unique_ptr<ThreadActivityAnalyzer> analyzer(
+ new ThreadActivityAnalyzer(base, size));
+ if (!analyzer->IsValid())
+ continue;
+ analyzer->AddGlobalInformation(this);
+
+ // Track PIDs.
+ int64_t pid = analyzer->GetProcessId();
+ if (seen_pids.find(pid) == seen_pids.end()) {
+ process_ids_.push_back(pid);
+ seen_pids.insert(pid);
+ }
+
+        // Add this analyzer to the map of known ones, indexed by a unique
+        // thread identifier.
+ DCHECK(!base::ContainsKey(analyzers_, analyzer->GetThreadKey()));
+        analyzer->allocator_reference_ = memory_ref;
+ analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
+ } break;
+
+ case GlobalActivityTracker::kTypeIdProcessDataRecord: {
+ // Get the PID associated with this data record.
+ int64_t process_id;
+ int64_t create_stamp;
+ ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
+ DCHECK(!base::ContainsKey(process_data_, process_id));
+
+        // Create a snapshot of the data. This can fail if the data is somehow
+        // corrupted or the process shut down and the memory was released.
+ UserDataSnapshot& snapshot = process_data_[process_id];
+ snapshot.process_id = process_id;
+ snapshot.create_stamp = create_stamp;
+ const ActivityUserData process_data(base, size);
+ if (!process_data.CreateSnapshot(&snapshot.data))
+ break;
+
+ // Check that nothing changed. If it did, forget what was recorded.
+ ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
+ if (process_id != snapshot.process_id ||
+ create_stamp != snapshot.create_stamp) {
+ process_data_.erase(process_id);
+ break;
+ }
+
+ // Track PIDs.
+ if (seen_pids.find(process_id) == seen_pids.end()) {
+ process_ids_.push_back(process_id);
+ seen_pids.insert(process_id);
+ }
+ } break;
+ }
}
+
+ // Reverse the list of PIDs so that they get popped in the order found.
+ std::reverse(process_ids_.begin(), process_ids_.end());
}
} // namespace debug
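
The new iteration protocol nests per-thread iteration inside per-process iteration. A sketch, assuming |analyzer| is an already-constructed GlobalActivityAnalyzer (GetFirstProcess() takes the global snapshot and invalidates analyzer pointers from any earlier pass):

    for (int64_t pid = analyzer.GetFirstProcess(); pid != 0;
         pid = analyzer.GetNextProcess()) {
      for (ThreadActivityAnalyzer* thread = analyzer.GetFirstAnalyzer(pid);
           thread != nullptr; thread = analyzer.GetNextAnalyzer()) {
        LOG(INFO) << "thread " << thread->GetThreadName() << " in process "
                  << pid;
      }
    }
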
diff --git a/chromium/base/debug/activity_analyzer.h b/chromium/base/debug/activity_analyzer.h
index 95df69fd9b4..e98046ea38b 100644
--- a/chromium/base/debug/activity_analyzer.h
+++ b/chromium/base/debug/activity_analyzer.h
@@ -9,6 +9,7 @@
#include <memory>
#include <set>
#include <string>
+#include <vector>
#include "base/base_export.h"
#include "base/debug/activity_tracker.h"
@@ -77,6 +78,13 @@ class BASE_EXPORT ThreadActivityAnalyzer {
// methods are undefined if this returns false.
bool IsValid() { return activity_snapshot_valid_; }
+ // Gets the process id and its creation stamp.
+ int64_t GetProcessId(int64_t* out_stamp = nullptr) {
+ if (out_stamp)
+ *out_stamp = activity_snapshot_.create_stamp;
+ return activity_snapshot_.process_id;
+ }
+
// Gets the name of the thread.
const std::string& GetThreadName() {
return activity_snapshot_.thread_name;
@@ -136,10 +144,23 @@ class BASE_EXPORT GlobalActivityAnalyzer {
const FilePath& file_path);
#endif // !defined(OS_NACL)
- // Iterates over all known valid analyzers or returns null if there are no
- // more. Ownership stays with the global analyzer object and all existing
- // analyzer pointers are invalidated when GetFirstAnalyzer() is called.
- ThreadActivityAnalyzer* GetFirstAnalyzer();
+ // Iterates over all known valid processes and returns their PIDs or zero
+ // if there are no more. Calls to GetFirstProcess() will perform a global
+ // snapshot in order to provide a relatively consistent state across the
+ // future calls to GetNextProcess() and GetFirst/NextAnalyzer(). PIDs are
+  // returned in the order they're found, meaning that a first-launched
+ // controlling process will be found first. Note, however, that space
+ // freed by an exiting process may be re-used by a later process.
+ int64_t GetFirstProcess();
+ int64_t GetNextProcess();
+
+  // Iterates over all known valid analyzers for a given process, or returns
+ // null if there are no more.
+ //
+ // GetFirstProcess() must be called first in order to capture a global
+ // snapshot! Ownership stays with the global analyzer object and all existing
+ // analyzer pointers are invalidated when GetFirstProcess() is called.
+ ThreadActivityAnalyzer* GetFirstAnalyzer(int64_t pid);
ThreadActivityAnalyzer* GetNextAnalyzer();
// Gets the analyzer for a specific thread or null if there is none.
@@ -147,10 +168,13 @@ class BASE_EXPORT GlobalActivityAnalyzer {
ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
// Extract user data based on a reference and its identifier.
- ActivityUserData::Snapshot GetUserDataSnapshot(uint32_t ref, uint32_t id);
+ ActivityUserData::Snapshot GetUserDataSnapshot(int64_t pid,
+ uint32_t ref,
+ uint32_t id);
- // Extract the global user data.
- ActivityUserData::Snapshot GetGlobalUserDataSnapshot();
+ // Extract the data for a specific process. An empty snapshot will be
+ // returned if the process is not known.
+ const ActivityUserData::Snapshot& GetProcessDataSnapshot(int64_t pid);
// Gets all log messages stored within.
std::vector<std::string> GetLogMessages();
@@ -166,17 +190,40 @@ class BASE_EXPORT GlobalActivityAnalyzer {
using AnalyzerMap =
std::map<ThreadKey, std::unique_ptr<ThreadActivityAnalyzer>>;
+ struct UserDataSnapshot {
+ // Complex class needs out-of-line ctor/dtor.
+ UserDataSnapshot();
+ UserDataSnapshot(const UserDataSnapshot& rhs);
+ UserDataSnapshot(UserDataSnapshot&& rhs);
+ ~UserDataSnapshot();
+
+ int64_t process_id;
+ int64_t create_stamp;
+ ActivityUserData::Snapshot data;
+ };
+
// Finds, creates, and indexes analyzers for all known processes and threads.
void PrepareAllAnalyzers();
// The persistent memory allocator holding all tracking data.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
+ // The time stamp when analysis began. This is used to prevent looking into
+ // process IDs that get reused when analyzing a live system.
+ int64_t analysis_stamp_;
+
// The iterator for finding tracking information in the allocator.
PersistentMemoryAllocator::Iterator allocator_iterator_;
- // A set of all tracker memory references found within the allocator.
- std::set<PersistentMemoryAllocator::Reference> tracker_references_;
+ // A set of all interesting memory references found within the allocator.
+ std::set<PersistentMemoryAllocator::Reference> memory_references_;
+
+  // A map, keyed by process ID, of snapshots of the process data found
+  // within the allocator.
+ std::map<int64_t, UserDataSnapshot> process_data_;
+
+  // All process IDs collected during PrepareAllAnalyzers. These are
+ // popped and returned one-by-one with calls to GetFirst/NextProcess().
+ std::vector<int64_t> process_ids_;
// A map, keyed by ThreadKey, of all valid activity analyzers.
AnalyzerMap analyzers_;
@@ -184,6 +231,7 @@ class BASE_EXPORT GlobalActivityAnalyzer {
// The iterator within the analyzers_ map for returning analyzers through
// first/next iteration.
AnalyzerMap::iterator analyzers_iterator_;
+ int64_t analyzers_iterator_pid_;
DISALLOW_COPY_AND_ASSIGN(GlobalActivityAnalyzer);
};
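
GetProcessDataSnapshot() replaces the single global snapshot with a per-process lookup keyed by PID. A sketch of reading a recorded value back, assuming GetFirstProcess() has already been called and the process stored an integer under "pid" as in the tests below:

    const ActivityUserData::Snapshot& data =
        analyzer.GetProcessDataSnapshot(pid);
    auto found = data.find("pid");
    if (found != data.end())
      LOG(INFO) << "recorded pid: " << found->second.GetInt();
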
diff --git a/chromium/base/debug/activity_analyzer_unittest.cc b/chromium/base/debug/activity_analyzer_unittest.cc
index ee614eaeb9a..31871f5dd69 100644
--- a/chromium/base/debug/activity_analyzer_unittest.cc
+++ b/chromium/base/debug/activity_analyzer_unittest.cc
@@ -7,6 +7,7 @@
#include <atomic>
#include <memory>
+#include "base/auto_reset.h"
#include "base/bind.h"
#include "base/debug/activity_tracker.h"
#include "base/files/file.h"
@@ -65,6 +66,26 @@ class ActivityAnalyzerTest : public testing::Test {
return MakeUnique<TestActivityTracker>(std::move(memory), kStackSize);
}
+ template <typename Function>
+ void AsOtherProcess(int64_t pid, Function function) {
+ std::unique_ptr<GlobalActivityTracker> old_global =
+ GlobalActivityTracker::ReleaseForTesting();
+ ASSERT_TRUE(old_global);
+
+ PersistentMemoryAllocator* old_allocator = old_global->allocator();
+ std::unique_ptr<PersistentMemoryAllocator> new_allocator(
+ MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(old_allocator->data()), old_allocator->size(), 0,
+ 0, "", false));
+ GlobalActivityTracker::CreateWithAllocator(std::move(new_allocator), 3,
+ pid);
+
+ function();
+
+ GlobalActivityTracker::ReleaseForTesting();
+ GlobalActivityTracker::SetForTesting(std::move(old_global));
+ }
+
static void DoNothing() {}
};
@@ -138,19 +159,23 @@ class SimpleActivityThread : public SimpleThread {
};
TEST_F(ActivityAnalyzerTest, GlobalAnalyzerConstruction) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker::Get()->process_data().SetString("foo", "bar");
PersistentMemoryAllocator* allocator =
GlobalActivityTracker::Get()->allocator();
GlobalActivityAnalyzer analyzer(MakeUnique<PersistentMemoryAllocator>(
const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
- // The only thread at thois point is the test thread.
- ThreadActivityAnalyzer* ta1 = analyzer.GetFirstAnalyzer();
+ // The only thread at this point is the test thread of this process.
+ const int64_t pid = analyzer.GetFirstProcess();
+ ASSERT_NE(0, pid);
+ ThreadActivityAnalyzer* ta1 = analyzer.GetFirstAnalyzer(pid);
ASSERT_TRUE(ta1);
EXPECT_FALSE(analyzer.GetNextAnalyzer());
ThreadActivityAnalyzer::ThreadKey tk1 = ta1->GetThreadKey();
EXPECT_EQ(ta1, analyzer.GetAnalyzerForThread(tk1));
+ EXPECT_EQ(0, analyzer.GetNextProcess());
// Create a second thread that will do something.
SimpleActivityThread t2("t2", nullptr, Activity::ACT_TASK,
@@ -158,28 +183,38 @@ TEST_F(ActivityAnalyzerTest, GlobalAnalyzerConstruction) {
t2.Start();
t2.WaitReady();
- // Now there should be two.
- EXPECT_TRUE(analyzer.GetFirstAnalyzer());
+ // Now there should be two. Calling GetFirstProcess invalidates any
+ // previously returned analyzer pointers.
+ ASSERT_EQ(pid, analyzer.GetFirstProcess());
+ EXPECT_TRUE(analyzer.GetFirstAnalyzer(pid));
EXPECT_TRUE(analyzer.GetNextAnalyzer());
EXPECT_FALSE(analyzer.GetNextAnalyzer());
+ EXPECT_EQ(0, analyzer.GetNextProcess());
// Let thread exit.
t2.Exit();
t2.Join();
- // Now there should be only one again. Calling GetFirstAnalyzer invalidates
- // any previously returned analyzer pointers.
- ThreadActivityAnalyzer* ta2 = analyzer.GetFirstAnalyzer();
+ // Now there should be only one again.
+ ASSERT_EQ(pid, analyzer.GetFirstProcess());
+ ThreadActivityAnalyzer* ta2 = analyzer.GetFirstAnalyzer(pid);
ASSERT_TRUE(ta2);
EXPECT_FALSE(analyzer.GetNextAnalyzer());
ThreadActivityAnalyzer::ThreadKey tk2 = ta2->GetThreadKey();
EXPECT_EQ(ta2, analyzer.GetAnalyzerForThread(tk2));
EXPECT_EQ(tk1, tk2);
+ EXPECT_EQ(0, analyzer.GetNextProcess());
+
+ // Verify that there is process data.
+ const ActivityUserData::Snapshot& data_snapshot =
+ analyzer.GetProcessDataSnapshot(pid);
+ ASSERT_LE(1U, data_snapshot.size());
+ EXPECT_EQ("bar", data_snapshot.at("foo").GetString());
}
TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
- ThreadActivityAnalyzer::Snapshot snapshot;
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ ThreadActivityAnalyzer::Snapshot tracker_snapshot;
const char string1a[] = "string1a";
const char string1b[] = "string1b";
@@ -218,16 +253,16 @@ TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
user_data2.SetReference("ref2", string2a, sizeof(string2a));
user_data2.SetStringReference("sref2", string2b);
- ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
- ASSERT_EQ(2U, snapshot.activity_stack.size());
+ ASSERT_TRUE(tracker->CreateSnapshot(&tracker_snapshot));
+ ASSERT_EQ(2U, tracker_snapshot.activity_stack.size());
ThreadActivityAnalyzer analyzer(*tracker);
analyzer.AddGlobalInformation(&global_analyzer);
- const ThreadActivityAnalyzer::Snapshot& snapshot =
+ const ThreadActivityAnalyzer::Snapshot& analyzer_snapshot =
analyzer.activity_snapshot();
- ASSERT_EQ(2U, snapshot.user_data_stack.size());
+ ASSERT_EQ(2U, analyzer_snapshot.user_data_stack.size());
const ActivityUserData::Snapshot& user_data =
- snapshot.user_data_stack.at(1);
+ analyzer_snapshot.user_data_stack.at(1);
EXPECT_EQ(8U, user_data.size());
ASSERT_TRUE(ContainsKey(user_data, "raw2"));
EXPECT_EQ("foo2", user_data.at("raw2").Get().as_string());
@@ -250,16 +285,16 @@ TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
user_data.at("sref2").GetStringReference().size());
}
- ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
- ASSERT_EQ(1U, snapshot.activity_stack.size());
+ ASSERT_TRUE(tracker->CreateSnapshot(&tracker_snapshot));
+ ASSERT_EQ(1U, tracker_snapshot.activity_stack.size());
ThreadActivityAnalyzer analyzer(*tracker);
analyzer.AddGlobalInformation(&global_analyzer);
- const ThreadActivityAnalyzer::Snapshot& snapshot =
+ const ThreadActivityAnalyzer::Snapshot& analyzer_snapshot =
analyzer.activity_snapshot();
- ASSERT_EQ(1U, snapshot.user_data_stack.size());
+ ASSERT_EQ(1U, analyzer_snapshot.user_data_stack.size());
const ActivityUserData::Snapshot& user_data =
- snapshot.user_data_stack.at(0);
+ analyzer_snapshot.user_data_stack.at(0);
EXPECT_EQ(8U, user_data.size());
EXPECT_EQ("foo1", user_data.at("raw1").Get().as_string());
EXPECT_EQ("bar1", user_data.at("string1").GetString().as_string());
@@ -274,12 +309,13 @@ TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
user_data.at("sref1").GetStringReference().size());
}
- ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
- ASSERT_EQ(0U, snapshot.activity_stack.size());
+ ASSERT_TRUE(tracker->CreateSnapshot(&tracker_snapshot));
+ ASSERT_EQ(0U, tracker_snapshot.activity_stack.size());
}
TEST_F(ActivityAnalyzerTest, GlobalUserDataTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ const int64_t pid = GetCurrentProcId();
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
const char string1[] = "foo";
const char string2[] = "bar";
@@ -289,18 +325,21 @@ TEST_F(ActivityAnalyzerTest, GlobalUserDataTest) {
GlobalActivityAnalyzer global_analyzer(MakeUnique<PersistentMemoryAllocator>(
const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
- ActivityUserData& global_data = GlobalActivityTracker::Get()->global_data();
- global_data.Set("raw", "foo", 3);
- global_data.SetString("string", "bar");
- global_data.SetChar("char", '9');
- global_data.SetInt("int", -9999);
- global_data.SetUint("uint", 9999);
- global_data.SetBool("bool", true);
- global_data.SetReference("ref", string1, sizeof(string1));
- global_data.SetStringReference("sref", string2);
-
- ActivityUserData::Snapshot snapshot =
- global_analyzer.GetGlobalUserDataSnapshot();
+ ActivityUserData& process_data = GlobalActivityTracker::Get()->process_data();
+ ASSERT_NE(0U, process_data.id());
+ process_data.Set("raw", "foo", 3);
+ process_data.SetString("string", "bar");
+ process_data.SetChar("char", '9');
+ process_data.SetInt("int", -9999);
+ process_data.SetUint("uint", 9999);
+ process_data.SetBool("bool", true);
+ process_data.SetReference("ref", string1, sizeof(string1));
+ process_data.SetStringReference("sref", string2);
+
+ int64_t first_pid = global_analyzer.GetFirstProcess();
+ DCHECK_EQ(pid, first_pid);
+ const ActivityUserData::Snapshot& snapshot =
+ global_analyzer.GetProcessDataSnapshot(pid);
ASSERT_TRUE(ContainsKey(snapshot, "raw"));
EXPECT_EQ("foo", snapshot.at("raw").Get().as_string());
ASSERT_TRUE(ContainsKey(snapshot, "string"));
@@ -322,7 +361,7 @@ TEST_F(ActivityAnalyzerTest, GlobalUserDataTest) {
}
TEST_F(ActivityAnalyzerTest, GlobalModulesTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
PersistentMemoryAllocator* allocator =
GlobalActivityTracker::Get()->allocator();
@@ -398,7 +437,7 @@ TEST_F(ActivityAnalyzerTest, GlobalModulesTest) {
}
TEST_F(ActivityAnalyzerTest, GlobalLogMessages) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
PersistentMemoryAllocator* allocator =
GlobalActivityTracker::Get()->allocator();
@@ -414,5 +453,55 @@ TEST_F(ActivityAnalyzerTest, GlobalLogMessages) {
EXPECT_EQ("foo bar", messages[1]);
}
+TEST_F(ActivityAnalyzerTest, GlobalMultiProcess) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 1001);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ PersistentMemoryAllocator* allocator = global->allocator();
+ EXPECT_EQ(1001, global->process_id());
+
+ int64_t process_id;
+ int64_t create_stamp;
+ ActivityUserData::GetOwningProcessId(
+ GlobalActivityTracker::Get()->process_data().GetBaseAddress(),
+ &process_id, &create_stamp);
+ ASSERT_EQ(1001, process_id);
+
+ GlobalActivityTracker::Get()->process_data().SetInt("pid",
+ global->process_id());
+
+ GlobalActivityAnalyzer analyzer(MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+ AsOtherProcess(2002, [&global]() {
+ ASSERT_NE(global, GlobalActivityTracker::Get());
+ EXPECT_EQ(2002, GlobalActivityTracker::Get()->process_id());
+
+ int64_t process_id;
+ int64_t create_stamp;
+ ActivityUserData::GetOwningProcessId(
+ GlobalActivityTracker::Get()->process_data().GetBaseAddress(),
+ &process_id, &create_stamp);
+ ASSERT_EQ(2002, process_id);
+
+ GlobalActivityTracker::Get()->process_data().SetInt(
+ "pid", GlobalActivityTracker::Get()->process_id());
+ });
+ ASSERT_EQ(global, GlobalActivityTracker::Get());
+ EXPECT_EQ(1001, GlobalActivityTracker::Get()->process_id());
+
+ const int64_t pid1 = analyzer.GetFirstProcess();
+ ASSERT_EQ(1001, pid1);
+ const int64_t pid2 = analyzer.GetNextProcess();
+ ASSERT_EQ(2002, pid2);
+ EXPECT_EQ(0, analyzer.GetNextProcess());
+
+ const ActivityUserData::Snapshot& pdata1 =
+ analyzer.GetProcessDataSnapshot(pid1);
+ const ActivityUserData::Snapshot& pdata2 =
+ analyzer.GetProcessDataSnapshot(pid2);
+ EXPECT_EQ(1001, pdata1.at("pid").GetInt());
+ EXPECT_EQ(2002, pdata2.at("pid").GetInt());
+}
+
} // namespace debug
} // namespace base
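
The activity_tracker.cc changes below drop the fixed header cookie in favor of an OwningProcess record whose atomic data_id doubles as a validity and generation word: readers load it with "acquire" ordering before reading the payload and re-check it afterwards to detect reuse of the memory. A distilled sketch of that pattern (simplified structures, not the actual ones):

    #include <atomic>
    #include <cstdint>

    struct Record {
      std::atomic<uint32_t> data_id{0};  // 0 means "not yet initialized".
      int64_t process_id = 0;
    };

    bool ReadRecord(const Record& r, int64_t* out_pid) {
      uint32_t id = r.data_id.load(std::memory_order_acquire);
      if (id == 0)
        return false;            // Never initialized.
      *out_pid = r.process_id;   // Read the payload fields.
      // Valid only if the generation word is unchanged after the reads.
      return id == r.data_id.load(std::memory_order_seq_cst);
    }
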
diff --git a/chromium/base/debug/activity_tracker.cc b/chromium/base/debug/activity_tracker.cc
index 09946eed729..0e8db930e28 100644
--- a/chromium/base/debug/activity_tracker.cc
+++ b/chromium/base/debug/activity_tracker.cc
@@ -23,6 +23,7 @@
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
namespace base {
@@ -30,25 +31,26 @@ namespace debug {
namespace {
-// A number that identifies the memory as having been initialized. It's
-// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
-// A version number is added on so that major structure changes won't try to
-// read an older version (since the cookie won't match).
-const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
-
// The minimum depth a stack should support.
const int kMinStackDepth = 2;
// The amount of memory set aside for holding arbitrary user data (key/value
// pairs) globally or associated with ActivityData entries.
const size_t kUserDataSize = 1 << 10; // 1 KiB
-const size_t kGlobalDataSize = 16 << 10; // 16 KiB
+const size_t kProcessDataSize = 4 << 10; // 4 KiB
const size_t kMaxUserDataNameLength =
static_cast<size_t>(std::numeric_limits<uint8_t>::max());
// A constant used to indicate that module information is changing.
const uint32_t kModuleInformationChanging = 0x80000000;
+// The key used to record process information.
+const char kProcessPhaseDataKey[] = "process-phase";
+
+// An atomically incrementing number, used to check for recreations of objects
+// in the same memory space.
+StaticAtomicSequenceNumber g_next_id;
+
union ThreadRef {
int64_t as_id;
#if defined(OS_WIN)
@@ -64,6 +66,43 @@ union ThreadRef {
#endif
};
+// Gets the next non-zero identifier. It is only unique within a process.
+uint32_t GetNextDataId() {
+ uint32_t id;
+ while ((id = g_next_id.GetNext()) == 0)
+ ;
+ return id;
+}
+
+// Gets the current process-id, either from the GlobalActivityTracker if it
+// exists (where the PID can be defined for testing) or from the system if
+// there isn't such.
+int64_t GetProcessId() {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ if (global)
+ return global->process_id();
+ return GetCurrentProcId();
+}
+
+// Finds and reuses a specific allocation or creates a new one.
+PersistentMemoryAllocator::Reference AllocateFrom(
+ PersistentMemoryAllocator* allocator,
+ uint32_t from_type,
+ size_t size,
+ uint32_t to_type) {
+ PersistentMemoryAllocator::Iterator iter(allocator);
+ PersistentMemoryAllocator::Reference ref;
+ while ((ref = iter.GetNextOfType(from_type)) != 0) {
+ DCHECK_LE(size, allocator->GetAllocSize(ref));
+    // This can fail if another thread has just taken it. It is assumed that
+ // the memory is cleared during the "free" operation.
+ if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
+ return ref;
+ }
+
+ return allocator->Allocate(size, to_type);
+}
+
// Determines the previous aligned index.
size_t RoundDownToAlignment(size_t index, size_t alignment) {
return index & (0 - alignment);
@@ -74,8 +113,43 @@ size_t RoundUpToAlignment(size_t index, size_t alignment) {
return (index + (alignment - 1)) & (0 - alignment);
}
+// Converts "tick" timing into wall time.
+Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
+ return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
+}
+
} // namespace
+OwningProcess::OwningProcess() {}
+OwningProcess::~OwningProcess() {}
+
+void OwningProcess::Release_Initialize(int64_t pid) {
+ uint32_t old_id = data_id.load(std::memory_order_acquire);
+ DCHECK_EQ(0U, old_id);
+ process_id = pid != 0 ? pid : GetProcessId();
+ create_stamp = Time::Now().ToInternalValue();
+ data_id.store(GetNextDataId(), std::memory_order_release);
+}
+
+void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
+ DCHECK_NE(0U, data_id);
+ process_id = pid;
+ create_stamp = stamp;
+}
+
+// static
+bool OwningProcess::GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp) {
+ const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
+ uint32_t id = info->data_id.load(std::memory_order_acquire);
+ if (id == 0)
+ return false;
+
+ *out_id = info->process_id;
+ *out_stamp = info->create_stamp;
+ return id == info->data_id.load(std::memory_order_seq_cst);
+}
// It doesn't matter what is contained in this (though it will be all zeros)
// as only the address of it is important.
@@ -246,32 +320,42 @@ StringPiece ActivityUserData::TypedValue::GetStringReference() const {
return ref_value_;
}
+// These are required because std::atomic is (currently) not a POD type and
+// thus clang requires explicit out-of-line constructors and destructors even
+// when they do nothing.
ActivityUserData::ValueInfo::ValueInfo() {}
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
ActivityUserData::ValueInfo::~ValueInfo() {}
+ActivityUserData::MemoryHeader::MemoryHeader() {}
+ActivityUserData::MemoryHeader::~MemoryHeader() {}
+ActivityUserData::FieldHeader::FieldHeader() {}
+ActivityUserData::FieldHeader::~FieldHeader() {}
-StaticAtomicSequenceNumber ActivityUserData::next_id_;
+ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
-ActivityUserData::ActivityUserData(void* memory, size_t size)
+ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
: memory_(reinterpret_cast<char*>(memory)),
available_(RoundDownToAlignment(size, kMemoryAlignment)),
- id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
+ header_(reinterpret_cast<MemoryHeader*>(memory)),
+ orig_data_id(0),
+ orig_process_id(0),
+ orig_create_stamp(0) {
// It's possible that no user data is being stored.
if (!memory_)
return;
- DCHECK_LT(kMemoryAlignment, available_);
- if (id_->load(std::memory_order_relaxed) == 0) {
- // Generate a new ID and store it in the first 32-bit word of memory_.
- // |id_| must be non-zero for non-sink instances.
- uint32_t id;
- while ((id = next_id_.GetNext()) == 0)
- ;
- id_->store(id, std::memory_order_relaxed);
- DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
- }
- memory_ += kMemoryAlignment;
- available_ -= kMemoryAlignment;
+ static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
+ DCHECK_LT(sizeof(MemoryHeader), available_);
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
+ header_->owner.Release_Initialize(pid);
+ memory_ += sizeof(MemoryHeader);
+ available_ -= sizeof(MemoryHeader);
+
+ // Make a copy of identifying information for later comparison.
+ *const_cast<uint32_t*>(&orig_data_id) =
+ header_->owner.data_id.load(std::memory_order_acquire);
+ *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
+ *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
// If there is already data present, load that. This allows the same class
// to be used for analysis through snapshots.
@@ -280,6 +364,85 @@ ActivityUserData::ActivityUserData(void* memory, size_t size)
ActivityUserData::~ActivityUserData() {}
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+ DCHECK(output_snapshot);
+ DCHECK(output_snapshot->empty());
+
+ // Find any new data that may have been added by an active instance of this
+ // class that is adding records.
+ ImportExistingData();
+
+ // Add all the values to the snapshot.
+ for (const auto& entry : values_) {
+ TypedValue value;
+ const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
+ value.type_ = entry.second.type;
+ DCHECK_GE(entry.second.extent, size);
+
+ switch (entry.second.type) {
+ case RAW_VALUE:
+ case STRING_VALUE:
+ value.long_value_ =
+ std::string(reinterpret_cast<char*>(entry.second.memory), size);
+ break;
+ case RAW_VALUE_REFERENCE:
+ case STRING_VALUE_REFERENCE: {
+ ReferenceRecord* ref =
+ reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+ value.ref_value_ = StringPiece(
+ reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+ static_cast<size_t>(ref->size));
+ } break;
+ case BOOL_VALUE:
+ case CHAR_VALUE:
+ value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+ break;
+ case SIGNED_VALUE:
+ case UNSIGNED_VALUE:
+ value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+ break;
+      case END_OF_VALUES:  // Included for completeness.
+ NOTREACHED();
+ }
+ auto inserted = output_snapshot->insert(
+ std::make_pair(entry.second.name.as_string(), std::move(value)));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ }
+
+ // Another import attempt will validate that the underlying memory has not
+ // been reused for another purpose. Entries added since the first import
+ // will be ignored here but will be returned if another snapshot is created.
+ ImportExistingData();
+ if (!memory_) {
+ output_snapshot->clear();
+ return false;
+ }
+
+ // Successful snapshot.
+ return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() const {
+ // The |memory_| pointer advances as elements are written but the |header_|
+ // value is always at the start of the block so just return that.
+ return header_;
+}
+
+void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
+ int64_t stamp) {
+ if (!header_)
+ return;
+ header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ActivityUserData::GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp) {
+ const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
+ return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
void ActivityUserData::Set(StringPiece name,
ValueType type,
const void* memory,
@@ -308,13 +471,13 @@ void ActivityUserData::Set(StringPiece name,
// following field will be aligned properly.
size_t name_size = name.length();
size_t name_extent =
- RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
- sizeof(Header);
+ RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
+ sizeof(FieldHeader);
size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
// The "base size" is the size of the header and (padded) string key. Stop
// now if there's not room enough for even this.
- size_t base_size = sizeof(Header) + name_extent;
+ size_t base_size = sizeof(FieldHeader) + name_extent;
if (base_size > available_)
return;
@@ -331,12 +494,14 @@ void ActivityUserData::Set(StringPiece name,
// Truncate the stored size to the amount of available memory. Stop now if
// there's not any room for even part of the value.
- size = std::min(full_size - base_size, size);
- if (size == 0)
- return;
+ if (size != 0) {
+ size = std::min(full_size - base_size, size);
+ if (size == 0)
+ return;
+ }
// Allocate a chunk of memory.
- Header* header = reinterpret_cast<Header*>(memory_);
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
memory_ += full_size;
available_ -= full_size;
@@ -346,9 +511,9 @@ void ActivityUserData::Set(StringPiece name,
DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
header->name_size = static_cast<uint8_t>(name_size);
header->record_size = full_size;
- char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
+ char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
void* value_memory =
- reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
+ reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
memcpy(name_memory, name.data(), name_size);
header->type.store(type, std::memory_order_release);
@@ -362,7 +527,7 @@ void ActivityUserData::Set(StringPiece name,
info->name = persistent_name;
info->memory = value_memory;
info->size_ptr = &header->value_size;
- info->extent = full_size - sizeof(Header) - name_extent;
+ info->extent = full_size - sizeof(FieldHeader) - name_extent;
info->type = type;
}
@@ -387,8 +552,12 @@ void ActivityUserData::SetReference(StringPiece name,
}
void ActivityUserData::ImportExistingData() const {
- while (available_ > sizeof(Header)) {
- Header* header = reinterpret_cast<Header*>(memory_);
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
+ while (available_ > sizeof(FieldHeader)) {
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
ValueType type =
static_cast<ValueType>(header->type.load(std::memory_order_acquire));
if (type == END_OF_VALUES)
@@ -396,8 +565,8 @@ void ActivityUserData::ImportExistingData() const {
if (header->record_size > available_)
return;
- size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
- kMemoryAlignment);
+ size_t value_offset = RoundUpToAlignment(
+ sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
if (header->record_size == value_offset &&
header->value_size.load(std::memory_order_relaxed) == 1) {
value_offset -= 1;
@@ -406,7 +575,7 @@ void ActivityUserData::ImportExistingData() const {
return;
ValueInfo info;
- info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
+ info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
info.type = type;
info.memory = memory_ + value_offset;
info.size_ptr = &header->value_size;
@@ -418,60 +587,14 @@ void ActivityUserData::ImportExistingData() const {
memory_ += header->record_size;
available_ -= header->record_size;
}
-}
-
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
- DCHECK(output_snapshot);
- DCHECK(output_snapshot->empty());
-
- // Find any new data that may have been added by an active instance of this
- // class that is adding records.
- ImportExistingData();
- for (const auto& entry : values_) {
- TypedValue value;
- value.type_ = entry.second.type;
- DCHECK_GE(entry.second.extent,
- entry.second.size_ptr->load(std::memory_order_relaxed));
-
- switch (entry.second.type) {
- case RAW_VALUE:
- case STRING_VALUE:
- value.long_value_ =
- std::string(reinterpret_cast<char*>(entry.second.memory),
- entry.second.size_ptr->load(std::memory_order_relaxed));
- break;
- case RAW_VALUE_REFERENCE:
- case STRING_VALUE_REFERENCE: {
- ReferenceRecord* ref =
- reinterpret_cast<ReferenceRecord*>(entry.second.memory);
- value.ref_value_ = StringPiece(
- reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
- static_cast<size_t>(ref->size));
- } break;
- case BOOL_VALUE:
- case CHAR_VALUE:
- value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
- break;
- case SIGNED_VALUE:
- case UNSIGNED_VALUE:
- value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
- break;
- case END_OF_VALUES: // Included for completeness purposes.
- NOTREACHED();
- }
- auto inserted = output_snapshot->insert(
- std::make_pair(entry.second.name.as_string(), std::move(value)));
- DCHECK(inserted.second); // True if inserted, false if existed.
+ // Check if memory has been completely reused.
+ if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
+ header_->owner.process_id != orig_process_id ||
+ header_->owner.create_stamp != orig_create_stamp) {
+ memory_ = nullptr;
+ values_.clear();
}
-
- return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() {
- // The |memory_| pointer advances as elements are written but the |id_|
- // value is always at the start of the block so just return that.
- return id_;
}
// This information is kept for every thread that is tracked. It is filled
@@ -483,27 +606,16 @@ struct ThreadActivityTracker::Header {
GlobalActivityTracker::kTypeIdActivityTracker;
// Expected size for 32/64-bit check.
- static constexpr size_t kExpectedInstanceSize = 80;
-
- // This unique number indicates a valid initialization of the memory.
- std::atomic<uint32_t> cookie;
+ static constexpr size_t kExpectedInstanceSize =
+ OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
+ 72;
- // The number of Activity slots (spaces that can hold an Activity) that
- // immediately follow this structure in memory.
- uint32_t stack_slots;
+ // This information uniquely identifies a process.
+ OwningProcess owner;
- // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
- // These identifiers are not guaranteed to mean anything but are unique, in
- // combination, among all active trackers. It would be nice to always have
- // the process_id be a 64-bit value but the necessity of having it atomic
- // (for the memory barriers it provides) limits it to the natural word size
- // of the machine.
-#ifdef ARCH_CPU_64_BITS
- std::atomic<int64_t> process_id;
-#else
- std::atomic<int32_t> process_id;
- int32_t process_id_padding;
-#endif
+ // The thread-id (thread_ref.as_id) to which this data belongs. This number
+ // is not guaranteed to mean anything but combined with the process-id from
+ // OwningProcess is unique among all active trackers.
ThreadRef thread_ref;
// The start-time and start-ticks when the data was created. Each activity
@@ -512,12 +624,19 @@ struct ThreadActivityTracker::Header {
int64_t start_time;
int64_t start_ticks;
+ // The number of Activity slots (spaces that can hold an Activity) that
+ // immediately follow this structure in memory.
+ uint32_t stack_slots;
+
+ // Some padding to keep everything 64-bit aligned.
+ uint32_t padding;
+
// The current depth of the stack. This may be greater than the number of
// slots. If the depth exceeds the number of slots, the newest entries
// won't be recorded.
std::atomic<uint32_t> current_depth;
- // A memory location used to indicate if changes have been made to the stack
+ // A memory location used to indicate if changes have been made to the data
// that would invalidate an in-progress read of its contents. The active
// tracker will zero the value whenever something gets popped from the
// stack. A monitoring tracker can write a non-zero value here, copy the
@@ -525,7 +644,11 @@ struct ThreadActivityTracker::Header {
// the contents didn't change while being copied. This can handle concurrent
// snapshot operations only if each snapshot writes a different bit (which
// is not the current implementation so no parallel snapshots allowed).
- std::atomic<uint32_t> stack_unchanged;
+ std::atomic<uint32_t> data_unchanged;
+
+ // The last "exception" activity. This can't be stored on the stack because
+ // that could get popped as things unwind.
+ Activity last_exception;
// The name of the thread (up to a maximum length). Dynamic-length names
// are not practical since the memory has to come from the same persistent
@@ -594,15 +717,16 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
"ActivityData.data is not 64-bit aligned");
// Provided memory should either be completely initialized or all zeros.
- if (header_->cookie.load(std::memory_order_relaxed) == 0) {
+ if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
// This is a new file. Double-check other fields and then initialize.
- DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, header_->owner.process_id);
+ DCHECK_EQ(0, header_->owner.create_stamp);
DCHECK_EQ(0, header_->thread_ref.as_id);
DCHECK_EQ(0, header_->start_time);
DCHECK_EQ(0, header_->start_ticks);
DCHECK_EQ(0U, header_->stack_slots);
DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
- DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
+ DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
DCHECK_EQ(0, stack_[0].time_internal);
DCHECK_EQ(0U, stack_[0].origin_address);
DCHECK_EQ(0U, stack_[0].call_stack[0]);
@@ -614,7 +738,6 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
header_->thread_ref.as_handle =
PlatformThread::CurrentHandle().platform_handle();
#endif
- header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
header_->start_time = base::Time::Now().ToInternalValue();
header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
@@ -624,7 +747,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
// This is done last so as to guarantee that everything above is "released"
// by the time this value gets written.
- header_->cookie.store(kHeaderCookie, std::memory_order_release);
+ header_->owner.Release_Initialize();
valid_ = true;
DCHECK(IsValid());
@@ -717,40 +840,28 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
// The stack has shrunk meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data. That thread would
- // have written a non-zero value into |stack_unchanged|; clearing it here
+ // have written a non-zero value into |data_unchanged|; clearing it here
// will let that thread detect that something did change. This needs to
// happen after the atomic |depth| operation above so a "release" store
// is required.
- header_->stack_unchanged.store(0, std::memory_order_release);
+ header_->data_unchanged.store(0, std::memory_order_release);
}
std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
ActivityId id,
ActivityTrackerMemoryAllocator* allocator) {
- // User-data is only stored for activities actually held in the stack.
- if (id < stack_slots_) {
- // Don't allow user data for lock acquisition as recursion may occur.
- if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
- NOTREACHED();
- return MakeUnique<ActivityUserData>(nullptr, 0);
- }
-
- // Get (or reuse) a block of memory and create a real UserData object
- // on it.
- PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
- void* memory =
- allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
- if (memory) {
- std::unique_ptr<ActivityUserData> user_data =
- MakeUnique<ActivityUserData>(memory, kUserDataSize);
- stack_[id].user_data_ref = ref;
- stack_[id].user_data_id = user_data->id();
- return user_data;
- }
+ // Don't allow user data for lock acquisition as recursion may occur.
+ if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+ NOTREACHED();
+ return MakeUnique<ActivityUserData>();
}
- // Return a dummy object that will still accept (but ignore) Set() calls.
- return MakeUnique<ActivityUserData>(nullptr, 0);
+ // User-data is only stored for activities actually held in the stack.
+ if (id >= stack_slots_)
+ return MakeUnique<ActivityUserData>();
+
+ // Create and return a real UserData object.
+ return CreateUserDataForActivity(&stack_[id], allocator);
}
bool ThreadActivityTracker::HasUserData(ActivityId id) {
@@ -768,12 +879,27 @@ void ThreadActivityTracker::ReleaseUserData(
}
}
+void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ // A thread-checker creates a lock to check the thread-id which means
+ // re-entry into this code if lock acquisitions are being tracked.
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Fill the reusable exception activity.
+ Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
+ data);
+
+ // The data has changed meaning that some other thread trying to copy the
+ // contents for reporting purposes could get bad data.
+ header_->data_unchanged.store(0, std::memory_order_relaxed);
+}
+
bool ThreadActivityTracker::IsValid() const {
- if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
- header_->process_id.load(std::memory_order_relaxed) == 0 ||
- header_->thread_ref.as_id == 0 ||
- header_->start_time == 0 ||
- header_->start_ticks == 0 ||
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
+ header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
+ header_->start_time == 0 || header_->start_ticks == 0 ||
header_->stack_slots != stack_slots_ ||
header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
return false;
@@ -804,20 +930,21 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
output_snapshot->activity_stack.reserve(stack_slots_);
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
- // Remember the process and thread IDs to ensure they aren't replaced
- // during the snapshot operation. Use "acquire" to ensure that all the
- // non-atomic fields of the structure are valid (at least at the current
- // moment in time).
- const int64_t starting_process_id =
- header_->process_id.load(std::memory_order_acquire);
+ // Remember the data IDs to ensure nothing is replaced during the snapshot
+ // operation. Use "acquire" so that all the non-atomic fields of the
+ // structure are valid (at least at the current moment in time).
+ const uint32_t starting_id =
+ header_->owner.data_id.load(std::memory_order_acquire);
+ const int64_t starting_create_stamp = header_->owner.create_stamp;
+ const int64_t starting_process_id = header_->owner.process_id;
const int64_t starting_thread_id = header_->thread_ref.as_id;
- // Write a non-zero value to |stack_unchanged| so it's possible to detect
+ // Write a non-zero value to |data_unchanged| so it's possible to detect
// at the end that nothing has changed since copying the data began. A
// "cst" operation is required to ensure it occurs before everything else.
// Using "cst" memory ordering is relatively expensive but this is only
// done during analysis so doesn't directly affect the worker threads.
- header_->stack_unchanged.store(1, std::memory_order_seq_cst);
+ header_->data_unchanged.store(1, std::memory_order_seq_cst);
// Fetching the current depth also "acquires" the contents of the stack.
depth = header_->current_depth.load(std::memory_order_acquire);
@@ -829,29 +956,26 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
count * sizeof(Activity));
}
+ // Capture the last exception.
+ memcpy(&output_snapshot->last_exception, &header_->last_exception,
+ sizeof(Activity));
+
+ // TODO(bcwhite): Snapshot other things here.
+
// Retry if something changed during the copy. A "cst" operation ensures
// it must happen after all the above operations.
- if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
+ if (!header_->data_unchanged.load(std::memory_order_seq_cst))
continue;
    // Stack copied. Record its full depth.
output_snapshot->activity_stack_depth = depth;
- // TODO(bcwhite): Snapshot other things here.
-
- // Get the general thread information. Loading of "process_id" is guaranteed
- // to be last so that it's possible to detect below if any content has
- // changed while reading it. It's technically possible for a thread to end,
- // have its data cleared, a new thread get created with the same IDs, and
- // it perform an action which starts tracking all in the time since the
- // ID reads above but the chance is so unlikely that it's not worth the
- // effort and complexity of protecting against it (perhaps with an
- // "unchanged" field like is done for the stack).
+ // Get the general thread information.
output_snapshot->thread_name =
std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
+ output_snapshot->create_stamp = header_->owner.create_stamp;
output_snapshot->thread_id = header_->thread_ref.as_id;
- output_snapshot->process_id =
- header_->process_id.load(std::memory_order_seq_cst);
+ output_snapshot->process_id = header_->owner.process_id;
// All characters of the thread-name buffer were copied so as to not break
// if the trailing NUL were missing. Now limit the length if the actual
@@ -859,9 +983,11 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
output_snapshot->thread_name.resize(
strlen(output_snapshot->thread_name.c_str()));
- // If the process or thread ID has changed then the tracker has exited and
- // the memory reused by a new one. Try again.
- if (output_snapshot->process_id != starting_process_id ||
+    // If the data ID has changed then the tracker has exited and the memory
+    // has been reused by a new one. Try again.
+ if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
+ output_snapshot->create_stamp != starting_create_stamp ||
+ output_snapshot->process_id != starting_process_id ||
output_snapshot->thread_id != starting_thread_id) {
continue;
}
@@ -877,10 +1003,14 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
const int64_t start_ticks = header_->start_ticks;
for (Activity& activity : output_snapshot->activity_stack) {
activity.time_internal =
- (start_time +
- TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
+ WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
.ToInternalValue();
}
+ output_snapshot->last_exception.time_internal =
+ WallTimeFromTickTime(start_ticks,
+ output_snapshot->last_exception.time_internal,
+ start_time)
+ .ToInternalValue();
// Success!
return true;
@@ -890,11 +1020,48 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
return false;
}
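The timestamp conversion in the loop above offsets each activity's tick reading by the wall-clock time recorded when the tracker started. A minimal sketch of what WallTimeFromTickTime is assumed to compute, reconstructed from the inline expression the diff removes:

// Sketch (assumption): converts a tick reading captured at |tick_value|
// into wall-clock time, given the wall time and tick count recorded
// when the tracker started.
base::Time WallTimeFromTickTimeSketch(int64_t ticks_start,
                                      int64_t tick_value,
                                      base::Time time_start) {
  // Wall time = tracker start time + (tick reading - ticks at start).
  return time_start +
         base::TimeDelta::FromInternalValue(tick_value - ticks_start);
}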
+const void* ThreadActivityTracker::GetBaseAddress() {
+ return header_;
+}
+
+void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
+ int64_t stamp) {
+ header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp) {
+ const Header* header = reinterpret_cast<const Header*>(memory);
+ return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
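Because the only requirement is that the memory begin with an OwningProcess, an analyzer holding raw allocator bytes can check ownership without constructing a tracker. A hypothetical helper (BelongsToProcess is not part of the API):

// Hypothetical analyzer-side check: |memory| points at a record whose
// first field is an OwningProcess, per the convention used throughout.
bool BelongsToProcess(const void* memory, int64_t expected_pid) {
  int64_t pid;
  int64_t create_stamp;
  // Fails if the record has no validly initialized owner.
  if (!base::debug::ThreadActivityTracker::GetOwningProcessId(
          memory, &pid, &create_stamp)) {
    return false;
  }
  return pid == expected_pid;
}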
+
// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
}
+std::unique_ptr<ActivityUserData>
+ThreadActivityTracker::CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator) {
+ DCHECK_EQ(0U, activity->user_data_ref);
+
+ PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+ void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
+ if (memory) {
+ std::unique_ptr<ActivityUserData> user_data =
+ MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ activity->user_data_ref = ref;
+ activity->user_data_id = user_data->id();
+ return user_data;
+ }
+
+ // Return a dummy object that will still accept (but ignore) Set() calls.
+ return MakeUnique<ActivityUserData>();
+}
+
// The instantiation of the GlobalActivityTracker object.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
@@ -977,6 +1144,9 @@ bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom(
pickle_size = pickler.size();
changes.store(0, std::memory_order_relaxed);
+ // Initialize the owner info.
+ owner.Release_Initialize();
+
// Now set those fields that can change.
return UpdateFrom(info);
}
@@ -1045,21 +1215,23 @@ ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
user_data_ =
tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
} else {
- user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ user_data_ = MakeUnique<ActivityUserData>();
}
}
return *user_data_;
}
-GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size)
- : ActivityUserData(memory, size) {}
+GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
+ size_t size,
+ int64_t pid)
+ : ActivityUserData(memory, size, pid) {}
-GlobalActivityTracker::GlobalUserData::~GlobalUserData() {}
+GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
-void GlobalActivityTracker::GlobalUserData::Set(StringPiece name,
- ValueType type,
- const void* memory,
- size_t size) {
+void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
AutoLock lock(data_lock_);
ActivityUserData::Set(name, type, memory, size);
}
@@ -1082,10 +1254,11 @@ GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
void GlobalActivityTracker::CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth) {
+ int stack_depth,
+ int64_t process_id) {
// There's no need to do anything with the result. It is self-managing.
GlobalActivityTracker* global_tracker =
- new GlobalActivityTracker(std::move(allocator), stack_depth);
+ new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
// Create a tracker for this thread since it is known.
global_tracker->CreateTrackerForCurrentThread();
}
@@ -1111,7 +1284,7 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
DCHECK(success);
CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
std::move(mapped_file), size, id, name, false),
- stack_depth);
+ stack_depth, 0);
}
#endif // !defined(OS_NACL)
@@ -1119,11 +1292,37 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
- int stack_depth) {
+ int stack_depth,
+ int64_t process_id) {
CreateWithAllocator(
- MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
+ MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth,
+ process_id);
+}
+
+// static
+void GlobalActivityTracker::SetForTesting(
+ std::unique_ptr<GlobalActivityTracker> tracker) {
+ CHECK(!subtle::NoBarrier_Load(&g_tracker_));
+ subtle::Release_Store(&g_tracker_,
+ reinterpret_cast<uintptr_t>(tracker.release()));
}
+// static
+std::unique_ptr<GlobalActivityTracker>
+GlobalActivityTracker::ReleaseForTesting() {
+ GlobalActivityTracker* tracker = Get();
+ if (!tracker)
+ return nullptr;
+
+ // Thread trackers assume that the global tracker is present for some
+  // operations, so ensure that none remain.
+ tracker->ReleaseTrackerForCurrentThreadForTesting();
+ DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
+
+ subtle::Release_Store(&g_tracker_, 0);
+ return WrapUnique(tracker);
+}
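In a test, the two for-testing hooks are intended to bracket a scope; a hypothetical usage pattern:

// Hypothetical test usage: take the real tracker out, let the test
// install its own, then restore the original on the way out.
std::unique_ptr<base::debug::GlobalActivityTracker> original =
    base::debug::GlobalActivityTracker::ReleaseForTesting();
// ... run the test with a substitute tracker, then release it ...
base::debug::GlobalActivityTracker::SetForTesting(std::move(original));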
+
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
DCHECK(!this_thread_tracker_.Get());
@@ -1180,8 +1379,181 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
ThreadActivityTracker* tracker =
reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
- if (tracker)
+ if (tracker) {
+ this_thread_tracker_.Set(nullptr);
delete tracker;
+ }
+}
+
+void GlobalActivityTracker::SetBackgroundTaskRunner(
+ const scoped_refptr<TaskRunner>& runner) {
+ AutoLock lock(global_tracker_lock_);
+ background_task_runner_ = runner;
+}
+
+void GlobalActivityTracker::SetProcessExitCallback(
+ ProcessExitCallback callback) {
+ AutoLock lock(global_tracker_lock_);
+ process_exit_callback_ = callback;
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+ ProcessId process_id,
+ const FilePath::StringType& cmd) {
+ const int64_t pid = process_id;
+ DCHECK_NE(GetProcessId(), pid);
+ DCHECK_NE(0, pid);
+
+ base::AutoLock lock(global_tracker_lock_);
+ if (base::ContainsKey(known_processes_, pid)) {
+ // TODO(bcwhite): Measure this in UMA.
+ NOTREACHED() << "Process #" << process_id
+ << " was previously recorded as \"launched\""
+ << " with no corresponding exit.";
+ known_processes_.erase(pid);
+ }
+
+#if defined(OS_WIN)
+ known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
+#else
+ known_processes_.insert(std::make_pair(pid, cmd));
+#endif
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+ ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args) {
+ const int64_t pid = process_id;
+  if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
+ RecordProcessLaunch(pid, FilePath::StringType(FILE_PATH_LITERAL("\"")) +
+ exe + FILE_PATH_LITERAL("\" ") + args);
+ } else {
+ RecordProcessLaunch(pid, exe + FILE_PATH_LITERAL(' ') + args);
+ }
+}
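With the containment check corrected above (find() returns npos, which is truthy, when no space exists), a path containing a space is quoted before the arguments are appended. An illustrative call; tracker, child_pid, and the paths are hypothetical:

// The space in |exe| triggers quoting of the executable path.
tracker->RecordProcessLaunch(
    child_pid,
    FILE_PATH_LITERAL("C:\\Program Files\\app.exe"),
    FILE_PATH_LITERAL("--type=worker"));
// Stored command line: "C:\Program Files\app.exe" --type=worker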
+
+void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
+ int exit_code) {
+ const int64_t pid = process_id;
+ DCHECK_NE(GetProcessId(), pid);
+ DCHECK_NE(0, pid);
+
+ scoped_refptr<TaskRunner> task_runner;
+ std::string command_line;
+ {
+ base::AutoLock lock(global_tracker_lock_);
+ task_runner = background_task_runner_;
+ auto found = known_processes_.find(pid);
+ if (found != known_processes_.end()) {
+ command_line = std::move(found->second);
+ known_processes_.erase(found);
+ } else {
+ DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
+ }
+ }
+
+ // Use the current time to differentiate the process that just exited
+ // from any that might be created in the future with the same ID.
+ int64_t now_stamp = Time::Now().ToInternalValue();
+
+ // The persistent allocator is thread-safe so run the iteration and
+ // adjustments on a worker thread if one was provided.
+ if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
+ task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
+ pid, now_stamp, exit_code, Passed(&command_line)));
+ return;
+ }
+
+ CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
+}
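A sketch of how a launching process might wire these pieces together before reaping children. The callback signature matches the ProcessExitCallback declared in the header; OnProcessExit and WireUpProcessTracking are hypothetical:

void OnProcessExit(int64_t pid, int64_t exit_stamp, int exit_code,
                   base::debug::GlobalActivityTracker::ProcessPhase phase,
                   std::string&& command_line,
                   base::debug::ActivityUserData::Snapshot&& data) {
  // Inspect |phase| and |data|; both were captured at exit time.
}

void WireUpProcessTracking(scoped_refptr<base::TaskRunner> background) {
  base::debug::GlobalActivityTracker* global =
      base::debug::GlobalActivityTracker::Get();
  if (!global)
    return;
  global->SetBackgroundTaskRunner(background);  // Cleanup runs off-thread.
  global->SetProcessExitCallback(base::Bind(&OnProcessExit));
}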
+
+void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
+ process_data().SetInt(kProcessPhaseDataKey, phase);
+}
+
+void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ std::string&& command_line) {
+  // The process may not have exited cleanly so it's necessary to go through
+ // all the data structures it may have allocated in the persistent memory
+ // segment and mark them as "released". This will allow them to be reused
+ // later on.
+
+ PersistentMemoryAllocator::Iterator iter(allocator_.get());
+ PersistentMemoryAllocator::Reference ref;
+
+ ProcessExitCallback process_exit_callback;
+ {
+ AutoLock lock(global_tracker_lock_);
+ process_exit_callback = process_exit_callback_;
+ }
+ if (process_exit_callback) {
+    // Find the process's user-data record so the process phase can be passed
+ // to the callback.
+ ActivityUserData::Snapshot process_data_snapshot;
+ while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
+ const void* memory = allocator_->GetAsArray<char>(
+ ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
+ int64_t found_id;
+ int64_t create_stamp;
+ if (ActivityUserData::GetOwningProcessId(memory, &found_id,
+ &create_stamp)) {
+ if (found_id == process_id && create_stamp < exit_stamp) {
+ const ActivityUserData process_data(const_cast<void*>(memory),
+ allocator_->GetAllocSize(ref));
+ process_data.CreateSnapshot(&process_data_snapshot);
+ break; // No need to look for any others.
+ }
+ }
+ }
+ iter.Reset(); // So it starts anew when used below.
+
+    // Record the process's phase at exit so the callback doesn't need to go
+ // searching based on a private key value.
+ ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
+ auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
+ if (phase != process_data_snapshot.end())
+ exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
+
+ // Perform the callback.
+ process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
+ std::move(command_line),
+ std::move(process_data_snapshot));
+ }
+
+ // Find all allocations associated with the exited process and free them.
+ uint32_t type;
+ while ((ref = iter.GetNext(&type)) != 0) {
+ switch (type) {
+ case kTypeIdActivityTracker:
+ case kTypeIdUserDataRecord:
+ case kTypeIdProcessDataRecord:
+ case ModuleInfoRecord::kPersistentTypeId: {
+ const void* memory = allocator_->GetAsArray<char>(
+ ref, type, PersistentMemoryAllocator::kSizeAny);
+ int64_t found_id;
+ int64_t create_stamp;
+
+ // By convention, the OwningProcess structure is always the first
+ // field of the structure so there's no need to handle all the
+ // cases separately.
+ if (OwningProcess::GetOwningProcessId(memory, &found_id,
+ &create_stamp)) {
+ // Only change the type to be "free" if the process ID matches and
+ // the creation time is before the exit time (so PID re-use doesn't
+ // cause the erasure of something that is in-use). Memory is cleared
+ // here, rather than when it's needed, so as to limit the impact at
+ // that critical time.
+ if (found_id == process_id && create_stamp < exit_stamp)
+ allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
+ }
+ } break;
+ }
+ }
}
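The ~type argument above works because, as the enum in the header shows, each "free" type ID is defined as the bitwise complement of its live ID, so one statement can free every record kind. A compile-time illustration:

static_assert(
    base::debug::GlobalActivityTracker::kTypeIdActivityTrackerFree ==
        ~base::debug::GlobalActivityTracker::kTypeIdActivityTracker,
    "'free' IDs are complements of live IDs, so ChangeType(ref, ~type, "
    "type, /*clear=*/true) releases any record kind");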
void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
@@ -1226,14 +1598,16 @@ void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
StringPiece group_name) {
const std::string key = std::string("FieldTrial.") + trial_name;
- global_data_.SetString(key, group_name);
+ process_data_.SetString(key, group_name);
}
GlobalActivityTracker::GlobalActivityTracker(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth)
+ int stack_depth,
+ int64_t process_id)
: allocator_(std::move(allocator)),
stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
+ process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
this_thread_tracker_(&OnTLSDestroy),
thread_tracker_count_(0),
thread_tracker_allocator_(allocator_.get(),
@@ -1247,24 +1621,28 @@ GlobalActivityTracker::GlobalActivityTracker(
kTypeIdUserDataRecordFree,
kUserDataSize,
kCachedUserDataMemories,
- /*make_iterable=*/false),
- global_data_(
- allocator_->GetAsArray<char>(
- allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
- kTypeIdGlobalDataRecord,
- PersistentMemoryAllocator::kSizeAny),
- kGlobalDataSize) {
- // Ensure the passed memory is valid and empty (iterator finds nothing).
- uint32_t type;
- DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
+ /*make_iterable=*/true),
+ process_data_(allocator_->GetAsArray<char>(
+ AllocateFrom(allocator_.get(),
+ kTypeIdProcessDataRecordFree,
+ kProcessDataSize,
+ kTypeIdProcessDataRecord),
+ kTypeIdProcessDataRecord,
+ kProcessDataSize),
+ kProcessDataSize,
+ process_id_) {
+ DCHECK_NE(0, process_id_);
// Ensure that there is no other global object and then make this one such.
DCHECK(!g_tracker_);
subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
- // The global records must be iterable in order to be found by an analyzer.
+ // The data records must be iterable in order to be found by an analyzer.
allocator_->MakeIterable(allocator_->GetAsReference(
- global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
+ process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
+
+ // Note that this process has launched.
+ SetProcessPhase(PROCESS_LAUNCHED);
// Fetch and record all activated field trials.
FieldTrial::ActiveGroups active_groups;
@@ -1274,7 +1652,7 @@ GlobalActivityTracker::GlobalActivityTracker(
}
GlobalActivityTracker::~GlobalActivityTracker() {
- DCHECK_EQ(Get(), this);
+ DCHECK(Get() == nullptr || Get() == this);
DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
subtle::Release_Store(&g_tracker_, 0);
}
@@ -1295,6 +1673,23 @@ void GlobalActivityTracker::ReturnTrackerMemory(
thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}
+void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
+ const void* origin,
+ uint32_t code) {
+ // Get an existing tracker for this thread. It's not possible to create
+ // one at this point because such would involve memory allocations and
+ // other potentially complex operations that can cause failures if done
+ // within an exception handler. In most cases various operations will
+ // have already created the tracker so this shouldn't generally be a
+ // problem.
+ ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
+ if (!tracker)
+ return;
+
+ tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
+ ActivityData::ForException(code));
+}
+
// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
delete reinterpret_cast<ManagedActivityTracker*>(value);
diff --git a/chromium/base/debug/activity_tracker.h b/chromium/base/debug/activity_tracker.h
index 719a31865ca..c968f38c70b 100644
--- a/chromium/base/debug/activity_tracker.h
+++ b/chromium/base/debug/activity_tracker.h
@@ -23,12 +23,15 @@
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/location.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/process/process_handle.h"
#include "base/strings/string_piece.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/task_runner.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_local_storage.h"
@@ -41,7 +44,6 @@ class FilePath;
class Lock;
class PlatformThreadHandle;
class Process;
-class StaticAtomicSequenceNumber;
class WaitableEvent;
namespace debug {
@@ -56,11 +58,48 @@ enum : int {
kActivityCallStackSize = 10,
};
+// A class for keeping all information needed to verify that a structure is
+// associated with a given process.
+struct OwningProcess {
+ OwningProcess();
+ ~OwningProcess();
+
+  // Initializes the structure with a process id (zero means "use the current
+  // process") and the current time. Together these uniquely identify a
+  // process. A unique non-zero data_id is also set, making it possible to
+  // tell, via atomic reads, whether the data has changed.
+ void Release_Initialize(int64_t pid = 0);
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from memory without loading the entire structure for analysis. This will
+ // return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp);
+
+ // SHA1(base::debug::OwningProcess): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xB1179672 + 1;
+
+ // Expected size for 32/64-bit check by PersistentMemoryAllocator.
+ static constexpr size_t kExpectedInstanceSize = 24;
+
+ std::atomic<uint32_t> data_id;
+ uint32_t padding;
+ int64_t process_id;
+ int64_t create_stamp;
+};
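A reader can use data_id as a guard around the non-atomic fields, mirroring the retry pattern in ThreadActivityTracker::CreateSnapshot. A hedged sketch, not part of the declared API:

// Hypothetical stable read of the non-atomic owner fields.
bool ReadOwnerStable(const base::debug::OwningProcess& owner,
                     int64_t* out_pid,
                     int64_t* out_stamp) {
  // "Acquire" makes the non-atomic fields visible once the id is set.
  const uint32_t id = owner.data_id.load(std::memory_order_acquire);
  if (id == 0)
    return false;  // Never initialized.
  *out_pid = owner.process_id;
  *out_stamp = owner.create_stamp;
  // If the id changed, the fields read above may be torn; retry.
  return owner.data_id.load(std::memory_order_seq_cst) == id;
}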
+
// The data associated with an activity is dependent upon the activity type.
// This union defines all of the various fields. All fields must be explicitly
// sized types to ensure no interoperability problems between 32-bit and
// 64-bit systems.
union ActivityData {
+ // Expected size for 32/64-bit check.
+ // TODO(bcwhite): VC2015 doesn't allow statics in unions. Fix when it does.
+ // static constexpr size_t kExpectedInstanceSize = 8;
+
// Generic activities don't have any defined structure.
struct {
uint32_t id; // An arbitrary identifier used for association.
@@ -81,6 +120,9 @@ union ActivityData {
struct {
int64_t process_id; // A unique identifier for a process.
} process;
+ struct {
+ uint32_t code; // An "exception code" number.
+ } exception;
// These methods create an ActivityData object from the appropriate
// parameters. Objects of this type should always be created this way to
@@ -126,6 +168,12 @@ union ActivityData {
data.process.process_id = id;
return data;
}
+
+ static ActivityData ForException(const uint32_t code) {
+ ActivityData data;
+ data.exception.code = code;
+ return data;
+ }
};
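The new factory is used the same way as the existing ones; the exception code value here is purely illustrative:

// Build the data payload for an exception record.
base::debug::ActivityData data =
    base::debug::ActivityData::ForException(0xC0000005u);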
// A "null" activity-data that can be passed to indicate "do not change".
@@ -237,6 +285,9 @@ struct Activity {
ACT_PROCESS_START = ACT_PROCESS,
ACT_PROCESS_WAIT,
+    // Exception activities indicate the occurrence of something unexpected.
+ ACT_EXCEPTION = 14 << 4,
+
// Generic activities are user defined and can be anything.
ACT_GENERIC = 15 << 4,
@@ -293,7 +344,9 @@ struct Activity {
// This class manages arbitrary user data that can be associated with activities
// done by a thread by supporting key/value pairs of any type. This can provide
// additional information during debugging. It is also used to store arbitrary
-// global data. All updates must be done from the same thread.
+// global data. All updates must be done from the same thread though other
+// threads can read it concurrently if they create new objects using the same
+// memory.
class BASE_EXPORT ActivityUserData {
public:
  // List of known value types. REFERENCE types must immediately follow the non-
@@ -340,7 +393,7 @@ class BASE_EXPORT ActivityUserData {
private:
friend class ActivityUserData;
- ValueType type_;
+ ValueType type_ = END_OF_VALUES;
uint64_t short_value_; // Used to hold copy of numbers, etc.
std::string long_value_; // Used to hold copy of raw/string data.
StringPiece ref_value_; // Used to hold reference to external data.
@@ -348,14 +401,17 @@ class BASE_EXPORT ActivityUserData {
using Snapshot = std::map<std::string, TypedValue>;
- ActivityUserData(void* memory, size_t size);
+ // Initialize the object either as a "sink" that just accepts and discards
+ // data or an active one that writes to a given (zeroed) memory block.
+ ActivityUserData();
+ ActivityUserData(void* memory, size_t size, int64_t pid = 0);
virtual ~ActivityUserData();
// Gets the unique ID number for this user data. If this changes then the
// contents have been overwritten by another thread. The return value is
// always non-zero unless it's actually just a data "sink".
uint32_t id() const {
- return memory_ ? id_->load(std::memory_order_relaxed) : 0;
+ return header_ ? header_->owner.data_id.load(std::memory_order_relaxed) : 0;
}
// Writes a |value| (as part of a key/value pair) that will be included with
@@ -403,13 +459,23 @@ class BASE_EXPORT ActivityUserData {
// Creates a snapshot of the key/value pairs contained within. The returned
// data will be fixed, independent of whatever changes afterward. There is
- // protection against concurrent modification of the values but no protection
- // against a complete overwrite of the contents; the caller must ensure that
- // the memory segment is not going to be re-initialized while this runs.
+ // some protection against concurrent modification. This will return false
+ // if the data is invalid or if a complete overwrite of the contents is
+ // detected.
bool CreateSnapshot(Snapshot* output_snapshot) const;
// Gets the base memory address used for storing data.
- const void* GetBaseAddress();
+ const void* GetBaseAddress() const;
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+  // from the memory block without loading the entire structure for analysis.
+  // This will return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp);
protected:
virtual void Set(StringPiece name,
@@ -422,20 +488,31 @@ class BASE_EXPORT ActivityUserData {
enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
- // A structure used to reference data held outside of persistent memory.
- struct ReferenceRecord {
- uint64_t address;
- uint64_t size;
+  // A structure that defines the header placed at the front of the memory.
+ struct MemoryHeader {
+ MemoryHeader();
+ ~MemoryHeader();
+
+ OwningProcess owner; // Information about the creating process.
};
// Header to a key/value record held in persistent memory.
- struct Header {
+ struct FieldHeader {
+ FieldHeader();
+ ~FieldHeader();
+
std::atomic<uint8_t> type; // Encoded ValueType
uint8_t name_size; // Length of "name" key.
    std::atomic<uint16_t> value_size;  // Actual size of the stored value.
uint16_t record_size; // Total storage of name, value, header.
};
+ // A structure used to reference data held outside of persistent memory.
+ struct ReferenceRecord {
+ uint64_t address;
+ uint64_t size;
+ };
+
  // This record is used to hold known values in a map so that they can be
// found and overwritten later.
struct ValueInfo {
@@ -456,7 +533,10 @@ class BASE_EXPORT ActivityUserData {
size_t size);
// Loads any data already in the memory segment. This allows for accessing
- // records created previously.
+ // records created previously. If this detects that the underlying data has
+ // gone away (cleared by another thread/process), it will invalidate all the
+  // data in this object and turn it into a simple "sink" with no values to
+ // return.
void ImportExistingData() const;
// A map of all the values within the memory block, keyed by name for quick
@@ -470,12 +550,14 @@ class BASE_EXPORT ActivityUserData {
mutable char* memory_;
mutable size_t available_;
- // A pointer to the unique ID for this instance.
- std::atomic<uint32_t>* const id_;
+ // A pointer to the memory header for this instance.
+ MemoryHeader* const header_;
- // This ID is used to create unique indentifiers for user data so that it's
- // possible to tell if the information has been overwritten.
- static StaticAtomicSequenceNumber next_id_;
+ // These hold values used when initially creating the object. They are
+ // compared against current header values to check for outside changes.
+ const uint32_t orig_data_id;
+ const int64_t orig_process_id;
+ const int64_t orig_create_stamp;
DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
};
@@ -511,6 +593,9 @@ class BASE_EXPORT ThreadActivityTracker {
// truncated due to internal length limitations.
std::string thread_name;
+ // The timestamp at which this process was created.
+ int64_t create_stamp;
+
// The process and thread IDs. These values have no meaning other than
// they uniquely identify a running process and a running thread within
// that process. Thread-IDs can be re-used across different processes
@@ -525,6 +610,9 @@ class BASE_EXPORT ThreadActivityTracker {
// The current total depth of the activity stack, including those later
// entries not recorded in the |activity_stack| vector.
uint32_t activity_stack_depth = 0;
+
+ // The last recorded "exception" activity.
+ Activity last_exception;
};
// This is the base class for having the compiler manage an activity on the
@@ -608,6 +696,12 @@ class BASE_EXPORT ThreadActivityTracker {
void ReleaseUserData(ActivityId id,
ActivityTrackerMemoryAllocator* allocator);
+ // Save an exception. |origin| is the location of the exception.
+ void RecordExceptionActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data);
+
// Returns whether the current data is valid or not. It is not valid if
// corruption has been detected in the header or other data structures.
bool IsValid() const;
@@ -618,6 +712,19 @@ class BASE_EXPORT ThreadActivityTracker {
// implementation does not support concurrent snapshot operations.
bool CreateSnapshot(Snapshot* output_snapshot) const;
+ // Gets the base memory address used for storing data.
+ const void* GetBaseAddress();
+
+ // Explicitly sets the process ID.
+ void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+ // Gets the associated process ID, in native form, and the creation timestamp
+ // from tracker memory without loading the entire structure for analysis. This
+ // will return false if no valid process ID is available.
+ static bool GetOwningProcessId(const void* memory,
+ int64_t* out_id,
+ int64_t* out_stamp);
+
// Calculates the memory size required for a given stack depth, including
// the internal header structure for the stack.
static size_t SizeForStackDepth(int stack_depth);
@@ -625,6 +732,10 @@ class BASE_EXPORT ThreadActivityTracker {
private:
friend class ActivityTrackerTest;
+ std::unique_ptr<ActivityUserData> CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator);
+
Header* const header_; // Pointer to the Header structure.
Activity* const stack_; // The stack of activities.
const uint32_t stack_slots_; // The total number of stack slots.
@@ -649,15 +760,44 @@ class BASE_EXPORT GlobalActivityTracker {
// will be safely ignored. These are public so that an external process
// can recognize records of this type within an allocator.
enum : uint32_t {
- kTypeIdActivityTracker = 0x5D7381AF + 3, // SHA1(ActivityTracker) v3
- kTypeIdUserDataRecord = 0x615EDDD7 + 2, // SHA1(UserDataRecord) v2
+ kTypeIdActivityTracker = 0x5D7381AF + 4, // SHA1(ActivityTracker) v4
+ kTypeIdUserDataRecord = 0x615EDDD7 + 3, // SHA1(UserDataRecord) v3
kTypeIdGlobalLogMessage = 0x4CF434F9 + 1, // SHA1(GlobalLogMessage) v1
- kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
+ kTypeIdProcessDataRecord = kTypeIdUserDataRecord + 0x100,
kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
+ kTypeIdProcessDataRecordFree = ~kTypeIdProcessDataRecord,
+ };
+
+ // An enumeration of common process life stages. All entries are given an
+ // explicit number so they are known and remain constant; this allows for
+ // cross-version analysis either locally or on a server.
+ enum ProcessPhase : int {
+ // The phases are generic and may have meaning to the tracker.
+ PROCESS_PHASE_UNKNOWN = 0,
+ PROCESS_LAUNCHED = 1,
+ PROCESS_LAUNCH_FAILED = 2,
+ PROCESS_EXITED_CLEANLY = 10,
+ PROCESS_EXITED_WITH_CODE = 11,
+
+ // Add here whatever is useful for analysis.
+ PROCESS_SHUTDOWN_STARTED = 100,
+ PROCESS_MAIN_LOOP_STARTED = 101,
};
+ // A callback made when a process exits to allow immediate analysis of its
+ // data. Note that the system may reuse the |process_id| so when fetching
+ // records it's important to ensure that what is returned was created before
+ // the |exit_stamp|. Movement of |process_data| information is allowed.
+ using ProcessExitCallback =
+ Callback<void(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ ProcessPhase exit_phase,
+ std::string&& command_line,
+ ActivityUserData::Snapshot&& process_data)>;
+
// This structure contains information about a loaded module, as shown to
// users of the tracker.
struct BASE_EXPORT ModuleInfo {
@@ -685,9 +825,8 @@ class BASE_EXPORT GlobalActivityTracker {
};
// This is a thin wrapper around the thread-tracker's ScopedActivity that
- // accesses the global tracker to provide some of the information, notably
- // which thread-tracker to use. It is safe to create even if activity
- // tracking is not enabled.
+ // allows thread-safe access to data values. It is safe to use even if
+ // activity tracking is not enabled.
class BASE_EXPORT ScopedThreadActivity
: public ThreadActivityTracker::ScopedActivity {
public:
@@ -728,9 +867,12 @@ class BASE_EXPORT GlobalActivityTracker {
// Creates a global tracker using a given persistent-memory |allocator| and
// providing the given |stack_depth| to each thread tracker it manages. The
// created object is activated so tracking will begin immediately upon return.
+  // The |process_id| can be zero, in which case it is read from the OS; a
+  // non-zero value is accepted for testing purposes.
static void CreateWithAllocator(
std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
#if !defined(OS_NACL)
// Like above but internally creates an allocator around a disk file with
@@ -745,11 +887,13 @@ class BASE_EXPORT GlobalActivityTracker {
#endif // !defined(OS_NACL)
// Like above but internally creates an allocator using local heap memory of
- // the specified size. This is used primarily for unit tests.
+  // the specified size. This is used primarily for unit tests. The
+  // |process_id| can be zero, in which case it is read from the OS; a
+  // non-zero value is accepted for testing purposes.
static void CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
// Gets the global activity-tracker or null if none exists.
static GlobalActivityTracker* Get() {
@@ -757,6 +901,15 @@ class BASE_EXPORT GlobalActivityTracker {
subtle::Acquire_Load(&g_tracker_));
}
+ // Sets the global activity-tracker for testing purposes.
+ static void SetForTesting(std::unique_ptr<GlobalActivityTracker> tracker);
+
+ // This access to the persistent allocator is only for testing; it extracts
+ // the global tracker completely. All tracked threads must exit before
+ // calling this. Tracking for the current thread will be automatically
+ // stopped.
+ static std::unique_ptr<GlobalActivityTracker> ReleaseForTesting();
+
// Convenience method for determining if a global tracker is active.
static bool IsEnabled() { return Get() != nullptr; }
@@ -789,6 +942,50 @@ class BASE_EXPORT GlobalActivityTracker {
// Releases the activity-tracker for the current thread (for testing only).
void ReleaseTrackerForCurrentThreadForTesting();
+ // Sets a task-runner that can be used for background work.
+ void SetBackgroundTaskRunner(const scoped_refptr<TaskRunner>& runner);
+
+ // Sets an optional callback to be called when a process exits.
+ void SetProcessExitCallback(ProcessExitCallback callback);
+
+ // Manages process lifetimes. These are called by the process that launched
+ // and reaped the subprocess, not the subprocess itself. If it is expensive
+ // to generate the parameters, Get() the global tracker and call these
+ // conditionally rather than using the static versions.
+ void RecordProcessLaunch(ProcessId process_id,
+ const FilePath::StringType& cmd);
+ void RecordProcessLaunch(ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args);
+ void RecordProcessExit(ProcessId process_id, int exit_code);
+ static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+ const FilePath::StringType& cmd) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(process_id, cmd);
+ }
+ static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+ const FilePath::StringType& exe,
+ const FilePath::StringType& args) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(process_id, exe, args);
+ }
+ static void RecordProcessExitIfEnabled(ProcessId process_id, int exit_code) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->RecordProcessExit(process_id, exit_code);
+ }
+
+ // Sets the "phase" of the current process, useful for knowing what it was
+ // doing when it last reported.
+ void SetProcessPhase(ProcessPhase phase);
+ static void SetProcessPhaseIfEnabled(ProcessPhase phase) {
+ GlobalActivityTracker* tracker = Get();
+ if (tracker)
+ tracker->SetProcessPhase(phase);
+ }
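Taken together, a launcher might use the static wrappers like this; child_pid and exit_code are hypothetical locals:

// Record the launch with the full command line...
base::debug::GlobalActivityTracker::RecordProcessLaunchIfEnabled(
    child_pid, FILE_PATH_LITERAL("worker --type=renderer"));
// ...and, after reaping the child, record its exit.
base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(child_pid,
                                                               exit_code);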
+
  // Records a log message. The current implementation does NOT recycle these,
  // so only store critical messages such as FATAL ones.
void RecordLogMessage(StringPiece message);
@@ -818,8 +1015,20 @@ class BASE_EXPORT GlobalActivityTracker {
tracker->RecordFieldTrial(trial_name, group_name);
}
- // Accesses the global data record for storing arbitrary key/value pairs.
- ActivityUserData& global_data() { return global_data_; }
+ // Record exception information for the current thread.
+ ALWAYS_INLINE
+ void RecordException(const void* origin, uint32_t code) {
+ return RecordExceptionImpl(::tracked_objects::GetProgramCounter(), origin,
+ code);
+ }
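As the implementation notes, this path must stay allocation-free, which makes it usable from an exception context. A hypothetical Windows vectored-handler sketch; the EXCEPTION_POINTERS field names are the standard Win32 ones:

LONG WINAPI ActivityExceptionHandler(EXCEPTION_POINTERS* info) {
  base::debug::GlobalActivityTracker* global =
      base::debug::GlobalActivityTracker::Get();
  if (global) {
    // Only notes the faulting address and code; no allocations occur.
    global->RecordException(info->ExceptionRecord->ExceptionAddress,
                            info->ExceptionRecord->ExceptionCode);
  }
  return EXCEPTION_CONTINUE_SEARCH;  // Let normal handling proceed.
}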
+
+ // Gets the process ID used for tracking. This is typically the same as what
+ // the OS thinks is the current process but can be overridden for testing.
+  int64_t process_id() { return process_id_; }
+
+ // Accesses the process data record for storing arbitrary key/value pairs.
+ // Updates to this are thread-safe.
+ ActivityUserData& process_data() { return process_data_; }
private:
friend class GlobalActivityAnalyzer;
@@ -837,10 +1046,10 @@ class BASE_EXPORT GlobalActivityTracker {
// A wrapper around ActivityUserData that is thread-safe and thus can be used
// in the global scope without the requirement of being called from only one
// thread.
- class GlobalUserData : public ActivityUserData {
+ class ThreadSafeUserData : public ActivityUserData {
public:
- GlobalUserData(void* memory, size_t size);
- ~GlobalUserData() override;
+ ThreadSafeUserData(void* memory, size_t size, int64_t pid = 0);
+ ~ThreadSafeUserData() override;
private:
void Set(StringPiece name,
@@ -850,7 +1059,7 @@ class BASE_EXPORT GlobalActivityTracker {
Lock data_lock_;
- DISALLOW_COPY_AND_ASSIGN(GlobalUserData);
+ DISALLOW_COPY_AND_ASSIGN(ThreadSafeUserData);
};
// State of a module as stored in persistent memory. This supports a single
@@ -862,7 +1071,8 @@ class BASE_EXPORT GlobalActivityTracker {
static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
// Expected size for 32/64-bit check by PersistentMemoryAllocator.
- static constexpr size_t kExpectedInstanceSize = 56;
+ static constexpr size_t kExpectedInstanceSize =
+ OwningProcess::kExpectedInstanceSize + 56;
// The atomic unfortunately makes this a "complex" class on some compilers
// and thus requires an out-of-line constructor & destructor even though
@@ -870,6 +1080,7 @@ class BASE_EXPORT GlobalActivityTracker {
ModuleInfoRecord();
~ModuleInfoRecord();
+ OwningProcess owner; // The process that created this record.
uint64_t address; // The base address of the module.
uint64_t load_time; // Time of last load/unload.
uint64_t size; // The size of the module in bytes.
@@ -921,18 +1132,30 @@ class BASE_EXPORT GlobalActivityTracker {
// Creates a global tracker using a given persistent-memory |allocator| and
// providing the given |stack_depth| to each thread tracker it manages. The
// created object is activated so tracking has already started upon return.
+  // The |process_id| can be zero, in which case it is read from the OS; a
+  // non-zero value is accepted for testing purposes.
GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
- int stack_depth);
+ int stack_depth,
+ int64_t process_id);
// Returns the memory used by an activity-tracker managed by this class.
// It is called during the destruction of a ManagedActivityTracker object.
void ReturnTrackerMemory(ManagedActivityTracker* tracker);
+ // Records exception information.
+ void RecordExceptionImpl(const void* pc, const void* origin, uint32_t code);
+
+  // Releases the activity-tracker associated with a thread. It is called
// automatically when a thread is joined and thus there is nothing more to
// be tracked. |value| is a pointer to a ManagedActivityTracker.
static void OnTLSDestroy(void* value);
+ // Does process-exit work. This can be run on any thread.
+ void CleanupAfterProcess(int64_t process_id,
+ int64_t exit_stamp,
+ int exit_code,
+ std::string&& command_line);
+
// The persistent-memory allocator from which the memory for all trackers
// is taken.
std::unique_ptr<PersistentMemoryAllocator> allocator_;
@@ -941,6 +1164,10 @@ class BASE_EXPORT GlobalActivityTracker {
// provide the stack-depth requested during construction.
const size_t stack_memory_size_;
+ // The process-id of the current process. This is kept as a member variable,
+ // defined during initialization, for testing purposes.
+ const int64_t process_id_;
+
// The activity tracker for the currently executing thread.
base::ThreadLocalStorage::Slot this_thread_tracker_;
@@ -955,9 +1182,8 @@ class BASE_EXPORT GlobalActivityTracker {
ActivityTrackerMemoryAllocator user_data_allocator_;
base::Lock user_data_allocator_lock_;
- // An object for holding global arbitrary key value pairs. Values must always
- // be written from the main UI thread.
- GlobalUserData global_data_;
+ // An object for holding arbitrary key value pairs with thread-safe access.
+ ThreadSafeUserData process_data_;
// A map of global module information, keyed by module path.
std::map<const std::string, ModuleInfoRecord*> modules_;
@@ -966,6 +1192,21 @@ class BASE_EXPORT GlobalActivityTracker {
// The active global activity tracker.
static subtle::AtomicWord g_tracker_;
+ // A lock that is used to protect access to the following fields.
+ base::Lock global_tracker_lock_;
+
+ // The collection of processes being tracked and their command-lines.
+ std::map<int64_t, std::string> known_processes_;
+
+ // A task-runner that can be used for doing background processing.
+ scoped_refptr<TaskRunner> background_task_runner_;
+
+ // A callback performed when a subprocess exits, including its exit-code
+ // and the phase it was in when that occurred. This will be called via
+  // the |background_task_runner_| if one is set, or otherwise on whatever
+  // thread reaped the process.
+ ProcessExitCallback process_exit_callback_;
+
DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
};
diff --git a/chromium/base/debug/activity_tracker_unittest.cc b/chromium/base/debug/activity_tracker_unittest.cc
index c46b81686b9..372d6acaaef 100644
--- a/chromium/base/debug/activity_tracker_unittest.cc
+++ b/chromium/base/debug/activity_tracker_unittest.cc
@@ -84,41 +84,73 @@ class ActivityTrackerTest : public testing::Test {
return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
}
+ void HandleProcessExit(int64_t id,
+ int64_t stamp,
+ int code,
+ GlobalActivityTracker::ProcessPhase phase,
+ std::string&& command,
+ ActivityUserData::Snapshot&& data) {
+ exit_id = id;
+ exit_stamp = stamp;
+ exit_code = code;
+ exit_phase = phase;
+ exit_command = std::move(command);
+ exit_data = std::move(data);
+ }
+
static void DoNothing() {}
+
+ int64_t exit_id = 0;
+ int64_t exit_stamp;
+ int exit_code;
+ GlobalActivityTracker::ProcessPhase exit_phase;
+ std::string exit_command;
+ ActivityUserData::Snapshot exit_data;
};
TEST_F(ActivityTrackerTest, UserDataTest) {
char buffer[256];
memset(buffer, 0, sizeof(buffer));
ActivityUserData data(buffer, sizeof(buffer));
- ASSERT_EQ(sizeof(buffer) - 8, data.available_);
+ size_t space = sizeof(buffer) - sizeof(ActivityUserData::MemoryHeader);
+ ASSERT_EQ(space, data.available_);
data.SetInt("foo", 1);
- ASSERT_EQ(sizeof(buffer) - 8 - 24, data.available_);
+ space -= 24;
+ ASSERT_EQ(space, data.available_);
data.SetUint("b", 1U); // Small names fit beside header in a word.
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16, data.available_);
+ space -= 16;
+ ASSERT_EQ(space, data.available_);
data.Set("c", buffer, 10);
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24, data.available_);
+ space -= 24;
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "it's been fun");
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
+ space -= 32;
+ ASSERT_EQ(space, data.available_);
data.Set("c", buffer, 20);
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "but we're done together");
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetString("dear john", "bye");
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(space, data.available_);
data.SetChar("d", 'x');
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32 - 8, data.available_);
+ space -= 8;
+ ASSERT_EQ(space, data.available_);
data.SetBool("ee", true);
- ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
+ space -= 16;
+ ASSERT_EQ(space, data.available_);
+
+ data.SetString("f", "");
+ space -= 8;
+ ASSERT_EQ(space, data.available_);
}
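The deltas asserted above are consistent with each record packing a field header (6 bytes of packed fields), the key name, and the value into one block rounded up to 8-byte alignment. A sketch of that arithmetic, inferred from the test's numbers rather than quoted from the implementation:

// Inferred record-size model: header fields (type, name_size,
// value_size, record_size = 6 bytes) + name + value, rounded up to
// kMemoryAlignment (sizeof(uint64_t) = 8).
size_t ExpectedRecordSize(size_t name_size, size_t value_size) {
  const size_t kFieldHeaderSize = 6;
  const size_t kAlignment = 8;
  const size_t raw = kFieldHeaderSize + name_size + value_size;
  return (raw + kAlignment - 1) & ~(kAlignment - 1);
}
// ExpectedRecordSize(3, 8) == 24  ("foo" -> int, as asserted above)
// ExpectedRecordSize(1, 8) == 16  ("b"   -> uint)
// ExpectedRecordSize(1, 1) == 8   ("d"   -> char)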
TEST_F(ActivityTrackerTest, PushPopTest) {
@@ -172,7 +204,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
}
TEST_F(ActivityTrackerTest, ScopedTaskTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
ThreadActivityTracker* tracker =
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
@@ -184,7 +216,7 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ASSERT_EQ(0U, snapshot.activity_stack.size());
{
- PendingTask task1(FROM_HERE, base::Bind(&DoNothing));
+ PendingTask task1(FROM_HERE, base::BindOnce(&DoNothing));
ScopedTaskRunActivity activity1(task1);
ActivityUserData& user_data1 = activity1.user_data();
(void)user_data1; // Tell compiler it's been used.
@@ -195,7 +227,7 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
{
- PendingTask task2(FROM_HERE, base::Bind(&DoNothing));
+ PendingTask task2(FROM_HERE, base::BindOnce(&DoNothing));
ScopedTaskRunActivity activity2(task2);
ActivityUserData& user_data2 = activity2.user_data();
(void)user_data2; // Tell compiler it's been used.
@@ -218,6 +250,28 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
}
+TEST_F(ActivityTrackerTest, ExceptionTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ ThreadActivityTracker::Snapshot snapshot;
+ ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.last_exception.activity_type);
+
+ char origin;
+ global->RecordException(&origin, 42);
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ EXPECT_EQ(Activity::ACT_EXCEPTION, snapshot.last_exception.activity_type);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin),
+ snapshot.last_exception.origin_address);
+ EXPECT_EQ(42U, snapshot.last_exception.data.exception.code);
+}
+
TEST_F(ActivityTrackerTest, CreateWithFileTest) {
const char temp_name[] = "CreateWithFileTest";
ScopedTempDir temp_dir;
@@ -246,6 +300,14 @@ TEST_F(ActivityTrackerTest, CreateWithFileTest) {
// GlobalActivityTracker tests below.
+TEST_F(ActivityTrackerTest, BasicTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+ // Ensure the data repositories have backing store, indicated by non-zero ID.
+ EXPECT_NE(0U, global->process_data().id());
+}
+
class SimpleActivityThread : public SimpleThread {
public:
SimpleActivityThread(const std::string& name,
@@ -300,7 +362,7 @@ class SimpleActivityThread : public SimpleThread {
};
TEST_F(ActivityTrackerTest, ThreadDeathTest) {
- GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
const size_t starting_active = GetGlobalActiveTrackerCount();
const size_t starting_inactive = GetGlobalInactiveTrackerCount();
@@ -332,5 +394,107 @@ TEST_F(ActivityTrackerTest, ThreadDeathTest) {
EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
}
+TEST_F(ActivityTrackerTest, ProcessDeathTest) {
+ // This doesn't actually create and destroy a process. Instead, it uses for-
+ // testing interfaces to simulate data created by other processes.
+ const ProcessId other_process_id = GetCurrentProcId() + 1;
+
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ ThreadActivityTracker* thread = global->GetOrCreateTrackerForCurrentThread();
+
+ // Get callbacks for process exit.
+ global->SetProcessExitCallback(
+ Bind(&ActivityTrackerTest::HandleProcessExit, Unretained(this)));
+
+  // Pretend that another process has started.
+ global->RecordProcessLaunch(other_process_id, FILE_PATH_LITERAL("foo --bar"));
+
+ // Do some activities.
+ PendingTask task(FROM_HERE, base::BindOnce(&DoNothing));
+ ScopedTaskRunActivity activity(task);
+ ActivityUserData& user_data = activity.user_data();
+ ASSERT_NE(0U, user_data.id());
+
+ // Get the memory-allocator references to that data.
+ PersistentMemoryAllocator::Reference proc_data_ref =
+ global->allocator()->GetAsReference(
+ global->process_data().GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdProcessDataRecord);
+ ASSERT_TRUE(proc_data_ref);
+ PersistentMemoryAllocator::Reference tracker_ref =
+ global->allocator()->GetAsReference(
+ thread->GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdActivityTracker);
+ ASSERT_TRUE(tracker_ref);
+ PersistentMemoryAllocator::Reference user_data_ref =
+ global->allocator()->GetAsReference(
+ user_data.GetBaseAddress(),
+ GlobalActivityTracker::kTypeIdUserDataRecord);
+ ASSERT_TRUE(user_data_ref);
+
+ // Make a copy of the thread-tracker state so it can be restored later.
+ const size_t tracker_size = global->allocator()->GetAllocSize(tracker_ref);
+ std::unique_ptr<char[]> tracker_copy(new char[tracker_size]);
+ memcpy(tracker_copy.get(), thread->GetBaseAddress(), tracker_size);
+
+ // Change the objects to appear to be owned by another process.
+ int64_t owning_id;
+ int64_t stamp;
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+ global->process_data().GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+ thread->GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+ &owning_id, &stamp));
+ EXPECT_NE(other_process_id, owning_id);
+ global->process_data().SetOwningProcessIdForTesting(other_process_id, stamp);
+ thread->SetOwningProcessIdForTesting(other_process_id, stamp);
+ user_data.SetOwningProcessIdForTesting(other_process_id, stamp);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+ global->process_data().GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+ ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+ thread->GetBaseAddress(), &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+ ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+ &owning_id, &stamp));
+ EXPECT_EQ(other_process_id, owning_id);
+
+ // Check that process exit will perform callback and free the allocations.
+ ASSERT_EQ(0, exit_id);
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecord,
+ global->allocator()->GetType(proc_data_ref));
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdActivityTracker,
+ global->allocator()->GetType(tracker_ref));
+ ASSERT_EQ(GlobalActivityTracker::kTypeIdUserDataRecord,
+ global->allocator()->GetType(user_data_ref));
+ global->RecordProcessExit(other_process_id, 0);
+ EXPECT_EQ(other_process_id, exit_id);
+ EXPECT_EQ("foo --bar", exit_command);
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecordFree,
+ global->allocator()->GetType(proc_data_ref));
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdActivityTrackerFree,
+ global->allocator()->GetType(tracker_ref));
+ EXPECT_EQ(GlobalActivityTracker::kTypeIdUserDataRecordFree,
+ global->allocator()->GetType(user_data_ref));
+
+ // Restore memory contents and types so things don't crash when doing real
+ // process clean-up.
+ memcpy(const_cast<void*>(thread->GetBaseAddress()), tracker_copy.get(),
+ tracker_size);
+ global->allocator()->ChangeType(
+ proc_data_ref, GlobalActivityTracker::kTypeIdProcessDataRecord,
+      GlobalActivityTracker::kTypeIdProcessDataRecordFree, false);
+ global->allocator()->ChangeType(
+ tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
+ GlobalActivityTracker::kTypeIdActivityTrackerFree, false);
+ global->allocator()->ChangeType(
+ user_data_ref, GlobalActivityTracker::kTypeIdUserDataRecord,
+ GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
+}
+
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/stack_trace.cc b/chromium/base/debug/stack_trace.cc
index af4a6efc3ed..06637188294 100644
--- a/chromium/base/debug/stack_trace.cc
+++ b/chromium/base/debug/stack_trace.cc
@@ -12,7 +12,7 @@
#include "base/logging.h"
#include "base/macros.h"
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <pthread.h>
@@ -28,14 +28,14 @@
extern "C" void* __libc_stack_end;
#endif
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
namespace base {
namespace debug {
namespace {
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
// GCC and LLVM generate slightly different frames on ARM, see
@@ -142,11 +142,11 @@ void* LinkStackFrames(void* fpp, void* parent_fp) {
return prev_parent_fp;
}
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
uintptr_t GetStackEnd() {
#if defined(OS_ANDROID)
// Bionic reads proc/maps on every call to pthread_getattr_np() when called
@@ -194,7 +194,7 @@ uintptr_t GetStackEnd() {
// Don't know how to get end of the stack.
return 0;
}
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
StackTrace::StackTrace() : StackTrace(arraysize(trace_)) {}
@@ -220,7 +220,7 @@ std::string StackTrace::ToString() const {
return stream.str();
}
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
@@ -271,7 +271,7 @@ ScopedStackFrameLinker::~ScopedStackFrameLinker() {
<< "Stack frame's parent pointer has changed!";
}
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/stack_trace.h b/chromium/base/debug/stack_trace.h
index 4c9b73e87d6..72056ec6b6e 100644
--- a/chromium/base/debug/stack_trace.h
+++ b/chromium/base/debug/stack_trace.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/debug/debugging_flags.h"
#include "base/macros.h"
#include "build/build_config.h"
@@ -23,14 +24,6 @@ struct _EXCEPTION_POINTERS;
struct _CONTEXT;
#endif
-#if defined(OS_POSIX) && ( \
- defined(__i386__) || defined(__x86_64__) || \
- (defined(__arm__) && !defined(__thumb__)))
-#define HAVE_TRACE_STACK_FRAME_POINTERS 1
-#else
-#define HAVE_TRACE_STACK_FRAME_POINTERS 0
-#endif
-
namespace base {
namespace debug {
@@ -46,7 +39,7 @@ namespace debug {
BASE_EXPORT bool EnableInProcessStackDumping();
// Returns end of the stack, or 0 if we couldn't get it.
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
BASE_EXPORT uintptr_t GetStackEnd();
#endif
@@ -109,7 +102,7 @@ class BASE_EXPORT StackTrace {
size_t count_;
};
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// Traces the stack by using frame pointers. This function is faster but less
// reliable than StackTrace. It should work for debug and profiling builds,
// but not for release builds (although there are some exceptions).
@@ -172,7 +165,7 @@ class BASE_EXPORT ScopedStackFrameLinker {
DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
};
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
namespace internal {
diff --git a/chromium/base/debug/stack_trace_unittest.cc b/chromium/base/debug/stack_trace_unittest.cc
index 560dc1ddde8..3d6b6d75345 100644
--- a/chromium/base/debug/stack_trace_unittest.cc
+++ b/chromium/base/debug/stack_trace_unittest.cc
@@ -8,6 +8,7 @@
#include <sstream>
#include <string>
+#include "base/debug/debugging_flags.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/process/kill.h"
@@ -122,7 +123,7 @@ TEST_F(StackTraceTest, MAYBE_OutputToStream) {
#endif // define(OS_MACOSX)
}
-#if !defined(OFFICIAL_BUILD)
+#if !defined(OFFICIAL_BUILD) && !defined(NO_UNWIND_TABLES)
// Disabled in Official builds, where Link-Time Optimization can result in two
// or fewer stack frames being available, causing the test to fail.
TEST_F(StackTraceTest, TruncatedTrace) {
@@ -171,11 +172,11 @@ MULTIPROCESS_TEST_MAIN(MismatchedMallocChildProcess) {
// and e.g. mismatched new[]/delete would cause a hang because
// of re-entering malloc.
TEST_F(StackTraceTest, AsyncSignalUnsafeSignalHandlerHang) {
- Process child = SpawnChild("MismatchedMallocChildProcess");
- ASSERT_TRUE(child.IsValid());
+ SpawnChildResult spawn_result = SpawnChild("MismatchedMallocChildProcess");
+ ASSERT_TRUE(spawn_result.process.IsValid());
int exit_code;
- ASSERT_TRUE(child.WaitForExitWithTimeout(TestTimeouts::action_timeout(),
- &exit_code));
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &exit_code));
}
#endif // !defined(OS_IOS)
@@ -254,7 +255,7 @@ TEST_F(StackTraceTest, itoa_r) {
}
#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
-#if HAVE_TRACE_STACK_FRAME_POINTERS
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
template <size_t Depth>
void NOINLINE ExpectStackFramePointers(const void** frames,
@@ -313,7 +314,7 @@ TEST_F(StackTraceTest, MAYBE_StackEnd) {
EXPECT_NE(0u, GetStackEnd());
}
-#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/task_annotator_unittest.cc b/chromium/base/debug/task_annotator_unittest.cc
index 8a1c8bdc872..bfb0e7c9139 100644
--- a/chromium/base/debug/task_annotator_unittest.cc
+++ b/chromium/base/debug/task_annotator_unittest.cc
@@ -19,7 +19,7 @@ void TestTask(int* result) {
TEST(TaskAnnotatorTest, QueueAndRunTask) {
int result = 0;
- PendingTask pending_task(FROM_HERE, Bind(&TestTask, &result));
+ PendingTask pending_task(FROM_HERE, BindOnce(&TestTask, &result));
TaskAnnotator annotator;
annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
diff --git a/chromium/base/debug/thread_heap_usage_tracker_unittest.cc b/chromium/base/debug/thread_heap_usage_tracker_unittest.cc
index 2012e4971f0..5a6a4fe7153 100644
--- a/chromium/base/debug/thread_heap_usage_tracker_unittest.cc
+++ b/chromium/base/debug/thread_heap_usage_tracker_unittest.cc
@@ -10,6 +10,10 @@
#include "base/allocator/features.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_MACOSX)
+#include "base/allocator/allocator_interception_mac.h"
+#endif
+
namespace base {
namespace debug {
@@ -550,11 +554,14 @@ TEST_F(ThreadHeapUsageTrackerTest, AllShimFunctionsAreProvided) {
}
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
-TEST(ThreadHeapUsageShimTest, HooksIntoMallocWhenShimAvailable) {
+class ThreadHeapUsageShimTest : public testing::Test {
#if defined(OS_MACOSX)
- allocator::InitializeAllocatorShim();
+ void SetUp() override { allocator::InitializeAllocatorShim(); }
+ void TearDown() override { allocator::UninterceptMallocZonesForTesting(); }
#endif
+};
+TEST_F(ThreadHeapUsageShimTest, HooksIntoMallocWhenShimAvailable) {
ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
ThreadHeapUsageTracker::EnableHeapTracking();
diff --git a/chromium/base/deferred_sequenced_task_runner.cc b/chromium/base/deferred_sequenced_task_runner.cc
index 7d09ef838cf..0301945bd10 100644
--- a/chromium/base/deferred_sequenced_task_runner.cc
+++ b/chromium/base/deferred_sequenced_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/deferred_sequenced_task_runner.h"
+#include <utility>
+
#include "base/bind.h"
#include "base/logging.h"
@@ -13,12 +15,16 @@ DeferredSequencedTaskRunner::DeferredTask::DeferredTask()
: is_non_nestable(false) {
}
-DeferredSequencedTaskRunner::DeferredTask::DeferredTask(
- const DeferredTask& other) = default;
+DeferredSequencedTaskRunner::DeferredTask::DeferredTask(DeferredTask&& other) =
+ default;
DeferredSequencedTaskRunner::DeferredTask::~DeferredTask() {
}
+DeferredSequencedTaskRunner::DeferredTask&
+DeferredSequencedTaskRunner::DeferredTask::operator=(DeferredTask&& other) =
+ default;
+
DeferredSequencedTaskRunner::DeferredSequencedTaskRunner(
scoped_refptr<SequencedTaskRunner> target_task_runner)
: started_(false), target_task_runner_(std::move(target_task_runner)) {}
@@ -28,15 +34,17 @@ DeferredSequencedTaskRunner::~DeferredSequencedTaskRunner() {
bool DeferredSequencedTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock lock(lock_);
if (started_) {
DCHECK(deferred_tasks_queue_.empty());
- return target_task_runner_->PostDelayedTask(from_here, task, delay);
+ return target_task_runner_->PostDelayedTask(from_here, std::move(task),
+ delay);
}
- QueueDeferredTask(from_here, task, delay, false /* is_non_nestable */);
+ QueueDeferredTask(from_here, std::move(task), delay,
+ false /* is_non_nestable */);
return true;
}
@@ -46,33 +54,36 @@ bool DeferredSequencedTaskRunner::RunsTasksOnCurrentThread() const {
bool DeferredSequencedTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
AutoLock lock(lock_);
if (started_) {
DCHECK(deferred_tasks_queue_.empty());
- return target_task_runner_->PostNonNestableDelayedTask(from_here,
- task,
- delay);
+ return target_task_runner_->PostNonNestableDelayedTask(
+ from_here, std::move(task), delay);
}
- QueueDeferredTask(from_here, task, delay, true /* is_non_nestable */);
+ QueueDeferredTask(from_here, std::move(task), delay,
+ true /* is_non_nestable */);
return true;
}
void DeferredSequencedTaskRunner::QueueDeferredTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay,
bool is_non_nestable) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task);
+
DeferredTask deferred_task;
deferred_task.posted_from = from_here;
- deferred_task.task = task;
+ deferred_task.task = std::move(task);
deferred_task.delay = delay;
deferred_task.is_non_nestable = is_non_nestable;
- deferred_tasks_queue_.push_back(deferred_task);
+ deferred_tasks_queue_.push_back(std::move(deferred_task));
}
-
void DeferredSequencedTaskRunner::Start() {
AutoLock lock(lock_);
DCHECK(!started_);
@@ -80,20 +91,14 @@ void DeferredSequencedTaskRunner::Start() {
for (std::vector<DeferredTask>::iterator i = deferred_tasks_queue_.begin();
i != deferred_tasks_queue_.end();
++i) {
- const DeferredTask& task = *i;
+ DeferredTask& task = *i;
if (task.is_non_nestable) {
- target_task_runner_->PostNonNestableDelayedTask(task.posted_from,
- task.task,
- task.delay);
+ target_task_runner_->PostNonNestableDelayedTask(
+ task.posted_from, std::move(task.task), task.delay);
} else {
target_task_runner_->PostDelayedTask(task.posted_from,
- task.task,
- task.delay);
+ std::move(task.task), task.delay);
}
- // Replace the i-th element in the |deferred_tasks_queue_| with an empty
- // |DelayedTask| to ensure that |task| is destroyed before the next task
- // is posted.
- *i = DeferredTask();
}
deferred_tasks_queue_.clear();
}
diff --git a/chromium/base/deferred_sequenced_task_runner.h b/chromium/base/deferred_sequenced_task_runner.h
index e5d57df5a0b..85f196f3a44 100644
--- a/chromium/base/deferred_sequenced_task_runner.h
+++ b/chromium/base/deferred_sequenced_task_runner.h
@@ -28,13 +28,13 @@ class BASE_EXPORT DeferredSequencedTaskRunner : public SequencedTaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
// SequencedTaskRunner implementation
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
// Start the execution - posts all queued tasks to the target executor. The
@@ -46,11 +46,12 @@ class BASE_EXPORT DeferredSequencedTaskRunner : public SequencedTaskRunner {
private:
struct DeferredTask {
DeferredTask();
- DeferredTask(const DeferredTask& other);
+ DeferredTask(DeferredTask&& other);
~DeferredTask();
+ DeferredTask& operator=(DeferredTask&& other);
tracked_objects::Location posted_from;
- Closure task;
+ OnceClosure task;
// The delay this task was initially posted with.
TimeDelta delay;
bool is_non_nestable;
@@ -60,7 +61,7 @@ class BASE_EXPORT DeferredSequencedTaskRunner : public SequencedTaskRunner {
// Creates a |Task| object and adds it to |deferred_tasks_queue_|.
void QueueDeferredTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay,
bool is_non_nestable);
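The switch from const Closure& to OnceClosure threads move-only semantics through the whole queue: each task is moved in, stored, and moved out exactly once, which is why Start() no longer needs to overwrite each queue slot with an empty DeferredTask to force early destruction. A standalone sketch of the same ownership pattern in plain standard C++ (the OnceTask type is illustrative, not base::OnceClosure):

#include <memory>
#include <utility>
#include <vector>

struct OnceTask {
  std::unique_ptr<int> payload;    // Move-only state, like a bound argument.
  void Run() && { /* consumes payload */ }  // Rvalue-qualified: runs once.
};

int main() {
  std::vector<OnceTask> queue;
  queue.push_back(OnceTask{std::make_unique<int>(42)});  // Moved in.
  for (OnceTask& task : queue)
    std::move(task).Run();  // Moved out and consumed exactly once.
  queue.clear();            // Only moved-from shells remain to destroy.
}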
diff --git a/chromium/base/deferred_sequenced_task_runner_unittest.cc b/chromium/base/deferred_sequenced_task_runner_unittest.cc
index e34827ab9ee..a39b2d3485c 100644
--- a/chromium/base/deferred_sequenced_task_runner_unittest.cc
+++ b/chromium/base/deferred_sequenced_task_runner_unittest.cc
@@ -44,10 +44,9 @@ class DeferredSequencedTaskRunnerTest : public testing::Test,
}
void PostExecuteTask(int task_id) {
- runner_->PostTask(FROM_HERE,
- base::Bind(&DeferredSequencedTaskRunnerTest::ExecuteTask,
- base::Unretained(this),
- task_id));
+ runner_->PostTask(
+ FROM_HERE, base::BindOnce(&DeferredSequencedTaskRunnerTest::ExecuteTask,
+ base::Unretained(this), task_id));
}
void StartRunner() {
@@ -126,16 +125,17 @@ TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleThreads) {
for (int i = 0; i < 5; ++i) {
thread1.task_runner()->PostTask(
FROM_HERE,
- base::Bind(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
- base::Unretained(this), 2 * i));
+ base::BindOnce(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
+ base::Unretained(this), 2 * i));
thread2.task_runner()->PostTask(
FROM_HERE,
- base::Bind(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
- base::Unretained(this), 2 * i + 1));
+ base::BindOnce(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
+ base::Unretained(this), 2 * i + 1));
if (i == 2) {
thread1.task_runner()->PostTask(
- FROM_HERE, base::Bind(&DeferredSequencedTaskRunnerTest::StartRunner,
- base::Unretained(this)));
+ FROM_HERE,
+ base::BindOnce(&DeferredSequencedTaskRunnerTest::StartRunner,
+ base::Unretained(this)));
}
}
}
@@ -157,9 +157,10 @@ TEST_F(DeferredSequencedTaskRunnerTest, ObjectDestructionOrder) {
scoped_refptr<ExecuteTaskOnDestructor> short_lived_object =
new ExecuteTaskOnDestructor(this, 2 * i);
runner_->PostTask(
- FROM_HERE, base::Bind(&DeferredSequencedTaskRunnerTest::DoNothing,
- base::Unretained(this),
- base::RetainedRef(short_lived_object)));
+ FROM_HERE,
+ base::BindOnce(&DeferredSequencedTaskRunnerTest::DoNothing,
+ base::Unretained(this),
+ base::RetainedRef(short_lived_object)));
}
// |short_lived_object| with id |2 * i| should be destroyed before the
// task |2 * i + 1| is executed.
diff --git a/chromium/base/environment.cc b/chromium/base/environment.cc
index 534a7a88127..8b1d8fc3125 100644
--- a/chromium/base/environment.cc
+++ b/chromium/base/environment.cc
@@ -42,7 +42,7 @@ class EnvironmentImpl : public Environment {
alternate_case_var = ToLowerASCII(variable_name);
else
return false;
- return GetVarImpl(alternate_case_var.c_str(), result);
+ return GetVarImpl(alternate_case_var, result);
}
bool SetVar(StringPiece variable_name,
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index 353136c12be..61043ceb731 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -228,9 +228,9 @@ FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
}
// static
-std::vector<std::string> FeatureList::SplitFeatureListString(
- const std::string& input) {
- return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
+ base::StringPiece input) {
+ return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
}
// static
@@ -340,7 +340,7 @@ void FeatureList::RegisterOverridesFromCommandLine(
const std::string& feature_list,
OverrideState overridden_state) {
for (const auto& value : SplitFeatureListString(feature_list)) {
- StringPiece feature_name(value);
+ StringPiece feature_name = value;
base::FieldTrial* trial = nullptr;
// The entry may be of the form FeatureName<FieldTrialName - in which case,
@@ -348,7 +348,7 @@ void FeatureList::RegisterOverridesFromCommandLine(
std::string::size_type pos = feature_name.find('<');
if (pos != std::string::npos) {
feature_name.set(value.data(), pos);
- trial = base::FieldTrialList::Find(value.substr(pos + 1));
+ trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
}
RegisterOverride(feature_name, overridden_state, trial);
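Returning StringPiece here avoids one std::string allocation per feature name, but the pieces borrow |input|'s storage, so the input must outlive them. A self-contained illustration of that lifetime contract, using std::string_view as a stand-in for base::StringPiece (the Split helper is illustrative, not base::SplitStringPiece):

#include <string>
#include <string_view>
#include <vector>

std::vector<std::string_view> Split(std::string_view input, char sep) {
  std::vector<std::string_view> out;
  size_t start = 0;
  while (start <= input.size()) {
    size_t end = input.find(sep, start);
    if (end == std::string_view::npos)
      end = input.size();
    if (end > start)
      out.push_back(input.substr(start, end - start));  // Aliases |input|.
    start = end + 1;
  }
  return out;
}
// OK:      std::string s = "A,B"; auto v = Split(s, ',');  // |s| outlives |v|.
// Dangles: auto v = Split(std::string("A,B"), ',');  // Temporary dies first.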
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
index 09e8408aa84..c9f4a7b0c46 100644
--- a/chromium/base/feature_list.h
+++ b/chromium/base/feature_list.h
@@ -156,9 +156,10 @@ class BASE_EXPORT FeatureList {
// called after the singleton instance has been registered via SetInstance().
static FieldTrial* GetFieldTrial(const Feature& feature);
- // Splits a comma-separated string containing feature names into a vector.
- static std::vector<std::string> SplitFeatureListString(
- const std::string& input);
+  // Splits a comma-separated string containing feature names into a vector.
+  // The resulting pieces point to parts of |input|.
+ static std::vector<base::StringPiece> SplitFeatureListString(
+ base::StringPiece input);
// Initializes and sets an instance of FeatureList with feature overrides via
// command-line flags |enable_features| and |disable_features| if one has not
diff --git a/chromium/base/feature_list_unittest.cc b/chromium/base/feature_list_unittest.cc
index fb3b320ae92..5fbd294dcf8 100644
--- a/chromium/base/feature_list_unittest.cc
+++ b/chromium/base/feature_list_unittest.cc
@@ -14,6 +14,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -33,7 +34,7 @@ struct Feature kFeatureOffByDefault {
};
std::string SortFeatureListString(const std::string& feature_list) {
- std::vector<std::string> features =
+ std::vector<base::StringPiece> features =
FeatureList::SplitFeatureListString(feature_list);
std::sort(features.begin(), features.end());
return JoinString(features, ",");
diff --git a/chromium/base/files/file.cc b/chromium/base/files/file.cc
index 1b2224e323e..672950045a0 100644
--- a/chromium/base/files/file.cc
+++ b/chromium/base/files/file.cc
@@ -71,7 +71,6 @@ File File::CreateForAsyncHandle(PlatformFile platform_file) {
}
File& File::operator=(File&& other) {
- DCHECK_NE(this, &other);
Close();
SetPlatformFile(other.TakePlatformFile());
tracing_path_ = other.tracing_path_;
diff --git a/chromium/base/files/file_descriptor_watcher_posix.cc b/chromium/base/files/file_descriptor_watcher_posix.cc
index 9746e35ea70..ce05081244a 100644
--- a/chromium/base/files/file_descriptor_watcher_posix.cc
+++ b/chromium/base/files/file_descriptor_watcher_posix.cc
@@ -124,8 +124,8 @@ void FileDescriptorWatcher::Controller::Watcher::OnFileCanReadWithoutBlocking(
DCHECK(thread_checker_.CalledOnValidThread());
// Run the callback on the sequence on which the watch was initiated.
- callback_task_runner_->PostTask(FROM_HERE,
- Bind(&Controller::RunCallback, controller_));
+ callback_task_runner_->PostTask(
+ FROM_HERE, BindOnce(&Controller::RunCallback, controller_));
}
void FileDescriptorWatcher::Controller::Watcher::OnFileCanWriteWithoutBlocking(
@@ -135,8 +135,8 @@ void FileDescriptorWatcher::Controller::Watcher::OnFileCanWriteWithoutBlocking(
DCHECK(thread_checker_.CalledOnValidThread());
// Run the callback on the sequence on which the watch was initiated.
- callback_task_runner_->PostTask(FROM_HERE,
- Bind(&Controller::RunCallback, controller_));
+ callback_task_runner_->PostTask(
+ FROM_HERE, BindOnce(&Controller::RunCallback, controller_));
}
void FileDescriptorWatcher::Controller::Watcher::
@@ -170,7 +170,7 @@ void FileDescriptorWatcher::Controller::StartWatching() {
// Controller's destructor. Since this delete task hasn't been posted yet, it
// can't run before the task posted below.
message_loop_for_io_task_runner_->PostTask(
- FROM_HERE, Bind(&Watcher::StartWatching, Unretained(watcher_.get())));
+ FROM_HERE, BindOnce(&Watcher::StartWatching, Unretained(watcher_.get())));
}
void FileDescriptorWatcher::Controller::RunCallback() {
diff --git a/chromium/base/files/file_locking_unittest.cc b/chromium/base/files/file_locking_unittest.cc
index b709b7536c4..0cb8d8fe584 100644
--- a/chromium/base/files/file_locking_unittest.cc
+++ b/chromium/base/files/file_locking_unittest.cc
@@ -152,10 +152,10 @@ class FileLockingTest : public testing::Test {
base::GetMultiProcessTestChildBaseCommandLine());
child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
child_command_line.AppendSwitch(unlock_action);
- lock_child_ =
- base::SpawnMultiProcessTestChild(ChildMainString, child_command_line,
- base::LaunchOptions());
- ASSERT_TRUE(lock_child_.IsValid());
+
+ spawn_child_ = base::SpawnMultiProcessTestChild(
+ ChildMainString, child_command_line, base::LaunchOptions());
+ ASSERT_TRUE(spawn_child_.process.IsValid());
// Wait for the child to lock the file.
ASSERT_TRUE(WaitForEventOrTimeout(kSignalLockFileLocked));
@@ -166,13 +166,13 @@ class FileLockingTest : public testing::Test {
ASSERT_TRUE(SignalEvent(kSignalExit));
int rv = -1;
ASSERT_TRUE(WaitForMultiprocessTestChildExit(
- lock_child_, TestTimeouts::action_timeout(), &rv));
+ spawn_child_.process, TestTimeouts::action_timeout(), &rv));
ASSERT_EQ(0, rv);
}
base::ScopedTempDir temp_dir_;
base::File lock_file_;
- base::Process lock_child_;
+ base::SpawnChildResult spawn_child_;
private:
DISALLOW_COPY_AND_ASSIGN(FileLockingTest);
@@ -220,7 +220,7 @@ TEST_F(FileLockingTest, UnlockOnTerminate) {
StartChildAndSignalLock(kExitUnlock);
ASSERT_NE(File::FILE_OK, lock_file_.Lock());
- ASSERT_TRUE(TerminateMultiProcessTestChild(lock_child_, 0, true));
+ ASSERT_TRUE(TerminateMultiProcessTestChild(spawn_child_.process, 0, true));
ASSERT_EQ(File::FILE_OK, lock_file_.Lock());
ASSERT_EQ(File::FILE_OK, lock_file_.Unlock());
}
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index 21a44c6dd53..5b1eb29dd6d 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -174,7 +174,7 @@ FilePath::FilePath() {
FilePath::FilePath(const FilePath& that) : path_(that.path_) {
}
-FilePath::FilePath(FilePath&& that) = default;
+FilePath::FilePath(FilePath&& that) noexcept = default;
FilePath::FilePath(StringPieceType path) {
path.CopyToString(&path_);
diff --git a/chromium/base/files/file_path.h b/chromium/base/files/file_path.h
index 02846f68921..0be0ad0b104 100644
--- a/chromium/base/files/file_path.h
+++ b/chromium/base/files/file_path.h
@@ -184,7 +184,7 @@ class BASE_EXPORT FilePath {
// Constructs FilePath with the contents of |that|, which is left in valid but
// unspecified state.
- FilePath(FilePath&& that);
+ FilePath(FilePath&& that) noexcept;
// Replaces the contents with those of |that|, which is left in valid but
// unspecified state.
FilePath& operator=(FilePath&& that);
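The noexcept annotation is not cosmetic: std::vector only moves elements during reallocation when the move constructor cannot throw (via std::move_if_noexcept); otherwise it copies to preserve the strong exception guarantee. A compile-time check of the property this change buys, with an illustrative stand-in type:

#include <string>
#include <type_traits>

struct PathLike {
  std::string path_;
  PathLike(const PathLike&) = default;
  PathLike(PathLike&&) noexcept = default;
};

static_assert(std::is_nothrow_move_constructible<PathLike>::value,
              "vector<PathLike> now moves, rather than copies, on growth");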
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index 9589e9b788d..f02a8ea6604 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -63,7 +63,7 @@ class InotifyReader {
void OnInotifyEvent(const inotify_event* event);
private:
- friend struct DefaultLazyInstanceTraits<InotifyReader>;
+ friend struct LazyInstanceTraitsBase<InotifyReader>;
typedef std::set<FilePathWatcherImpl*> WatcherSet;
@@ -255,8 +255,7 @@ InotifyReader::InotifyReader()
if (inotify_fd_ >= 0 && thread_.Start()) {
thread_.task_runner()->PostTask(
- FROM_HERE,
- Bind(&InotifyReaderCallback, this, inotify_fd_));
+ FROM_HERE, BindOnce(&InotifyReaderCallback, this, inotify_fd_));
valid_ = true;
}
}
@@ -331,9 +330,10 @@ void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
// access |watches_| safely. Use a WeakPtr to prevent the callback from
// running after |this| is destroyed (i.e. after the watch is cancelled).
task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence,
- weak_factory_.GetWeakPtr(), fired_watch, child, created,
- deleted, is_dir));
+ FROM_HERE,
+ BindOnce(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence,
+ weak_factory_.GetWeakPtr(), fired_watch, child, created, deleted,
+ is_dir));
}
void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence(
diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc
index d2ec37bbec1..dcc6d5fc0f9 100644
--- a/chromium/base/files/file_path_watcher_unittest.cc
+++ b/chromium/base/files/file_path_watcher_unittest.cc
@@ -56,8 +56,8 @@ class NotificationCollector
// Called from the file thread by the delegates.
void OnChange(TestDelegate* delegate) {
task_runner_->PostTask(
- FROM_HERE, base::Bind(&NotificationCollector::RecordChange, this,
- base::Unretained(delegate)));
+ FROM_HERE, base::BindOnce(&NotificationCollector::RecordChange, this,
+ base::Unretained(delegate)));
}
void Register(TestDelegate* delegate) {
diff --git a/chromium/base/files/file_proxy.cc b/chromium/base/files/file_proxy.cc
index 01933d0d791..a7950f001b7 100644
--- a/chromium/base/files/file_proxy.cc
+++ b/chromium/base/files/file_proxy.cc
@@ -37,7 +37,8 @@ class FileHelper {
if (proxy_)
proxy_->SetFile(std::move(file_));
else if (file_.IsValid())
- task_runner_->PostTask(FROM_HERE, Bind(&FileDeleter, Passed(&file_)));
+ task_runner_->PostTask(FROM_HERE,
+ BindOnce(&FileDeleter, Passed(&file_)));
}
protected:
@@ -235,7 +236,7 @@ FileProxy::FileProxy(TaskRunner* task_runner) : task_runner_(task_runner) {
FileProxy::~FileProxy() {
if (file_.IsValid())
- task_runner_->PostTask(FROM_HERE, Bind(&FileDeleter, Passed(&file_)));
+ task_runner_->PostTask(FROM_HERE, BindOnce(&FileDeleter, Passed(&file_)));
}
bool FileProxy::CreateOrOpen(const FilePath& file_path,
@@ -245,9 +246,9 @@ bool FileProxy::CreateOrOpen(const FilePath& file_path,
CreateOrOpenHelper* helper = new CreateOrOpenHelper(this, File());
return task_runner_->PostTaskAndReply(
FROM_HERE,
- Bind(&CreateOrOpenHelper::RunWork, Unretained(helper), file_path,
- file_flags),
- Bind(&CreateOrOpenHelper::Reply, Owned(helper), callback));
+ BindOnce(&CreateOrOpenHelper::RunWork, Unretained(helper), file_path,
+ file_flags),
+ BindOnce(&CreateOrOpenHelper::Reply, Owned(helper), callback));
}
bool FileProxy::CreateTemporary(uint32_t additional_file_flags,
@@ -256,9 +257,9 @@ bool FileProxy::CreateTemporary(uint32_t additional_file_flags,
CreateTemporaryHelper* helper = new CreateTemporaryHelper(this, File());
return task_runner_->PostTaskAndReply(
FROM_HERE,
- Bind(&CreateTemporaryHelper::RunWork, Unretained(helper),
- additional_file_flags),
- Bind(&CreateTemporaryHelper::Reply, Owned(helper), callback));
+ BindOnce(&CreateTemporaryHelper::RunWork, Unretained(helper),
+ additional_file_flags),
+ BindOnce(&CreateTemporaryHelper::Reply, Owned(helper), callback));
}
bool FileProxy::IsValid() const {
@@ -286,18 +287,16 @@ bool FileProxy::Close(const StatusCallback& callback) {
DCHECK(file_.IsValid());
GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
return task_runner_->PostTaskAndReply(
- FROM_HERE,
- Bind(&GenericFileHelper::Close, Unretained(helper)),
- Bind(&GenericFileHelper::Reply, Owned(helper), callback));
+ FROM_HERE, BindOnce(&GenericFileHelper::Close, Unretained(helper)),
+ BindOnce(&GenericFileHelper::Reply, Owned(helper), callback));
}
bool FileProxy::GetInfo(const GetFileInfoCallback& callback) {
DCHECK(file_.IsValid());
GetInfoHelper* helper = new GetInfoHelper(this, std::move(file_));
return task_runner_->PostTaskAndReply(
- FROM_HERE,
- Bind(&GetInfoHelper::RunWork, Unretained(helper)),
- Bind(&GetInfoHelper::Reply, Owned(helper), callback));
+ FROM_HERE, BindOnce(&GetInfoHelper::RunWork, Unretained(helper)),
+ BindOnce(&GetInfoHelper::Reply, Owned(helper), callback));
}
bool FileProxy::Read(int64_t offset,
@@ -309,9 +308,8 @@ bool FileProxy::Read(int64_t offset,
ReadHelper* helper = new ReadHelper(this, std::move(file_), bytes_to_read);
return task_runner_->PostTaskAndReply(
- FROM_HERE,
- Bind(&ReadHelper::RunWork, Unretained(helper), offset),
- Bind(&ReadHelper::Reply, Owned(helper), callback));
+ FROM_HERE, BindOnce(&ReadHelper::RunWork, Unretained(helper), offset),
+ BindOnce(&ReadHelper::Reply, Owned(helper), callback));
}
bool FileProxy::Write(int64_t offset,
@@ -325,9 +323,8 @@ bool FileProxy::Write(int64_t offset,
WriteHelper* helper =
new WriteHelper(this, std::move(file_), buffer, bytes_to_write);
return task_runner_->PostTaskAndReply(
- FROM_HERE,
- Bind(&WriteHelper::RunWork, Unretained(helper), offset),
- Bind(&WriteHelper::Reply, Owned(helper), callback));
+ FROM_HERE, BindOnce(&WriteHelper::RunWork, Unretained(helper), offset),
+ BindOnce(&WriteHelper::Reply, Owned(helper), callback));
}
bool FileProxy::SetTimes(Time last_access_time,
@@ -337,9 +334,9 @@ bool FileProxy::SetTimes(Time last_access_time,
GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
return task_runner_->PostTaskAndReply(
FROM_HERE,
- Bind(&GenericFileHelper::SetTimes, Unretained(helper), last_access_time,
- last_modified_time),
- Bind(&GenericFileHelper::Reply, Owned(helper), callback));
+ BindOnce(&GenericFileHelper::SetTimes, Unretained(helper),
+ last_access_time, last_modified_time),
+ BindOnce(&GenericFileHelper::Reply, Owned(helper), callback));
}
bool FileProxy::SetLength(int64_t length, const StatusCallback& callback) {
@@ -347,17 +344,16 @@ bool FileProxy::SetLength(int64_t length, const StatusCallback& callback) {
GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
return task_runner_->PostTaskAndReply(
FROM_HERE,
- Bind(&GenericFileHelper::SetLength, Unretained(helper), length),
- Bind(&GenericFileHelper::Reply, Owned(helper), callback));
+ BindOnce(&GenericFileHelper::SetLength, Unretained(helper), length),
+ BindOnce(&GenericFileHelper::Reply, Owned(helper), callback));
}
bool FileProxy::Flush(const StatusCallback& callback) {
DCHECK(file_.IsValid());
GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
return task_runner_->PostTaskAndReply(
- FROM_HERE,
- Bind(&GenericFileHelper::Flush, Unretained(helper)),
- Bind(&GenericFileHelper::Reply, Owned(helper), callback));
+ FROM_HERE, BindOnce(&GenericFileHelper::Flush, Unretained(helper)),
+ BindOnce(&GenericFileHelper::Reply, Owned(helper), callback));
}
} // namespace base
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index 66c312b60d4..e179bbba55e 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -105,6 +105,17 @@ TEST(FileTest, Create) {
EXPECT_FALSE(base::PathExists(file_path));
}
+TEST(FileTest, SelfSwap) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
+ File file(file_path,
+ base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_DELETE_ON_CLOSE);
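+  // Self-swap must be a no-op now that File's move-assignment tolerates it
+  // (the DCHECK_NE(this, &other) was removed above).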
+ using namespace std;
+ swap(file, file);
+ EXPECT_TRUE(file.IsValid());
+}
+
TEST(FileTest, Async) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
diff --git a/chromium/base/files/file_util_mac.mm b/chromium/base/files/file_util_mac.mm
index 5a99aa0e81d..d3e14a37870 100644
--- a/chromium/base/files/file_util_mac.mm
+++ b/chromium/base/files/file_util_mac.mm
@@ -7,8 +7,10 @@
#import <Foundation/Foundation.h>
#include <copyfile.h>
#include <stdlib.h>
+#include <string.h>
#include "base/files/file_path.h"
+#include "base/logging.h"
#include "base/mac/foundation_util.h"
#include "base/strings/string_util.h"
#include "base/threading/thread_restrictions.h"
@@ -24,10 +26,14 @@ bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
}
bool GetTempDir(base::FilePath* path) {
- // In order to facilitate hermetic runs on macOS, first check $TMPDIR.
- // NOTE: $TMPDIR is ALMOST ALWAYS set on macOS (unless the user un-set it).
- const char* env_tmpdir = getenv("TMPDIR");
+ // In order to facilitate hermetic runs on macOS, first check
+ // $MAC_CHROMIUM_TMPDIR. We check this instead of $TMPDIR because external
+ // programs currently set $TMPDIR with no effect, but when we respect it
+ // directly it can cause crashes (like crbug.com/698759).
+ const char* env_tmpdir = getenv("MAC_CHROMIUM_TMPDIR");
if (env_tmpdir) {
+ DCHECK_LT(strlen(env_tmpdir), 50u)
+ << "too-long TMPDIR causes socket name length issues.";
*path = base::FilePath(env_tmpdir);
return true;
}
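Usage-wise, a hermetic test harness would export the variable before launching the binary; the DCHECK above rejects long paths because the directory ends up embedded in Unix domain socket names, which have a tight length limit. A hypothetical harness-side sketch (the path is illustrative):

#include <cstdlib>  // POSIX setenv().

int main() {
  // Keep this well under 50 characters to satisfy the DCHECK in GetTempDir().
  setenv("MAC_CHROMIUM_TMPDIR", "/private/tmp/hermetic", /*overwrite=*/1);
  // ... launch the test binary here ...
}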
diff --git a/chromium/base/files/file_util_proxy.cc b/chromium/base/files/file_util_proxy.cc
index 54a56610630..dc39a654ed4 100644
--- a/chromium/base/files/file_util_proxy.cc
+++ b/chromium/base/files/file_util_proxy.cc
@@ -59,9 +59,9 @@ bool FileUtilProxy::GetFileInfo(
GetFileInfoHelper* helper = new GetFileInfoHelper;
return task_runner->PostTaskAndReply(
FROM_HERE,
- Bind(&GetFileInfoHelper::RunWorkForFilePath,
- Unretained(helper), file_path),
- Bind(&GetFileInfoHelper::Reply, Owned(helper), callback));
+ BindOnce(&GetFileInfoHelper::RunWorkForFilePath, Unretained(helper),
+ file_path),
+ BindOnce(&GetFileInfoHelper::Reply, Owned(helper), callback));
}
// static
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index 5c918f88880..2d95be4fff5 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -1744,7 +1744,7 @@ TEST_F(FileUtilTest, GetTempDirTest) {
TEST_F(FileUtilTest, OpenFileNoInheritance) {
FilePath file_path(temp_dir_.GetPath().Append(FPL("a_file")));
- for (const char* mode : {"wb", "r,ccs=UNICODE"}) {
+ for (const char* mode : {"wb", "r,ccs=UTF-8"}) {
SCOPED_TRACE(mode);
ASSERT_NO_FATAL_FAILURE(CreateTextFile(file_path, L"Geepers"));
FILE* file = OpenFile(file_path, mode);
@@ -2212,7 +2212,8 @@ TEST_F(FileUtilTest, TouchFile) {
FilePath foobar(data_dir.Append(FILE_PATH_LITERAL("foobar.txt")));
std::string data("hello");
- ASSERT_TRUE(WriteFile(foobar, data.c_str(), data.length()));
+ ASSERT_EQ(static_cast<int>(data.length()),
+ WriteFile(foobar, data.c_str(), data.length()));
Time access_time;
// This timestamp is divisible by one day (in local timezone),
@@ -2247,7 +2248,8 @@ TEST_F(FileUtilTest, IsDirectoryEmpty) {
FilePath foo(empty_dir.Append(FILE_PATH_LITERAL("foo.txt")));
std::string bar("baz");
- ASSERT_TRUE(WriteFile(foo, bar.c_str(), bar.length()));
+ ASSERT_EQ(static_cast<int>(bar.length()),
+ WriteFile(foo, bar.c_str(), bar.length()));
EXPECT_FALSE(IsDirectoryEmpty(empty_dir));
}
diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc
index 65dc5ce1e2f..294726a8279 100644
--- a/chromium/base/files/file_util_win.cc
+++ b/chromium/base/files/file_util_win.cc
@@ -20,6 +20,7 @@
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
+#include "base/guid.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram.h"
@@ -340,25 +341,47 @@ FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
ThreadRestrictions::AssertIOAllowed();
- wchar_t temp_name[MAX_PATH + 1];
+ // Use GUID instead of ::GetTempFileName() to generate unique file names.
+ // "Due to the algorithm used to generate file names, GetTempFileName can
+ // perform poorly when creating a large number of files with the same prefix.
+ // In such cases, it is recommended that you construct unique file names based
+ // on GUIDs."
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa364991(v=vs.85).aspx
+
+ FilePath temp_name;
+ bool create_file_success = false;
+
+  // Although it is nearly impossible to get a duplicate name with a GUID, we
+ // still use a loop here in case it happens.
+ for (int i = 0; i < 100; ++i) {
+ temp_name = dir.Append(ASCIIToUTF16(base::GenerateGUID()) + L".tmp");
+ File file(temp_name,
+ File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+ if (file.IsValid()) {
+ file.Close();
+ create_file_success = true;
+ break;
+ }
+ }
- if (!GetTempFileName(dir.value().c_str(), L"", 0, temp_name)) {
+ if (!create_file_success) {
DPLOG(WARNING) << "Failed to get temporary file name in "
<< UTF16ToUTF8(dir.value());
return false;
}
wchar_t long_temp_name[MAX_PATH + 1];
- DWORD long_name_len = GetLongPathName(temp_name, long_temp_name, MAX_PATH);
+ DWORD long_name_len =
+ GetLongPathName(temp_name.value().c_str(), long_temp_name, MAX_PATH);
if (long_name_len > MAX_PATH || long_name_len == 0) {
// GetLongPathName() failed, but we still have a temporary file.
- *temp_file = FilePath(temp_name);
+ *temp_file = std::move(temp_name);
return true;
}
FilePath::StringType long_temp_name_str;
long_temp_name_str.assign(long_temp_name, long_name_len);
- *temp_file = FilePath(long_temp_name_str);
+ *temp_file = FilePath(std::move(long_temp_name_str));
return true;
}
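The pattern above, random name plus exclusive create in a bounded retry loop, is the standard way to sidestep GetTempFileName()'s serial-scan behavior. A portable sketch of the same idea in standard C++ (the naming scheme and RNG are illustrative, not base::GenerateGUID()):

#include <cstdio>
#include <random>
#include <string>

bool CreateTempInDir(const std::string& dir, std::string* out_path) {
  std::random_device rd;
  std::mt19937_64 rng(rd());
  for (int i = 0; i < 100; ++i) {  // Bounded retries, mirroring the loop above.
    std::string candidate = dir + "/" + std::to_string(rng()) + ".tmp";
    // C11's "x" flag makes fopen fail if the file exists, like FLAG_CREATE.
    if (FILE* f = std::fopen(candidate.c_str(), "wbx")) {
      std::fclose(f);
      *out_path = candidate;
      return true;
    }
  }
  return false;
}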
diff --git a/chromium/base/files/important_file_writer_unittest.cc b/chromium/base/files/important_file_writer_unittest.cc
index 9b8dcfd4e36..88ad35281e6 100644
--- a/chromium/base/files/important_file_writer_unittest.cc
+++ b/chromium/base/files/important_file_writer_unittest.cc
@@ -201,8 +201,8 @@ TEST_F(ImportantFileWriterTest, CallbackRunsOnWriterThread) {
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
file_writer_thread.task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&base::WaitableEvent::Wait, base::Unretained(&wait_helper)));
+ FROM_HERE, base::BindOnce(&base::WaitableEvent::Wait,
+ base::Unretained(&wait_helper)));
write_callback_observer_.ObserveNextWriteCallbacks(&writer);
writer.WriteNow(MakeUnique<std::string>("foo"));
diff --git a/chromium/base/i18n/encoding_detection.cc b/chromium/base/i18n/encoding_detection.cc
index 56582511f30..cad67ce0ac9 100644
--- a/chromium/base/i18n/encoding_detection.cc
+++ b/chromium/base/i18n/encoding_detection.cc
@@ -23,18 +23,6 @@ bool DetectEncoding(const std::string& text, std::string* encoding) {
if (enc == UNKNOWN_ENCODING)
return false;
- // 7-bit encodings (except ISO-2022-JP) are not supported in web standard.
- // Mark them as ascii to keep the raw bytes intact.
- switch (enc) {
- case HZ_GB_2312:
- case ISO_2022_KR:
- case ISO_2022_CN:
- case UTF7:
- enc = ASCII_7BIT;
- break;
- default:
- break;
- }
*encoding = MimeEncodingName(enc);
return true;
}
diff --git a/chromium/base/i18n/file_util_icu.cc b/chromium/base/i18n/file_util_icu.cc
index 7b3375e9c08..6189577c3a5 100644
--- a/chromium/base/i18n/file_util_icu.cc
+++ b/chromium/base/i18n/file_util_icu.cc
@@ -164,8 +164,8 @@ bool LocaleAwareCompareFilenames(const FilePath& a, const FilePath& b) {
// On linux, the file system encoding is not defined. We assume
// SysNativeMBToWide takes care of it.
return CompareString16WithCollator(
- *collator, WideToUTF16(SysNativeMBToWide(a.value().c_str())),
- WideToUTF16(SysNativeMBToWide(b.value().c_str()))) == UCOL_LESS;
+ *collator, WideToUTF16(SysNativeMBToWide(a.value())),
+ WideToUTF16(SysNativeMBToWide(b.value()))) == UCOL_LESS;
#else
#error Not implemented on your system
#endif
diff --git a/chromium/base/i18n/message_formatter.cc b/chromium/base/i18n/message_formatter.cc
index 702e51b94aa..6962a282975 100644
--- a/chromium/base/i18n/message_formatter.cc
+++ b/chromium/base/i18n/message_formatter.cc
@@ -4,6 +4,7 @@
#include "base/i18n/message_formatter.h"
+#include "base/i18n/unicodestring.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/time/time.h"
@@ -91,7 +92,7 @@ string16 MessageFormatter::FormatWithNumberedArgs(
<< u_errorName(error);
return string16();
}
- return string16(formatted.getBuffer(), formatted.length());
+ return i18n::UnicodeStringToString16(formatted);
}
string16 MessageFormatter::FormatWithNamedArgs(
@@ -134,7 +135,7 @@ string16 MessageFormatter::FormatWithNamedArgs(
<< u_errorName(error);
return string16();
}
- return string16(formatted.getBuffer(), formatted.length());
+ return i18n::UnicodeStringToString16(formatted);
}
} // namespace i18n
diff --git a/chromium/base/i18n/number_formatting.cc b/chromium/base/i18n/number_formatting.cc
index b5108334846..0ab031ecaf8 100644
--- a/chromium/base/i18n/number_formatting.cc
+++ b/chromium/base/i18n/number_formatting.cc
@@ -10,6 +10,7 @@
#include "base/format_macros.h"
#include "base/i18n/message_formatter.h"
+#include "base/i18n/unicodestring.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
@@ -42,9 +43,9 @@ struct NumberFormatWrapper {
std::unique_ptr<icu::NumberFormat> number_format;
};
-LazyInstance<NumberFormatWrapper> g_number_format_int =
+LazyInstance<NumberFormatWrapper>::DestructorAtExit g_number_format_int =
LAZY_INSTANCE_INITIALIZER;
-LazyInstance<NumberFormatWrapper> g_number_format_float =
+LazyInstance<NumberFormatWrapper>::DestructorAtExit g_number_format_float =
LAZY_INSTANCE_INITIALIZER;
} // namespace
@@ -60,7 +61,7 @@ string16 FormatNumber(int64_t number) {
icu::UnicodeString ustr;
number_format->format(number, ustr);
- return string16(ustr.getBuffer(), static_cast<size_t>(ustr.length()));
+ return i18n::UnicodeStringToString16(ustr);
}
string16 FormatDouble(double number, int fractional_digits) {
@@ -76,7 +77,7 @@ string16 FormatDouble(double number, int fractional_digits) {
icu::UnicodeString ustr;
number_format->format(number, ustr);
- return string16(ustr.getBuffer(), static_cast<size_t>(ustr.length()));
+ return i18n::UnicodeStringToString16(ustr);
}
string16 FormatPercent(int number) {
diff --git a/chromium/base/i18n/rtl.cc b/chromium/base/i18n/rtl.cc
index 28fa0b087c2..095d66c6d8a 100644
--- a/chromium/base/i18n/rtl.cc
+++ b/chromium/base/i18n/rtl.cc
@@ -12,14 +12,12 @@
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/i18n/base_i18n_switches.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/synchronization/lock.h"
#include "build/build_config.h"
#include "third_party/icu/source/common/unicode/locid.h"
#include "third_party/icu/source/common/unicode/uchar.h"
@@ -103,8 +101,6 @@ namespace base {
namespace i18n {
// Represents the locale-specific ICU text direction.
-static base::LazyInstance<base::Lock>::Leaky g_icu_text_direction_lock =
- LAZY_INSTANCE_INITIALIZER;
static TextDirection g_icu_text_direction = UNKNOWN_DIRECTION;
// Convert the ICU default locale to a string.
@@ -154,10 +150,7 @@ void SetICUDefaultLocale(const std::string& locale_string) {
// presence of actual locale data). However,
// it does not hurt to have it as a sanity check.
DCHECK(U_SUCCESS(error_code));
- {
- base::AutoLock lock(g_icu_text_direction_lock.Get());
- g_icu_text_direction = UNKNOWN_DIRECTION;
- }
+ g_icu_text_direction = UNKNOWN_DIRECTION;
}
bool IsRTL() {
@@ -165,7 +158,6 @@ bool IsRTL() {
}
bool ICUIsRTL() {
- base::AutoLock lock(g_icu_text_direction_lock.Get());
if (g_icu_text_direction == UNKNOWN_DIRECTION) {
const icu::Locale& locale = icu::Locale::getDefault();
g_icu_text_direction = GetTextDirectionForLocaleInStartUp(locale.getName());
diff --git a/chromium/base/i18n/string_compare.cc b/chromium/base/i18n/string_compare.cc
index 2851e7d2dce..649c28119fe 100644
--- a/chromium/base/i18n/string_compare.cc
+++ b/chromium/base/i18n/string_compare.cc
@@ -6,6 +6,7 @@
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
namespace base {
namespace i18n {
@@ -17,8 +18,8 @@ UCollationResult CompareString16WithCollator(const icu::Collator& collator,
const string16& rhs) {
UErrorCode error = U_ZERO_ERROR;
UCollationResult result = collator.compare(
- static_cast<const UChar*>(lhs.c_str()), static_cast<int>(lhs.length()),
- static_cast<const UChar*>(rhs.c_str()), static_cast<int>(rhs.length()),
+ icu::UnicodeString(FALSE, lhs.c_str(), static_cast<int>(lhs.length())),
+ icu::UnicodeString(FALSE, rhs.c_str(), static_cast<int>(rhs.length())),
error);
DCHECK(U_SUCCESS(error));
return result;
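The two-argument casts are replaced by ICU's read-only aliasing constructor: UnicodeString(FALSE, buffer, length) wraps the existing UTF-16 buffer without copying, so the string16 data only needs to stay alive for the duration of compare(). A condensed model of the call above (assuming the buffers are valid UChar data of the given lengths):

#include <unicode/coll.h>
#include <unicode/unistr.h>

UCollationResult CompareAliased(const icu::Collator& collator,
                                const UChar* lhs, int32_t lhs_len,
                                const UChar* rhs, int32_t rhs_len) {
  UErrorCode error = U_ZERO_ERROR;
  // FALSE: not NUL-terminated, lengths are explicit. The UnicodeStrings are
  // read-only aliases over the callers' buffers; no copy is made.
  return collator.compare(icu::UnicodeString(FALSE, lhs, lhs_len),
                          icu::UnicodeString(FALSE, rhs, rhs_len), error);
}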
diff --git a/chromium/base/i18n/time_formatting.cc b/chromium/base/i18n/time_formatting.cc
index 4755bddab72..3a5394ae3ac 100644
--- a/chromium/base/i18n/time_formatting.cc
+++ b/chromium/base/i18n/time_formatting.cc
@@ -8,6 +8,7 @@
#include <memory>
+#include "base/i18n/unicodestring.h"
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
@@ -28,8 +29,7 @@ string16 TimeFormat(const icu::DateFormat* formatter,
icu::UnicodeString date_string;
formatter->format(static_cast<UDate>(time.ToDoubleT() * 1000), date_string);
- return string16(date_string.getBuffer(),
- static_cast<size_t>(date_string.length()));
+ return i18n::UnicodeStringToString16(date_string);
}
string16 TimeFormatWithoutAmPm(const icu::DateFormat* formatter,
@@ -48,8 +48,7 @@ string16 TimeFormatWithoutAmPm(const icu::DateFormat* formatter,
begin--;
time_string.removeBetween(begin, ampm_field.getEndIndex());
}
- return string16(time_string.getBuffer(),
- static_cast<size_t>(time_string.length()));
+ return i18n::UnicodeStringToString16(time_string);
}
icu::SimpleDateFormat CreateSimpleDateFormatter(const char* pattern) {
@@ -214,7 +213,7 @@ bool TimeDurationFormat(const TimeDelta time,
return false;
}
- *out = base::string16(formatted.getBuffer(), formatted.length());
+ *out = i18n::UnicodeStringToString16(formatted);
return true;
}
@@ -237,7 +236,7 @@ bool TimeDurationFormatWithSeconds(const TimeDelta time,
icu::UnicodeString formatted;
icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
measure_format.formatMeasures(measures, 3, formatted, ignore, status);
- *out = base::string16(formatted.getBuffer(), formatted.length());
+ *out = i18n::UnicodeStringToString16(formatted);
return U_SUCCESS(status) == TRUE;
}
@@ -256,8 +255,7 @@ string16 DateIntervalFormat(const Time& begin_time,
icu::DateInterval interval(start_date, end_date);
icu::UnicodeString formatted;
formatter->format(&interval, formatted, pos, status);
- return string16(formatted.getBuffer(),
- static_cast<size_t>(formatted.length()));
+ return i18n::UnicodeStringToString16(formatted);
}
HourClockType GetHourClockType() {
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index aa6fe6edad3..98b6a209aa1 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "base/i18n/rtl.h"
+#include "base/i18n/unicodestring.h"
#include "base/strings/utf_string_conversions.h"
#include "base/test/icu_test_util.h"
#include "base/time/time.h"
@@ -37,7 +38,7 @@ string16 GetShortTimeZone(const Time& time) {
zone_formatter->format(UTZFMT_STYLE_SPECIFIC_SHORT, *zone,
static_cast<UDate>(time.ToDoubleT() * 1000),
name, nullptr);
- return string16(name.getBuffer(), name.length());
+ return i18n::UnicodeStringToString16(name);
}
// Calls TimeDurationFormat() with |delta| and |width| and returns the resulting
diff --git a/chromium/base/i18n/timezone.cc b/chromium/base/i18n/timezone.cc
index e881c9d680f..95e7aee34c4 100644
--- a/chromium/base/i18n/timezone.cc
+++ b/chromium/base/i18n/timezone.cc
@@ -610,9 +610,9 @@ std::string CountryCodeForCurrentTimezone() {
std::unique_ptr<icu::TimeZone> zone(icu::TimeZone::createDefault());
icu::UnicodeString id;
zone->getID(id);
- string16 olson_code(id.getBuffer(), id.length());
+ std::string olson_code;
return TimezoneMap::GetInstance()->CountryCodeForTimezone(
- UTF16ToUTF8(olson_code));
+ id.toUTF8String(olson_code));
}
} // namespace base
diff --git a/chromium/base/i18n/unicodestring.h b/chromium/base/i18n/unicodestring.h
new file mode 100644
index 00000000000..b62c5264deb
--- /dev/null
+++ b/chromium/base/i18n/unicodestring.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_UNICODESTRING_H_
+#define BASE_I18N_UNICODESTRING_H_
+
+#include "base/strings/string16.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/common/unicode/uvernum.h"
+
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+#include "third_party/icu/source/common/unicode/char16ptr.h"
+#endif
+
+namespace base {
+namespace i18n {
+
+inline string16 UnicodeStringToString16(const icu::UnicodeString& unistr) {
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+ return base::string16(icu::toUCharPtr(unistr.getBuffer()),
+ static_cast<size_t>(unistr.length()));
+#else
+ return base::string16(unistr.getBuffer(),
+ static_cast<size_t>(unistr.length()));
+#endif
+}
+
+} // namespace i18n
+} // namespace base
+
+#endif  // BASE_I18N_UNICODESTRING_H_
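The new helper centralizes the getBuffer()/length() idiom used throughout the call sites below, and insulates them from ICU 59's switch to char16_t buffers. Typical use, mirroring the converted call sites (the function name is illustrative):

#include "base/i18n/unicodestring.h"
#include "third_party/icu/source/common/unicode/unistr.h"

base::string16 Greeting() {
  icu::UnicodeString ustr = icu::UnicodeString::fromUTF8("caf\xc3\xa9");
  return base::i18n::UnicodeStringToString16(ustr);
}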
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index 1666c122b1c..064b3cb8f0a 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -279,27 +279,31 @@ bool JSONParser::EatComment() {
if (*pos_ != '/' || !CanConsume(1))
return false;
- char next_char = *NextChar();
- if (next_char == '/') {
+ NextChar();
+
+ if (!CanConsume(1))
+ return false;
+
+ if (*pos_ == '/') {
// Single line comment, read to newline.
while (CanConsume(1)) {
- next_char = *NextChar();
- if (next_char == '\n' || next_char == '\r')
+ if (*pos_ == '\n' || *pos_ == '\r')
return true;
+ NextChar();
}
- } else if (next_char == '*') {
+ } else if (*pos_ == '*') {
char previous_char = '\0';
// Block comment, read until end marker.
while (CanConsume(1)) {
- next_char = *NextChar();
- if (previous_char == '*' && next_char == '/') {
+ if (previous_char == '*' && *pos_ == '/') {
// EatWhitespaceAndComments will inspect pos_, which will still be on
// the last / of the comment, so advance once more (which may also be
// end of input).
NextChar();
return true;
}
- previous_char = next_char;
+ previous_char = *pos_;
+ NextChar();
}
// If the comment is unterminated, GetNextToken will report T_END_OF_INPUT.
@@ -344,7 +348,7 @@ std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
return nullptr;
}
- std::unique_ptr<DictionaryValue> dict(new DictionaryValue);
+ std::vector<Value::DictStorage::value_type> dict_storage;
NextChar();
Token token = GetNextToken();
@@ -376,7 +380,7 @@ std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
return nullptr;
}
- dict->SetWithoutPathExpansion(key.AsStringPiece(), std::move(value));
+ dict_storage.emplace_back(key.DestructiveAsString(), std::move(value));
NextChar();
token = GetNextToken();
@@ -393,7 +397,8 @@ std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
}
}
- return std::move(dict);
+ return MakeUnique<Value>(
+ Value::DictStorage(std::move(dict_storage), KEEP_LAST_OF_DUPES));
}
std::unique_ptr<Value> JSONParser::ConsumeList() {
@@ -444,7 +449,7 @@ std::unique_ptr<Value> JSONParser::ConsumeString() {
if (!ConsumeStringRaw(&string))
return nullptr;
- return base::MakeUnique<StringValue>(string.DestructiveAsString());
+ return base::MakeUnique<Value>(string.DestructiveAsString());
}
bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
@@ -453,15 +458,29 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
return false;
}
+ // Strings are at minimum two characters: the surrounding double quotes.
+ if (!CanConsume(2)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return false;
+ }
+
// StringBuilder will internally build a StringPiece unless a UTF-16
// conversion occurs, at which point it will perform a copy into a
// std::string.
StringBuilder string(NextChar());
+ // Handle the empty string case early.
+ if (*pos_ == '"') {
+ *out = std::move(string);
+ return true;
+ }
+
int length = end_pos_ - start_pos_;
int32_t next_char = 0;
- while (CanConsume(1)) {
+ // There must always be at least two characters left in the stream: the next
+ // string character and the terminating closing quote.
+ while (CanConsume(2)) {
int start_index = index_;
pos_ = start_pos_ + index_; // CBU8_NEXT is postcrement.
CBU8_NEXT(start_pos_, index_, length, next_char);
@@ -501,12 +520,18 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
return false;
}
- switch (*NextChar()) {
+ NextChar();
+ if (!CanConsume(1)) {
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+ return false;
+ }
+
+ switch (*pos_) {
      // Allowed escape sequences:
case 'x': { // UTF-8 sequence.
// UTF-8 \x escape sequences are not allowed in the spec, but they
        // are supported here for backwards-compatibility with the old parser.
- if (!CanConsume(2)) {
+ if (!CanConsume(3)) {
ReportError(JSONReader::JSON_INVALID_ESCAPE, 1);
return false;
}
@@ -776,7 +801,7 @@ std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
case 't': {
const char kTrueLiteral[] = "true";
const int kTrueLen = static_cast<int>(strlen(kTrueLiteral));
- if (!CanConsume(kTrueLen - 1) ||
+ if (!CanConsume(kTrueLen) ||
!StringsAreEqual(pos_, kTrueLiteral, kTrueLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
return nullptr;
@@ -787,7 +812,7 @@ std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
case 'f': {
const char kFalseLiteral[] = "false";
const int kFalseLen = static_cast<int>(strlen(kFalseLiteral));
- if (!CanConsume(kFalseLen - 1) ||
+ if (!CanConsume(kFalseLen) ||
!StringsAreEqual(pos_, kFalseLiteral, kFalseLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
return nullptr;
@@ -798,13 +823,13 @@ std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
case 'n': {
const char kNullLiteral[] = "null";
const int kNullLen = static_cast<int>(strlen(kNullLiteral));
- if (!CanConsume(kNullLen - 1) ||
+ if (!CanConsume(kNullLen) ||
!StringsAreEqual(pos_, kNullLiteral, kNullLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
return nullptr;
}
NextNChars(kNullLen - 1);
- return Value::CreateNullValue();
+ return MakeUnique<Value>();
}
default:
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
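The CanConsume changes through this file all fix the same off-by-one: |pos_| already sits on the first character of the token, so the full literal length (not length minus one) must be readable, or a non-NUL-terminated buffer can be over-read; the new UnterminatedInputs test below exercises exactly this. A minimal model of the corrected bounds check:

#include <cstring>

bool ConsumeLiteral(const char* pos, const char* end, const char* literal) {
  size_t len = std::strlen(literal);
  // The CanConsume(kLen) check: all |len| bytes must be in range, since |pos|
  // is already on the literal's first character.
  if (static_cast<size_t>(end - pos) < len)
    return false;
  return std::memcmp(pos, literal, len) == 0;
}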
diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h
index d2850e516b2..4f264583638 100644
--- a/chromium/base/json/json_parser.h
+++ b/chromium/base/json/json_parser.h
@@ -31,7 +31,7 @@ class JSONParserTest;
// to be used directly; it encapsulates logic that need not be exposed publicly.
//
// This parser guarantees O(n) time through the input string. It also optimizes
-// base::StringValue by using StringPiece where possible when returning Value
+// base::Value by using StringPiece where possible when returning Value
// objects by using "hidden roots," discussed in the implementation.
//
// Iteration happens on the byte level, with the functions CanConsume and
diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc
index e3f635b76f1..dfbe77de82b 100644
--- a/chromium/base/json/json_parser_unittest.cc
+++ b/chromium/base/json/json_parser_unittest.cc
@@ -28,6 +28,17 @@ class JSONParserTest : public testing::Test {
return parser;
}
+ // MSan will do a better job detecting over-read errors if the input is
+ // not nul-terminated on the heap. This will copy |input| to a new buffer
+  // owned by |owner|, returning a StringPiece into that buffer.
+ StringPiece MakeNotNullTerminatedInput(const char* input,
+ std::unique_ptr<char[]>* owner) {
+ size_t str_len = strlen(input);
+ owner->reset(new char[str_len]);
+ memcpy(owner->get(), input, str_len);
+ return StringPiece(owner->get(), str_len);
+ }
+
void TestLastThree(JSONParser* parser) {
EXPECT_EQ(',', *parser->NextChar());
EXPECT_EQ('|', *parser->NextChar());
@@ -367,14 +378,11 @@ TEST_F(JSONParserTest, ParseNumberErrors) {
auto test_case = kCases[i];
SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input));
- // MSan will do a better job detecting over-read errors if the input is
- // not nul-terminated on the heap.
- size_t str_len = strlen(test_case.input);
- auto non_nul_termianted = MakeUnique<char[]>(str_len);
- memcpy(non_nul_termianted.get(), test_case.input, str_len);
+ std::unique_ptr<char[]> input_owner;
+ StringPiece input =
+ MakeNotNullTerminatedInput(test_case.input, &input_owner);
- StringPiece string_piece(non_nul_termianted.get(), str_len);
- std::unique_ptr<Value> result = JSONReader::Read(string_piece);
+ std::unique_ptr<Value> result = JSONReader::Read(input);
if (test_case.parse_success) {
EXPECT_TRUE(result);
} else {
@@ -390,5 +398,34 @@ TEST_F(JSONParserTest, ParseNumberErrors) {
}
}
+TEST_F(JSONParserTest, UnterminatedInputs) {
+ const char* kCases[] = {
+ // clang-format off
+ "/",
+ "//",
+ "/*",
+ "\"xxxxxx",
+ "\"",
+ "{ ",
+ "[\t",
+ "tru",
+ "fals",
+ "nul",
+ "\"\\x2",
+ "\"\\u123",
+ // clang-format on
+ };
+
+ for (unsigned int i = 0; i < arraysize(kCases); ++i) {
+ auto* test_case = kCases[i];
+ SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case));
+
+ std::unique_ptr<char[]> input_owner;
+ StringPiece input = MakeNotNullTerminatedInput(test_case, &input_owner);
+
+ EXPECT_FALSE(JSONReader::Read(input));
+ }
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/json/json_perftest.cc b/chromium/base/json/json_perftest.cc
index 345f226a2cf..15222468d78 100644
--- a/chromium/base/json/json_perftest.cc
+++ b/chromium/base/json/json_perftest.cc
@@ -26,7 +26,7 @@ std::unique_ptr<DictionaryValue> GenerateDict() {
list->Set(0, MakeUnique<Value>(2.718));
list->Set(1, MakeUnique<Value>(false));
list->Set(2, MakeUnique<Value>(123));
- list->Set(3, MakeUnique<StringValue>("Bar"));
+ list->Set(3, MakeUnique<Value>("Bar"));
root->Set("List", std::move(list));
return root;
diff --git a/chromium/base/json/json_writer.cc b/chromium/base/json/json_writer.cc
index 07b9d5091c8..19f28f21ada 100644
--- a/chromium/base/json/json_writer.cc
+++ b/chromium/base/json/json_writer.cc
@@ -128,7 +128,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
bool result = node.GetAsList(&list);
DCHECK(result);
for (const auto& value : *list) {
- if (omit_binary_values_ && value->GetType() == Value::Type::BINARY)
+ if (omit_binary_values_ && value.GetType() == Value::Type::BINARY)
continue;
if (first_value_has_been_output) {
@@ -137,7 +137,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
json_string_->push_back(' ');
}
- if (!BuildJSONString(*value, depth))
+ if (!BuildJSONString(value, depth))
result = false;
first_value_has_been_output = true;
diff --git a/chromium/base/json/json_writer_unittest.cc b/chromium/base/json/json_writer_unittest.cc
index 083fdc7eb40..1b57e060697 100644
--- a/chromium/base/json/json_writer_unittest.cc
+++ b/chromium/base/json/json_writer_unittest.cc
@@ -15,7 +15,7 @@ TEST(JSONWriterTest, BasicTypes) {
std::string output_js;
// Test null.
- EXPECT_TRUE(JSONWriter::Write(*Value::CreateNullValue(), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value(), &output_js));
EXPECT_EQ("null", output_js);
// Test empty dict.
@@ -47,7 +47,7 @@ TEST(JSONWriterTest, BasicTypes) {
EXPECT_EQ("-0.8", output_js);
// Test String values.
- EXPECT_TRUE(JSONWriter::Write(StringValue("foo"), &output_js));
+ EXPECT_TRUE(JSONWriter::Write(Value("foo"), &output_js));
EXPECT_EQ("\"foo\"", output_js);
}
@@ -111,29 +111,29 @@ TEST(JSONWriterTest, BinaryValues) {
// Binary values should return errors unless suppressed via the
// OPTIONS_OMIT_BINARY_VALUES flag.
- std::unique_ptr<Value> root(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ std::unique_ptr<Value> root(Value::CreateWithCopiedBuffer("asdf", 4));
EXPECT_FALSE(JSONWriter::Write(*root, &output_js));
EXPECT_TRUE(JSONWriter::WriteWithOptions(
*root, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
EXPECT_TRUE(output_js.empty());
ListValue binary_list;
- binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_list.Append(Value::CreateWithCopiedBuffer("asdf", 4));
binary_list.Append(MakeUnique<Value>(5));
- binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_list.Append(Value::CreateWithCopiedBuffer("asdf", 4));
binary_list.Append(MakeUnique<Value>(2));
- binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_list.Append(Value::CreateWithCopiedBuffer("asdf", 4));
EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
EXPECT_TRUE(JSONWriter::WriteWithOptions(
binary_list, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
EXPECT_EQ("[5,2]", output_js);
DictionaryValue binary_dict;
- binary_dict.Set("a", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_dict.Set("a", Value::CreateWithCopiedBuffer("asdf", 4));
binary_dict.SetInteger("b", 5);
- binary_dict.Set("c", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_dict.Set("c", Value::CreateWithCopiedBuffer("asdf", 4));
binary_dict.SetInteger("d", 2);
- binary_dict.Set("e", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_dict.Set("e", Value::CreateWithCopiedBuffer("asdf", 4));
EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
EXPECT_TRUE(JSONWriter::WriteWithOptions(
binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
diff --git a/chromium/base/lazy_instance.h b/chromium/base/lazy_instance.h
index 1183806bef5..070e436d1f0 100644
--- a/chromium/base/lazy_instance.h
+++ b/chromium/base/lazy_instance.h
@@ -24,11 +24,11 @@
// requires that Type be a complete type so we can determine the size.
//
// Example usage:
-// static LazyInstance<MyClass> my_instance = LAZY_INSTANCE_INITIALIZER;
+// static LazyInstance<MyClass>::Leaky inst = LAZY_INSTANCE_INITIALIZER;
// void SomeMethod() {
-// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
+// inst.Get().SomeMethod(); // MyClass::SomeMethod()
//
-// MyClass* ptr = my_instance.Pointer();
+// MyClass* ptr = inst.Pointer();
// ptr->DoDoDo(); // MyClass::DoDoDo
// }
@@ -53,22 +53,15 @@
namespace base {
template <typename Type>
-struct DefaultLazyInstanceTraits {
- static const bool kRegisterOnExit = true;
-#if DCHECK_IS_ON()
- static const bool kAllowedToAccessOnNonjoinableThread = false;
-#endif
-
+struct LazyInstanceTraitsBase {
static Type* New(void* instance) {
- DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u)
- << ": Bad boy, the buffer passed to placement new is not aligned!\n"
- "This may break some stuff like SSE-based optimizations assuming the "
- "<Type> objects are word aligned.";
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u);
// Use placement new to initialize our instance in our preallocated space.
// The parentheses are very important here to force POD type initialization.
return new (instance) Type();
}
- static void Delete(Type* instance) {
+
+ static void CallDestructor(Type* instance) {
// Explicitly call the destructor.
instance->~Type();
}
@@ -78,6 +71,25 @@ struct DefaultLazyInstanceTraits {
// can implement the more complicated pieces out of line in the .cc file.
namespace internal {
+// This traits class causes destruction of the contained Type at process exit
+// via AtExitManager. This is generally not what you want; prefer Leaky below.
+template <typename Type>
+struct DestructorAtExitLazyInstanceTraits {
+ static const bool kRegisterOnExit = true;
+#if DCHECK_IS_ON()
+ static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+
+ static Type* New(void* instance) {
+ return LazyInstanceTraitsBase<Type>::New(instance);
+ }
+
+ static void Delete(Type* instance) {
+ LazyInstanceTraitsBase<Type>::CallDestructor(instance);
+ }
+};
+
// Use LazyInstance<T>::Leaky for a less-verbose call-site typedef; e.g.:
// base::LazyInstance<T>::Leaky my_leaky_lazy_instance;
// instead of:
@@ -95,12 +107,15 @@ struct LeakyLazyInstanceTraits {
static Type* New(void* instance) {
ANNOTATE_SCOPED_MEMORY_LEAK;
- return DefaultLazyInstanceTraits<Type>::New(instance);
+ return LazyInstanceTraitsBase<Type>::New(instance);
}
static void Delete(Type* instance) {
}
};
+template <typename Type>
+struct ErrorMustSelectLazyOrDestructorAtExitForLazyInstance {};
+
// Our AtomicWord doubles as a spinlock, where a value of
// kLazyInstanceStateCreating means the spinlock is being held for creation.
static const subtle::AtomicWord kLazyInstanceStateCreating = 1;
@@ -119,7 +134,10 @@ BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
} // namespace internal
-template <typename Type, typename Traits = DefaultLazyInstanceTraits<Type> >
+template <
+ typename Type,
+ typename Traits =
+ internal::ErrorMustSelectLazyOrDestructorAtExitForLazyInstance<Type>>
class LazyInstance {
public:
// Do not define a destructor, as doing so makes LazyInstance a
@@ -131,7 +149,9 @@ class LazyInstance {
// Convenience typedef to avoid having to repeat Type for leaky lazy
// instances.
- typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type> > Leaky;
+ typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type>> Leaky;
+ typedef LazyInstance<Type, internal::DestructorAtExitLazyInstanceTraits<Type>>
+ DestructorAtExit;
Type& Get() {
return *Pointer();
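
With the default traits argument replaced by
ErrorMustSelectLazyOrDestructorAtExitForLazyInstance, a bare LazyInstance<T>
no longer compiles; each call site must pick a flavor. A hedged sketch of the
two spellings (MyClass is a hypothetical default-constructible type):

    #include "base/lazy_instance.h"

    // Leaked at shutdown; generally the preferred flavor.
    static base::LazyInstance<MyClass>::Leaky g_leaky = LAZY_INSTANCE_INITIALIZER;

    // Destroyed through AtExitManager at process exit; opted into explicitly.
    static base::LazyInstance<MyClass>::DestructorAtExit g_at_exit =
        LAZY_INSTANCE_INITIALIZER;

    void Use() {
      g_leaky.Get().SomeMethod();          // Accessors are identical for both.
      MyClass* ptr = g_at_exit.Pointer();
      ptr->SomeMethod();
    }
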
diff --git a/chromium/base/lazy_instance_unittest.cc b/chromium/base/lazy_instance_unittest.cc
index 8947b1291f6..0aa46594657 100644
--- a/chromium/base/lazy_instance_unittest.cc
+++ b/chromium/base/lazy_instance_unittest.cc
@@ -45,7 +45,8 @@ int SlowConstructor::constructed = 0;
class SlowDelegate : public base::DelegateSimpleThread::Delegate {
public:
- explicit SlowDelegate(base::LazyInstance<SlowConstructor>* lazy)
+ explicit SlowDelegate(
+ base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy)
: lazy_(lazy) {}
void Run() override {
@@ -54,13 +55,13 @@ class SlowDelegate : public base::DelegateSimpleThread::Delegate {
}
private:
- base::LazyInstance<SlowConstructor>* lazy_;
+ base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy_;
};
} // namespace
-static base::LazyInstance<ConstructAndDestructLogger> lazy_logger =
- LAZY_INSTANCE_INITIALIZER;
+static base::LazyInstance<ConstructAndDestructLogger>::DestructorAtExit
+ lazy_logger = LAZY_INSTANCE_INITIALIZER;
TEST(LazyInstanceTest, Basic) {
{
@@ -81,7 +82,7 @@ TEST(LazyInstanceTest, Basic) {
EXPECT_EQ(4, destructed_seq_.GetNext());
}
-static base::LazyInstance<SlowConstructor> lazy_slow =
+static base::LazyInstance<SlowConstructor>::DestructorAtExit lazy_slow =
LAZY_INSTANCE_INITIALIZER;
TEST(LazyInstanceTest, ConstructorThreadSafety) {
@@ -126,7 +127,8 @@ TEST(LazyInstanceTest, LeakyLazyInstance) {
bool deleted1 = false;
{
base::ShadowingAtExitManager shadow;
- static base::LazyInstance<DeleteLogger> test = LAZY_INSTANCE_INITIALIZER;
+ static base::LazyInstance<DeleteLogger>::DestructorAtExit test =
+ LAZY_INSTANCE_INITIALIZER;
test.Get().SetDeletedPtr(&deleted1);
}
EXPECT_TRUE(deleted1);
@@ -164,9 +166,12 @@ TEST(LazyInstanceTest, Alignment) {
// Create some static instances with increasing sizes and alignment
// requirements. By ordering this way, the linker will need to do some work to
// ensure proper alignment of the static data.
- static LazyInstance<AlignedData<4> > align4 = LAZY_INSTANCE_INITIALIZER;
- static LazyInstance<AlignedData<32> > align32 = LAZY_INSTANCE_INITIALIZER;
- static LazyInstance<AlignedData<4096> > align4096 = LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<AlignedData<4>>::DestructorAtExit align4 =
+ LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<AlignedData<32>>::DestructorAtExit align32 =
+ LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<AlignedData<4096>>::DestructorAtExit align4096 =
+ LAZY_INSTANCE_INITIALIZER;
EXPECT_ALIGNED(align4.Pointer(), 4);
EXPECT_ALIGNED(align32.Pointer(), 32);
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index c67b937e5b0..1dcb0f71f12 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -642,7 +642,7 @@ inline typename std::enable_if<
std::is_enum<T>::value,
void>::type
MakeCheckOpValueString(std::ostream* os, const T& v) {
- (*os) << static_cast<typename base::underlying_type<T>::type>(v);
+ (*os) << static_cast<typename std::underlying_type<T>::type>(v);
}
// We need an explicit overload for std::nullptr_t.
@@ -747,13 +747,6 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#endif // DCHECK_IS_ON()
-// DEBUG_MODE is for runtime uses like
-// if (DEBUG_MODE) foo.CheckThatFoo();
-// We tie its state to DCHECK_IS_ON().
-//
-// For compile-time checks, #if DCHECK_IS_ON() can be used.
-enum { DEBUG_MODE = DCHECK_IS_ON() };
-
#define DLOG(severity) \
LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
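
With DEBUG_MODE removed, callers migrate to DCHECK_IS_ON(), which works both
as a runtime constant and in the preprocessor. A hedged sketch of the two
replacement forms (Foo and CheckThatFoo() are hypothetical):

    #include "base/logging.h"

    void MaybeCheck(Foo& foo) {
      // Runtime form, replacing: if (DEBUG_MODE) foo.CheckThatFoo();
      if (DCHECK_IS_ON())
        foo.CheckThatFoo();

      // Compile-time form, when the checking code should not be built at all
      // in release:
    #if DCHECK_IS_ON()
      foo.CheckThatFoo();
    #endif
    }
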
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 52ba9880b36..62323972b8a 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -69,16 +69,14 @@ class MockLogSource {
TEST_F(LoggingTest, BasicLogging) {
MockLogSource mock_log_source;
- EXPECT_CALL(mock_log_source, Log()).Times(DEBUG_MODE ? 16 : 8).
- WillRepeatedly(Return("log message"));
+ EXPECT_CALL(mock_log_source, Log())
+ .Times(DCHECK_IS_ON() ? 16 : 8)
+ .WillRepeatedly(Return("log message"));
SetMinLogLevel(LOG_INFO);
EXPECT_TRUE(LOG_IS_ON(INFO));
- // As of g++-4.5, the first argument to EXPECT_EQ cannot be a
- // constant expression.
- const bool kIsDebugMode = (DEBUG_MODE != 0);
- EXPECT_TRUE(kIsDebugMode == DLOG_IS_ON(INFO));
+ EXPECT_TRUE((DCHECK_IS_ON() != 0) == DLOG_IS_ON(INFO));
EXPECT_TRUE(VLOG_IS_ON(0));
LOG(INFO) << mock_log_source.Log();
@@ -187,7 +185,13 @@ TEST_F(LoggingTest, LoggingIsLazyByDestination) {
// Official builds have CHECKs directly call BreakDebugger.
#if !defined(OFFICIAL_BUILD)
-TEST_F(LoggingTest, CheckStreamsAreLazy) {
+// https://crbug.com/709067 tracks test flakiness on iOS.
+#if defined(OS_IOS)
+#define MAYBE_CheckStreamsAreLazy DISABLED_CheckStreamsAreLazy
+#else
+#define MAYBE_CheckStreamsAreLazy CheckStreamsAreLazy
+#endif
+TEST_F(LoggingTest, MAYBE_CheckStreamsAreLazy) {
MockLogSource mock_log_source, uncalled_mock_log_source;
EXPECT_CALL(mock_log_source, Log()).Times(8).
WillRepeatedly(Return("check message"));
@@ -388,7 +392,13 @@ void DcheckEmptyFunction1() {
}
void DcheckEmptyFunction2() {}
-TEST_F(LoggingTest, Dcheck) {
+// https://crbug.com/709067 tracks test flakiness on iOS.
+#if defined(OS_IOS)
+#define MAYBE_Dcheck DISABLED_Dcheck
+#else
+#define MAYBE_Dcheck Dcheck
+#endif
+TEST_F(LoggingTest, MAYBE_Dcheck) {
#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
// Release build.
EXPECT_FALSE(DCHECK_IS_ON());
diff --git a/chromium/base/mac/mach_port_broker_unittest.cc b/chromium/base/mac/mach_port_broker_unittest.cc
index bff8eb6a9bc..cb4b82ca47c 100644
--- a/chromium/base/mac/mach_port_broker_unittest.cc
+++ b/chromium/base/mac/mach_port_broker_unittest.cc
@@ -95,21 +95,21 @@ TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
CommandLine command_line(
base::GetMultiProcessTestChildBaseCommandLine());
broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
"MachPortBrokerTestChild", command_line, LaunchOptions());
- broker_.AddPlaceholderForPid(test_child_process.Handle());
+ broker_.AddPlaceholderForPid(spawn_result.process.Handle());
broker_.GetLock().Release();
WaitForTaskPort();
- EXPECT_EQ(test_child_process.Handle(), received_process_);
+ EXPECT_EQ(spawn_result.process.Handle(), received_process_);
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
+ broker_.TaskForPid(spawn_result.process.Handle()));
}
TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
@@ -117,17 +117,18 @@ TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
CommandLine command_line(
base::GetMultiProcessTestChildBaseCommandLine());
broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
"MachPortBrokerTestChild", command_line, LaunchOptions());
+
broker_.GetLock().Release();
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
+ broker_.TaskForPid(spawn_result.process.Handle()));
}
} // namespace base
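
The multiprocess-test API change threaded through this file has
SpawnMultiProcessTestChild return a SpawnChildResult whose .process field
replaces the old bare base::Process. A hedged sketch of the updated call
pattern (the child name "MyTestChild" is hypothetical):

    #include "base/process/launch.h"
    #include "base/test/multiprocess_test.h"
    #include "base/test/test_timeouts.h"

    void RunChildOnce() {
      base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
          "MyTestChild", base::GetMultiProcessTestChildBaseCommandLine(),
          base::LaunchOptions());
      int exit_code = -1;
      // The spawned process now lives at spawn_result.process.
      spawn_result.process.WaitForExitWithTimeout(
          TestTimeouts::action_timeout(), &exit_code);
    }
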
diff --git a/chromium/base/mac/sdk_forward_declarations.h b/chromium/base/mac/sdk_forward_declarations.h
index 7ea7829d9af..86fac9c2768 100644
--- a/chromium/base/mac/sdk_forward_declarations.h
+++ b/chromium/base/mac/sdk_forward_declarations.h
@@ -144,6 +144,7 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
@interface NSView (YosemiteSDK)
- (BOOL)isAccessibilitySelectorAllowed:(SEL)selector;
+@property(copy) NSString* accessibilityLabel;
@end
#endif // MAC_OS_X_VERSION_10_10
@@ -163,6 +164,18 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
#endif // MAC_OS_X_VERSION_10_10
+// ----------------------------------------------------------------------------
+// Define NSStrings only available in newer versions of the OSX SDK to force
+// them to be statically linked.
+// ----------------------------------------------------------------------------
+
+extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+BASE_EXPORT extern NSString* const CIDetectorTypeText;
+#endif // MAC_OS_X_VERSION_10_11
+} // extern "C"
+
// Once Chrome no longer supports OSX 10.10, everything within this
// preprocessor block can be removed.
#if !defined(MAC_OS_X_VERSION_10_11) || \
@@ -187,6 +200,12 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
- (void)performWindowDragWithEvent:(NSEvent*)event;
@end
+@interface CIRectangleFeature (ElCapitanSDK)
+@property(readonly) CGRect bounds;
+@end
+
+@class CIRectangleFeature;
+
#endif // MAC_OS_X_VERSION_10_11
// Once Chrome no longer supports OSX 10.11, everything within this
diff --git a/chromium/base/mac/sdk_forward_declarations.mm b/chromium/base/mac/sdk_forward_declarations.mm
index 25f71a95da5..c624daedd81 100644
--- a/chromium/base/mac/sdk_forward_declarations.mm
+++ b/chromium/base/mac/sdk_forward_declarations.mm
@@ -13,3 +13,8 @@ NSString* const NSUserActivityTypeBrowsingWeb =
NSString* const NSAppearanceNameVibrantDark = @"NSAppearanceNameVibrantDark";
#endif // MAC_OS_X_VERSION_10_10
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+NSString* const CIDetectorTypeText = @"CIDetectorTypeText";
+#endif // MAC_OS_X_VERSION_10_11
diff --git a/chromium/base/memory/aligned_memory_unittest.cc b/chromium/base/memory/aligned_memory_unittest.cc
index abe0cf3ff57..892c50ef70b 100644
--- a/chromium/base/memory/aligned_memory_unittest.cc
+++ b/chromium/base/memory/aligned_memory_unittest.cc
@@ -44,10 +44,6 @@ TEST(AlignedMemoryTest, StackAlignment) {
EXPECT_ALIGNED(raw8.void_data(), 8);
EXPECT_ALIGNED(raw16.void_data(), 16);
-
- // TODO(ios): __attribute__((aligned(X))) with X >= 128 does not works on
- // the stack when building for arm64 on iOS, http://crbug.com/349003
-#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
EXPECT_ALIGNED(raw128.void_data(), 128);
// NaCl x86-64 compiler emits non-validating instructions for >128
@@ -61,14 +57,10 @@ TEST(AlignedMemoryTest, StackAlignment) {
EXPECT_EQ(256u, ALIGNOF(raw256));
EXPECT_ALIGNED(raw256.void_data(), 256);
- // TODO(ios): This test hits an armv7 bug in clang. crbug.com/138066
-#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
AlignedMemory<8, 4096> raw4096;
EXPECT_EQ(4096u, ALIGNOF(raw4096));
EXPECT_ALIGNED(raw4096.void_data(), 4096);
-#endif // !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
#endif // !(defined(OS_NACL) && defined(ARCH_CPU_X86_64))
-#endif // !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
}
TEST(AlignedMemoryTest, DynamicAllocation) {
diff --git a/chromium/base/memory/memory_coordinator_proxy.cc b/chromium/base/memory/memory_coordinator_proxy.cc
index 59c7a6a1ef2..f82e928a4b5 100644
--- a/chromium/base/memory/memory_coordinator_proxy.cc
+++ b/chromium/base/memory/memory_coordinator_proxy.cc
@@ -26,7 +26,7 @@ MemoryCoordinatorProxy* MemoryCoordinatorProxy::GetInstance() {
// static
void MemoryCoordinatorProxy::SetMemoryCoordinator(
MemoryCoordinator* coordinator) {
- DCHECK(!g_memory_coordinator);
+ DCHECK(!g_memory_coordinator || !coordinator);
g_memory_coordinator = coordinator;
}
@@ -36,10 +36,4 @@ MemoryState MemoryCoordinatorProxy::GetCurrentMemoryState() const {
return g_memory_coordinator->GetCurrentMemoryState();
}
-void MemoryCoordinatorProxy::SetCurrentMemoryStateForTesting(
- MemoryState memory_state) {
- DCHECK(g_memory_coordinator);
- g_memory_coordinator->SetCurrentMemoryStateForTesting(memory_state);
-}
-
} // namespace base
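
The loosened DCHECK permits clearing the global coordinator as well as
installing one; only replacing a live coordinator with another still asserts.
A minimal sketch of the set/reset cycle this enables, e.g. in test teardown
(the coordinator is assumed to outlive its registration):

    #include "base/memory/memory_coordinator_proxy.h"

    void RunWithCoordinator(base::MemoryCoordinator* coordinator) {
      // Install: allowed because no coordinator is currently registered.
      base::MemoryCoordinatorProxy::SetMemoryCoordinator(coordinator);

      // ... exercise code that queries MemoryCoordinatorProxy ...

      // Clear: now also allowed, letting tests tear down cleanly.
      base::MemoryCoordinatorProxy::SetMemoryCoordinator(nullptr);
    }
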
diff --git a/chromium/base/memory/memory_coordinator_proxy.h b/chromium/base/memory/memory_coordinator_proxy.h
index 036f126f4c6..2bb26c451c0 100644
--- a/chromium/base/memory/memory_coordinator_proxy.h
+++ b/chromium/base/memory/memory_coordinator_proxy.h
@@ -19,7 +19,6 @@ class BASE_EXPORT MemoryCoordinator {
virtual ~MemoryCoordinator() {}
virtual MemoryState GetCurrentMemoryState() const = 0;
- virtual void SetCurrentMemoryStateForTesting(MemoryState memory_state) = 0;
};
// The proxy of MemoryCoordinator to be accessed from components that are not
@@ -36,9 +35,6 @@ class BASE_EXPORT MemoryCoordinatorProxy {
// Returns the current memory state.
MemoryState GetCurrentMemoryState() const;
- // Sets the current memory state. This function is for testing only.
- void SetCurrentMemoryStateForTesting(MemoryState memory_state);
-
private:
friend struct base::DefaultSingletonTraits<MemoryCoordinatorProxy>;
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.cc b/chromium/base/memory/memory_pressure_monitor_mac.cc
index 391589021bd..9ca520e8f8a 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac.cc
@@ -4,6 +4,8 @@
#include "base/memory/memory_pressure_monitor_mac.h"
+#include <CoreFoundation/CoreFoundation.h>
+
#include <dlfcn.h>
#include <stddef.h>
#include <sys/sysctl.h>
@@ -26,9 +28,9 @@ namespace base {
namespace mac {
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressure(
- int mac_memory_pressure) {
- switch (mac_memory_pressure) {
+MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ int mac_memory_pressure_level) {
+ switch (mac_memory_pressure_level) {
case DISPATCH_MEMORYPRESSURE_NORMAL:
return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
case DISPATCH_MEMORYPRESSURE_WARN:
@@ -39,6 +41,13 @@ MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressure(
return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
}
+void MemoryPressureMonitor::OnRunLoopExit(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity,
+ void* info) {
+ MemoryPressureMonitor* self = static_cast<MemoryPressureMonitor*>(info);
+ self->UpdatePressureLevelOnRunLoopExit();
+}
+
MemoryPressureMonitor::MemoryPressureMonitor()
: memory_level_event_source_(dispatch_source_create(
DISPATCH_SOURCE_TYPE_MEMORYPRESSURE,
@@ -48,75 +57,137 @@ MemoryPressureMonitor::MemoryPressureMonitor()
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))),
dispatch_callback_(
base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
- last_statistic_report_(CFAbsoluteTimeGetCurrent()),
+ last_statistic_report_time_(CFAbsoluteTimeGetCurrent()),
last_pressure_level_(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
- reporting_error_(0) {
- if (memory_level_event_source_.get() != nullptr) {
+ subtick_seconds_(0) {
+ // Attach an event handler to the memory pressure event source.
+ if (memory_level_event_source_.get()) {
dispatch_source_set_event_handler(memory_level_event_source_, ^{
OnMemoryPressureChanged(memory_level_event_source_.get(),
dispatch_callback_);
});
+
+ // Start monitoring the event source.
dispatch_resume(memory_level_event_source_);
}
+
+ // Create a CFRunLoopObserver to check the memory pressure at the end of
+ // every pass through the event loop (modulo kUMATickSize).
+ CFRunLoopObserverContext observer_context = {0, this, NULL, NULL, NULL};
+
+ exit_observer_.reset(
+ CFRunLoopObserverCreate(kCFAllocatorDefault, kCFRunLoopExit, true, 0,
+ OnRunLoopExit, &observer_context));
+
+ CFRunLoopRef run_loop = CFRunLoopGetCurrent();
+ CFRunLoopAddObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
+ CFRunLoopAddObserver(run_loop, exit_observer_,
+ kMessageLoopExclusiveRunLoopMode);
}
MemoryPressureMonitor::~MemoryPressureMonitor() {
- if (memory_level_event_source_.get() != nullptr)
+ // Detach from the run loop.
+ CFRunLoopRef run_loop = CFRunLoopGetCurrent();
+ CFRunLoopRemoveObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
+ CFRunLoopRemoveObserver(run_loop, exit_observer_,
+ kMessageLoopExclusiveRunLoopMode);
+
+ // Remove the memory pressure event source.
+ if (memory_level_event_source_.get()) {
dispatch_source_cancel(memory_level_event_source_);
+ }
}
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
- int mac_memory_pressure;
+int MemoryPressureMonitor::GetMacMemoryPressureLevel() {
+ // Get the raw memory pressure level from macOS.
+ int mac_memory_pressure_level;
size_t length = sizeof(int);
- sysctlbyname("kern.memorystatus_vm_pressure_level", &mac_memory_pressure,
- &length, nullptr, 0);
- MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
- MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
- bool pressure_level_changed = false;
- if (last_pressure_level_ != memory_pressure_level) {
- pressure_level_changed = true;
- }
- SendStatisticsIfNecessary(pressure_level_changed);
- last_pressure_level_ = memory_pressure_level;
- return memory_pressure_level;
-}
+ sysctlbyname("kern.memorystatus_vm_pressure_level",
+ &mac_memory_pressure_level, &length, nullptr, 0);
-void MemoryPressureMonitor::OnMemoryPressureChanged(
- dispatch_source_s* event_source,
- const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
- int mac_memory_pressure = dispatch_source_get_data(event_source);
- MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
- MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
- bool pressure_level_changed = false;
- if (last_pressure_level_ != memory_pressure_level) {
- pressure_level_changed = true;
- }
- SendStatisticsIfNecessary(pressure_level_changed);
- last_pressure_level_ = memory_pressure_level;
- if (memory_pressure_level !=
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
- dispatch_callback.Run(memory_pressure_level);
+ return mac_memory_pressure_level;
}
-void MemoryPressureMonitor::SendStatisticsIfNecessary(
- bool pressure_level_changed) {
+void MemoryPressureMonitor::UpdatePressureLevel() {
+ // Get the current macOS pressure level and convert to the corresponding
+ // Chrome pressure level.
+ int mac_memory_pressure_level = GetMacMemoryPressureLevel();
+ MemoryPressureListener::MemoryPressureLevel new_pressure_level =
+ MemoryPressureLevelForMacMemoryPressureLevel(mac_memory_pressure_level);
+
+ // Compute the number of "ticks" spent at |last_pressure_level_| (since the
+ // last report sent to UMA).
CFTimeInterval now = CFAbsoluteTimeGetCurrent();
- CFTimeInterval since_last_report = now - last_statistic_report_;
- last_statistic_report_ = now;
+ CFTimeInterval time_since_last_report = now - last_statistic_report_time_;
+ last_statistic_report_time_ = now;
- double accumulated_time = since_last_report + reporting_error_;
+ double accumulated_time = time_since_last_report + subtick_seconds_;
int ticks_to_report = static_cast<int>(accumulated_time / kUMATickSize);
- reporting_error_ = std::fmod(accumulated_time, kUMATickSize);
+ // Save for later the seconds that didn't make it into a full tick.
+ subtick_seconds_ = std::fmod(accumulated_time, kUMATickSize);
- // Round up on change to ensure we capture it
+ // Round the tick count up on a pressure level change to ensure we capture it.
+ bool pressure_level_changed = (new_pressure_level != last_pressure_level_);
if (pressure_level_changed && ticks_to_report < 1) {
ticks_to_report = 1;
- reporting_error_ = 0;
+ subtick_seconds_ = 0;
}
- if (ticks_to_report >= 1)
+ // Send elapsed ticks to UMA.
+ if (ticks_to_report >= 1) {
RecordMemoryPressure(last_pressure_level_, ticks_to_report);
+ }
+
+ // Save the now-current memory pressure level.
+ last_pressure_level_ = new_pressure_level;
+}
+
+void MemoryPressureMonitor::UpdatePressureLevelOnRunLoopExit() {
+ // Wait until it's time to check the pressure level.
+ CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+ if (now >= next_run_loop_update_time_) {
+ UpdatePressureLevel();
+
+ // Update again in kUMATickSize seconds. We can update at any frequency,
+ // but because we're only checking memory pressure levels for UMA there's
+ // no need to update more frequently than we're keeping statistics on.
+ next_run_loop_update_time_ = now + kUMATickSize - subtick_seconds_;
+ }
+}
+
+// Static.
+int MemoryPressureMonitor::GetSecondsPerUMATick() {
+ return kUMATickSize;
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+ return last_pressure_level_;
+}
+
+void MemoryPressureMonitor::OnMemoryPressureChanged(
+ dispatch_source_s* event_source,
+ const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
+ // Get the Chrome-equivalent memory pressure level.
+ int mac_memory_pressure_level = dispatch_source_get_data(event_source);
+ MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
+ MemoryPressureLevelForMacMemoryPressureLevel(mac_memory_pressure_level);
+
+ // Run the callback that's waiting on memory pressure change notifications.
+ // Note that we don't bother with updating |last_pressure_level_| or
+ // causing memory pressure stats to be sent to UMA. Memory pressure change
+ // notifications are delayed on the Mac, so the actual current memory
+ // pressure level may differ from the pressure level carried by the event.
+ //
+ // In general we don't want to take action (such as freeing memory) on
+ // memory pressure change events, but that's how the current system is
+ // designed. Given that it's incorrect to act on either stale or current
+ // pressure level info, it's not clear which level is better to send. For
+ // now stick with how it's been implemented to date, which is to send the
+ // stale value.
+ if (memory_pressure_level !=
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
+ dispatch_callback.Run(memory_pressure_level);
}
void MemoryPressureMonitor::SetDispatchCallback(
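
The subtick bookkeeping above is easiest to follow with numbers. A worked
sketch of the same arithmetic in isolation, assuming kUMATickSize is the
5-second tick the old comment described:

    #include <cmath>

    // Mirrors UpdatePressureLevel()'s tick accounting.
    struct TickAccumulator {
      double subtick_seconds = 0;  // Seconds left over from the last report.

      int TicksToReport(double elapsed_seconds, bool level_changed) {
        const double kUMATickSize = 5.0;
        const double accumulated = elapsed_seconds + subtick_seconds;
        int ticks = static_cast<int>(accumulated / kUMATickSize);
        subtick_seconds = std::fmod(accumulated, kUMATickSize);
        if (level_changed && ticks < 1) {
          ticks = 1;           // Round up so brief level changes still report.
          subtick_seconds = 0;
        }
        return ticks;
      }
    };
    // E.g. 6 elapsed seconds report 1 tick and carry 1 subtick second, which
    // matches the RecordMemoryPressureStats expectations in the unittest below.
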
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.h b/chromium/base/memory/memory_pressure_monitor_mac.h
index 9118632bd66..b85b6c9017d 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.h
+++ b/chromium/base/memory/memory_pressure_monitor_mac.h
@@ -9,10 +9,12 @@
#include <dispatch/dispatch.h>
#include "base/base_export.h"
+#include "base/mac/scoped_cftyperef.h"
#include "base/mac/scoped_dispatch_object.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/memory_pressure_monitor.h"
+#include "base/message_loop/message_pump_mac.h"
namespace base {
namespace mac {
@@ -34,24 +36,51 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
private:
friend TestMemoryPressureMonitor;
- static MemoryPressureLevel
- MemoryPressureLevelForMacMemoryPressure(int mac_memory_pressure);
+ static MemoryPressureLevel MemoryPressureLevelForMacMemoryPressureLevel(
+ int mac_memory_pressure_level);
+ static void OnRunLoopExit(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity,
+ void* info);
+ // Returns the raw memory pressure level from macOS. Exposed for
+ // unit testing.
+ virtual int GetMacMemoryPressureLevel();
+
+ // Updates |last_pressure_level_| with the current memory pressure level.
+ void UpdatePressureLevel();
+
+ // Updates |last_pressure_level_| at the end of every run loop pass (modulo
+ // some number of seconds).
+ void UpdatePressureLevelOnRunLoopExit();
+
+ // Run |dispatch_callback| on memory pressure notifications from the OS.
void OnMemoryPressureChanged(dispatch_source_s* event_source,
const DispatchCallback& dispatch_callback);
- void SendStatisticsIfNecessary(bool pressure_level_changed);
+ // Returns the number of seconds per UMA tick (for statistics recording).
+ // Exposed for testing.
+ static int GetSecondsPerUMATick();
+
+ // The dispatch source that generates memory pressure change notifications.
ScopedDispatchObject<dispatch_source_t> memory_level_event_source_;
+ // The callback to call upon receiving a memory pressure change notification.
DispatchCallback dispatch_callback_;
- CFTimeInterval last_statistic_report_;
+ // Last UMA report time.
+ CFTimeInterval last_statistic_report_time_;
+ // Most-recent memory pressure level.
MemoryPressureLevel last_pressure_level_;
- // The UMA statistic is recorded in 5 second increments. This
- // accumulates the remaining time to be rolled into the next
- // call.
- CFTimeInterval reporting_error_;
+ // Observer that tracks exits from the main run loop.
+ ScopedCFTypeRef<CFRunLoopObserverRef> exit_observer_;
+
+ // Next time to update the memory pressure level when exiting the run loop.
+ CFTimeInterval next_run_loop_update_time_;
+
+ // Seconds left over from the last UMA tick calculation (to be added to the
+ // next calculation).
+ CFTimeInterval subtick_seconds_;
DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
};
diff --git a/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc b/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc
index b7c29cd13cb..ff464fb3c43 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac_unittest.cc
@@ -4,7 +4,11 @@
#include "base/memory/memory_pressure_monitor_mac.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/mac/scoped_cftyperef.h"
#include "base/macros.h"
+#include "base/test/histogram_tester.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -12,43 +16,82 @@ namespace mac {
class TestMemoryPressureMonitor : public MemoryPressureMonitor {
public:
- using MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressure;
+ using MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel;
+
+ // A HistogramTester for verifying correct UMA stat generation.
+ base::HistogramTester tester;
TestMemoryPressureMonitor() { }
+ // Clears the next run loop update time so that the next pass of the run
+ // loop checks the memory pressure level immediately. Normally there's a
+ // 5 second delay between pressure readings.
+ void ResetRunLoopUpdateTime() { next_run_loop_update_time_ = 0; }
+
+ // Sets the last UMA stat report time. Time spent in memory pressure is
+ // recorded in 5-second "ticks" from the last time statistics were recorded.
+ void SetLastStatisticReportTime(CFTimeInterval time) {
+ last_statistic_report_time_ = time;
+ }
+
+ // The raw macOS memory pressure level that the monitor will read; set
+ // directly by tests.
+ int macos_pressure_level_for_testing_;
+
+ // Exposes the UpdatePressureLevel() method for testing.
+ void UpdatePressureLevel() { MemoryPressureMonitor::UpdatePressureLevel(); }
+
+ // Returns the number of seconds left over from the last UMA tick
+ // calculation.
+ int SubTickSeconds() { return subtick_seconds_; }
+
+ // Returns the number of seconds per UMA tick.
+ static int GetSecondsPerUMATick() {
+ return MemoryPressureMonitor::GetSecondsPerUMATick();
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+
+ int GetMacMemoryPressureLevel() override {
+ return macos_pressure_level_for_testing_;
+ }
};
TEST(MacMemoryPressureMonitorTest, MemoryPressureFromMacMemoryPressure) {
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(
- DISPATCH_MEMORYPRESSURE_NORMAL));
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(
- DISPATCH_MEMORYPRESSURE_WARN));
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(
- DISPATCH_MEMORYPRESSURE_CRITICAL));
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(0));
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(3));
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(5));
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::
- MemoryPressureLevelForMacMemoryPressure(-1));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ DISPATCH_MEMORYPRESSURE_NORMAL));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ DISPATCH_MEMORYPRESSURE_WARN));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ DISPATCH_MEMORYPRESSURE_CRITICAL));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ 0));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ 3));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ 5));
+ EXPECT_EQ(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+ -1));
}
TEST(MacMemoryPressureMonitorTest, CurrentMemoryPressure) {
TestMemoryPressureMonitor monitor;
+
MemoryPressureListener::MemoryPressureLevel memory_pressure =
monitor.GetCurrentPressureLevel();
EXPECT_TRUE(memory_pressure ==
@@ -59,5 +102,127 @@ TEST(MacMemoryPressureMonitorTest, CurrentMemoryPressure) {
MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
}
+TEST(MacMemoryPressureMonitorTest, MemoryPressureConversion) {
+ TestMemoryPressureMonitor monitor;
+
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+ monitor.UpdatePressureLevel();
+ MemoryPressureListener::MemoryPressureLevel memory_pressure =
+ monitor.GetCurrentPressureLevel();
+ EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ memory_pressure);
+
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+ monitor.UpdatePressureLevel();
+ memory_pressure = monitor.GetCurrentPressureLevel();
+ EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ memory_pressure);
+
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+ monitor.UpdatePressureLevel();
+ memory_pressure = monitor.GetCurrentPressureLevel();
+ EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+ memory_pressure);
+}
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureRunLoopChecking) {
+ TestMemoryPressureMonitor monitor;
+
+ // To test grabbing the memory pressure at the end of the run loop, we have to
+ // run the run loop, but to do that the run loop needs a run loop source. Add
+ // a timer as the source. We know that the exit observer is attached to
+ // the kMessageLoopExclusiveRunLoopMode mode, so use that mode.
+ ScopedCFTypeRef<CFRunLoopTimerRef> timer_ref(CFRunLoopTimerCreate(
+ NULL, CFAbsoluteTimeGetCurrent() + 10, 0, 0, 0, nullptr, nullptr));
+ CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer_ref,
+ kMessageLoopExclusiveRunLoopMode);
+
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+ monitor.ResetRunLoopUpdateTime();
+ CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+ EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+ monitor.ResetRunLoopUpdateTime();
+ CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+ EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
+
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+ monitor.ResetRunLoopUpdateTime();
+ CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+ EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE);
+
+ CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), timer_ref,
+ kMessageLoopExclusiveRunLoopMode);
+}
+
+TEST(MacMemoryPressureMonitorTest, RecordMemoryPressureStats) {
+ TestMemoryPressureMonitor monitor;
+ const char* kHistogram = "Memory.PressureLevel";
+ CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+ const int seconds_per_tick =
+ TestMemoryPressureMonitor::GetSecondsPerUMATick();
+
+ // Set the initial pressure level.
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+ // Incur one UMA tick of time (and include one extra second of elapsed time).
+ monitor.SetLastStatisticReportTime(now - (seconds_per_tick + 1));
+ monitor.UpdatePressureLevel();
+ monitor.tester.ExpectTotalCount(kHistogram, 1);
+ monitor.tester.ExpectBucketCount(kHistogram, 0, 1);
+ // The report time above included an extra second so there should be 1
+ // sub-tick second left over.
+ EXPECT_EQ(1, monitor.SubTickSeconds());
+
+ // Simulate sitting at normal pressure for 1 second less than 6 UMA ticks'
+ // worth of seconds, then elevating to warning. With the leftover sub-tick
+ // second from above, the total elapsed time is an even 6 UMA ticks.
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+ monitor.SetLastStatisticReportTime(now - (seconds_per_tick * 6 - 1));
+ monitor.UpdatePressureLevel();
+ monitor.tester.ExpectTotalCount(kHistogram, 7);
+ monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+ monitor.tester.ExpectBucketCount(kHistogram, 1, 0);
+ EXPECT_EQ(0, monitor.SubTickSeconds());
+
+ // Simulate sitting in warning pressure for 20 UMA ticks and 2 seconds, and
+ // then elevating to critical.
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+ monitor.SetLastStatisticReportTime(now - (20 * seconds_per_tick + 2));
+ monitor.UpdatePressureLevel();
+ monitor.tester.ExpectTotalCount(kHistogram, 27);
+ monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+ monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+ monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
+ EXPECT_EQ(2, monitor.SubTickSeconds());
+
+ // A quick update while critical - the stats should not budge because less
+ // than 1 tick of time has elapsed.
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+ monitor.SetLastStatisticReportTime(now - 1);
+ monitor.UpdatePressureLevel();
+ monitor.tester.ExpectTotalCount(kHistogram, 27);
+ monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+ monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+ monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
+ EXPECT_EQ(3, monitor.SubTickSeconds());
+
+ // A quick change back to normal. Less than 1 tick of time has elapsed, but
+ // in this case the pressure level changed, so the critical bucket should
+ // get another sample (otherwise we could miss quick level changes).
+ monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+ monitor.SetLastStatisticReportTime(now - 1);
+ monitor.UpdatePressureLevel();
+ monitor.tester.ExpectTotalCount(kHistogram, 28);
+ monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+ monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+ monitor.tester.ExpectBucketCount(kHistogram, 2, 1);
+ // When less than 1 tick of time has elapsed but the pressure level changed,
+ // the subtick remainder gets zeroed out.
+ EXPECT_EQ(0, monitor.SubTickSeconds());
+}
} // namespace mac
} // namespace base
diff --git a/chromium/base/memory/ref_counted.cc b/chromium/base/memory/ref_counted.cc
index 46bbd7ad858..039f255b153 100644
--- a/chromium/base/memory/ref_counted.cc
+++ b/chromium/base/memory/ref_counted.cc
@@ -3,9 +3,17 @@
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
+
#include "base/threading/thread_collision_warner.h"
namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+AtomicRefCount g_cross_thread_ref_count_access_allow_count = 0;
+#endif
+
+} // namespace
namespace subtle {
@@ -13,8 +21,6 @@ bool RefCountedThreadSafeBase::HasOneRef() const {
return AtomicRefCountIsOne(&ref_count_);
}
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() = default;
-
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
@@ -25,6 +31,10 @@ RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
void RefCountedThreadSafeBase::AddRef() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such a object has to be made by AdoptRef or"
+ << " MakeShared.";
#endif
AtomicRefCountInc(&ref_count_);
}
@@ -43,6 +53,23 @@ bool RefCountedThreadSafeBase::Release() const {
return false;
}
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+ return sequence_checker_.CalledOnValidSequence() ||
+ !AtomicRefCountIsZero(&g_cross_thread_ref_count_access_allow_count);
+}
+#endif
+
} // namespace subtle
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+ AtomicRefCountInc(&g_cross_thread_ref_count_access_allow_count);
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+ AtomicRefCountDec(&g_cross_thread_ref_count_access_allow_count);
+}
+#endif
+
} // namespace base
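
The allow-count above is consulted by RefCountedBase::CalledOnValidSequence(),
so the scoper suppresses the sequence DCHECK for its lifetime. A hedged usage
sketch (LegacyObject is a hypothetical RefCounted type):

    #include "base/memory/ref_counted.h"

    void AddRefOffSequence(scoped_refptr<LegacyObject> obj) {
      // Pre-existing code with external synchronization may opt out of the
      // sequence check; new code should std::move() refs between sequences
      // instead.
      base::ScopedAllowCrossThreadRefCountAccess allow;
      scoped_refptr<LegacyObject> extra = obj;  // Off-sequence AddRef passes.
    }
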
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index ff46e6d6e5a..be493f632f7 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -16,24 +16,40 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/sequence_checker.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
+template <class T>
+class scoped_refptr;
+
namespace base {
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
namespace subtle {
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
class BASE_EXPORT RefCountedBase {
public:
bool HasOneRef() const { return ref_count_ == 1; }
protected:
- RefCountedBase()
- : ref_count_(0)
+ explicit RefCountedBase(StartRefCountFromZeroTag) {
#if DCHECK_IS_ON()
- , in_dtor_(false)
+ sequence_checker_.DetachFromSequence();
+#endif
+ }
+
+ explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+ sequence_checker_.DetachFromSequence();
#endif
- {
}
~RefCountedBase() {
@@ -42,7 +58,6 @@ class BASE_EXPORT RefCountedBase {
#endif
}
-
void AddRef() const {
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
@@ -50,32 +65,62 @@ class BASE_EXPORT RefCountedBase {
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
+ DCHECK(!needs_adopt_ref_)
+ << "This RefCounted object is created with non-zero reference count."
+ << " The first reference to such an object has to be made by AdoptRef or"
+ << " MakeShared.";
+ if (ref_count_ >= 1) {
+ DCHECK(CalledOnValidSequence());
+ }
#endif
+
++ref_count_;
}
// Returns true if the object should self-delete.
bool Release() const {
+ --ref_count_;
+
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
-#endif
- if (--ref_count_ == 0) {
-#if DCHECK_IS_ON()
+ if (ref_count_ == 0)
in_dtor_ = true;
+
+ if (ref_count_ >= 1)
+ DCHECK(CalledOnValidSequence());
+ if (ref_count_ == 1)
+ sequence_checker_.DetachFromSequence();
#endif
- return true;
- }
- return false;
+
+ return ref_count_ == 0;
}
private:
- mutable size_t ref_count_;
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
#if DCHECK_IS_ON()
- mutable bool in_dtor_;
+ bool CalledOnValidSequence() const;
+#endif
+
+ mutable size_t ref_count_ = 0;
+
+#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
+ mutable bool in_dtor_ = false;
+ mutable SequenceChecker sequence_checker_;
#endif
DFAKE_MUTEX(add_release_);
@@ -88,7 +133,13 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool HasOneRef() const;
protected:
- RefCountedThreadSafeBase();
+ explicit RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+ explicit RefCountedThreadSafeBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+ needs_adopt_ref_ = true;
+#endif
+ }
+
~RefCountedThreadSafeBase();
void AddRef() const;
@@ -97,8 +148,19 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool Release() const;
private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ void Adopted() const {
+#if DCHECK_IS_ON()
+ DCHECK(needs_adopt_ref_);
+ needs_adopt_ref_ = false;
+#endif
+ }
+
mutable AtomicRefCount ref_count_ = 0;
#if DCHECK_IS_ON()
+ mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
@@ -107,6 +169,27 @@ class BASE_EXPORT RefCountedThreadSafeBase {
} // namespace subtle
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this: callsites handling thread-safety through
+// locks should use RefCountedThreadSafe (the overhead of its atomics is
+// negligible compared to locks anyway), and callsites doing explicit
+// sequencing should properly std::move() the ref to avoid hitting this check.
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+ ScopedAllowCrossThreadRefCountAccess();
+ ~ScopedAllowCrossThreadRefCountAccess();
+#else
+ ScopedAllowCrossThreadRefCountAccess() {}
+ ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
+
//
// A base class for reference counted classes. Otherwise, known as a cheap
// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
@@ -121,10 +204,45 @@ class BASE_EXPORT RefCountedThreadSafeBase {
//
// You should always make your destructor non-public, to avoid any code deleting
// the object accidentally while there are references to it.
+//
+//
+// Ref count manipulation on RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the
+// ref count is more than 1, the RefCounted class verifies that ref updates
+// are made on the same execution sequence as the previous ones.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to a start-from-one ref count. Add REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()
+// to the ref counted class to opt in.
+//
+// If an object has a start-from-one ref count, the first scoped_refptr needs
+// to be created by base::AdoptRef() or base::MakeShared(). base::MakeShared()
+// can create both types of ref counted object.
+//
+// The motivations for using a start-from-one ref count are:
+// - Start-from-one ref count doesn't need the ref count increment for the
+// first reference.
+// - It can detect invalid acquisition of an object that is being deleted
+// and has a zero ref count. That tends to happen with a custom deleter
+// that delays the deletion.
+// TODO(tzik): Implement invalid acquisition detection.
+// - Behavior parity with Blink's WTF::RefCounted, whose count starts from
+// one; start-from-one ref counting is a step toward merging WTF::RefCounted
+// into base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
+ static constexpr ::base::subtle::StartRefCountFromOneTag \
+ kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+
template <class T>
class RefCounted : public subtle::RefCountedBase {
public:
- RefCounted() = default;
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedBase::AddRef();
@@ -140,7 +258,7 @@ class RefCounted : public subtle::RefCountedBase {
~RefCounted() = default;
private:
- DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
+ DISALLOW_COPY_AND_ASSIGN(RefCounted);
};
// Forward declaration.
@@ -171,10 +289,17 @@ struct DefaultRefCountedThreadSafeTraits {
// private:
// friend class base::RefCountedThreadSafe<MyFoo>;
// ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
- RefCountedThreadSafe() = default;
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
+ explicit RefCountedThreadSafe()
+ : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedThreadSafeBase::AddRef();
@@ -214,6 +339,43 @@ class RefCountedData
~RefCountedData() = default;
};
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+ using Tag = typename std::decay<decltype(T::kRefCountPreference)>::type;
+ static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ "Use AdoptRef only for the reference count starts from one.");
+
+ DCHECK(obj);
+ DCHECK(obj->HasOneRef());
+ obj->Adopted();
+ return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+ return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+ return AdoptRef(obj);
+}
+
+} // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps it in
+// a scoped_refptr.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeShared(Args&&... args) {
+ T* obj = new T(std::forward<Args>(args)...);
+ return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
} // namespace base
//
@@ -354,14 +516,10 @@ class scoped_refptr {
return *this;
}
- void swap(T** pp) {
- T* p = ptr_;
- ptr_ = *pp;
- *pp = p;
- }
-
void swap(scoped_refptr<T>& r) {
- swap(&r.ptr_);
+ T* tmp = ptr_;
+ ptr_ = r.ptr_;
+ r.ptr_ = tmp;
}
explicit operator bool() const { return ptr_ != nullptr; }
@@ -385,6 +543,11 @@ class scoped_refptr {
T* ptr_ = nullptr;
private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
// Friend required for move constructors that set r.ptr_ to null.
template <typename U>
friend class scoped_refptr;
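
Putting the new pieces together, a minimal sketch of a start-from-one type and
the two sanctioned ways of taking the first reference (Widget is
hypothetical):

    #include "base/memory/ref_counted.h"

    class Widget : public base::RefCounted<Widget> {
     public:
      REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
      Widget() = default;

     private:
      friend class base::RefCounted<Widget>;
      ~Widget() = default;
    };

    void MakeWidgets() {
      // Both forms consume the initial reference without an extra AddRef().
      scoped_refptr<Widget> a = base::MakeShared<Widget>();
      scoped_refptr<Widget> b = base::AdoptRef(new Widget);
      // scoped_refptr<Widget> c(new Widget);  // Would DCHECK: use AdoptRef.
    }
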
diff --git a/chromium/base/memory/ref_counted_delete_on_sequence.h b/chromium/base/memory/ref_counted_delete_on_sequence.h
index a7600f9f854..2be035ce16d 100644
--- a/chromium/base/memory/ref_counted_delete_on_sequence.h
+++ b/chromium/base/memory/ref_counted_delete_on_sequence.h
@@ -33,10 +33,14 @@ namespace base {
template <class T>
class RefCountedDeleteOnSequence : public subtle::RefCountedThreadSafeBase {
public:
+ static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+ subtle::kStartRefCountFromZeroTag;
+
// A SequencedTaskRunner for the current sequence can be acquired by calling
// SequencedTaskRunnerHandle::Get().
RefCountedDeleteOnSequence(scoped_refptr<SequencedTaskRunner> task_runner)
- : task_runner_(std::move(task_runner)) {
+ : subtle::RefCountedThreadSafeBase(T::kRefCountPreference),
+ task_runner_(std::move(task_runner)) {
DCHECK(task_runner_);
}
diff --git a/chromium/base/memory/ref_counted_unittest.cc b/chromium/base/memory/ref_counted_unittest.cc
index 65c15d26ab1..515f4227eab 100644
--- a/chromium/base/memory/ref_counted_unittest.cc
+++ b/chromium/base/memory/ref_counted_unittest.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/test/gtest_util.h"
#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -122,6 +123,16 @@ scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
return self_assign;
}
+class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+ InitialRefCountIsOne() {}
+
+ private:
+ friend class base::RefCounted<InitialRefCountIsOne>;
+ ~InitialRefCountIsOne() {}
+};
} // end namespace
@@ -528,3 +539,30 @@ TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
scoped_refptr<Other> other2(other);
EXPECT_EQ(other2, Overloaded(std::move(other)));
}
+
+TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
+ scoped_refptr<InitialRefCountIsOne> obj =
+ base::MakeShared<InitialRefCountIsOne>();
+ EXPECT_TRUE(obj->HasOneRef());
+ obj = nullptr;
+
+ scoped_refptr<InitialRefCountIsOne> obj2 =
+ base::AdoptRef(new InitialRefCountIsOne);
+ EXPECT_TRUE(obj2->HasOneRef());
+ obj2 = nullptr;
+
+ scoped_refptr<Other> obj3 = base::MakeShared<Other>();
+ EXPECT_TRUE(obj3->HasOneRef());
+ obj3 = nullptr;
+}
+
+TEST(RefCountedDeathTest, TestAdoptRef) {
+ EXPECT_DCHECK_DEATH(make_scoped_refptr(new InitialRefCountIsOne));
+
+ InitialRefCountIsOne* ptr = nullptr;
+ EXPECT_DCHECK_DEATH(base::AdoptRef(ptr));
+
+ scoped_refptr<InitialRefCountIsOne> obj =
+ base::MakeShared<InitialRefCountIsOne>();
+ EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
+}
diff --git a/chromium/base/memory/ref_counted_unittest.nc b/chromium/base/memory/ref_counted_unittest.nc
new file mode 100644
index 00000000000..5022779214a
--- /dev/null
+++ b/chromium/base/memory/ref_counted_unittest.nc
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class InitialRefCountIsZero : public base::RefCounted<InitialRefCountIsZero> {
+ public:
+ InitialRefCountIsZero() {}
+ private:
+ friend class base::RefCounted<InitialRefCountIsZero>;
+ ~InitialRefCountIsZero() {}
+};
+
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START) // [r"fatal error: static_assert failed \"Use AdoptRef only for the reference count starts from one\.\""]
+
+void WontCompile() {
+ AdoptRef(new InitialRefCountIsZero());
+}
+
+#endif
+
+} // namespace base
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index 3c68e90a031..4b66cc6edd2 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -255,9 +255,24 @@ class BASE_EXPORT SharedMemory {
return ShareToProcessCommon(process, new_handle, true, SHARE_CURRENT_MODE);
}
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+ !defined(OS_NACL)
+ using UniqueId = std::pair<dev_t, ino_t>;
+
+ struct UniqueIdHash {
+ size_t operator()(const UniqueId& id) const {
+ return HashInts(id.first, id.second);
+ }
+ };
+
+ // Returns a unique ID for this shared memory's handle. Note that this
+ // function may access the file system and be slow.
+ bool GetUniqueId(UniqueId* id) const;
+#endif
+
private:
#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
- !(defined(OS_MACOSX) && !defined(OS_IOS))
+ (!defined(OS_MACOSX) || defined(OS_IOS))
bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
#endif
@@ -301,6 +316,7 @@ class BASE_EXPORT SharedMemory {
DISALLOW_COPY_AND_ASSIGN(SharedMemory);
};
+
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_H_
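
On the POSIX configurations where it is compiled in, the new UniqueId and
UniqueIdHash pair can key a hash map of segments. A hedged sketch (Record and
SegmentSizes are hypothetical):

    #include <unordered_map>

    #include "base/memory/shared_memory.h"

    using SegmentSizes = std::unordered_map<base::SharedMemory::UniqueId,
                                            size_t,
                                            base::SharedMemory::UniqueIdHash>;

    void Record(const base::SharedMemory& shm, SegmentSizes* sizes) {
      base::SharedMemory::UniqueId id;
      if (shm.GetUniqueId(&id))  // May stat the backing file; can be slow.
        (*sizes)[id] = shm.mapped_size();
    }
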
diff --git a/chromium/base/memory/shared_memory_mac_unittest.cc b/chromium/base/memory/shared_memory_mac_unittest.cc
index c7d20ec0493..4ccee89deb4 100644
--- a/chromium/base/memory/shared_memory_mac_unittest.cc
+++ b/chromium/base/memory/shared_memory_mac_unittest.cc
@@ -204,7 +204,7 @@ class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
// similar tests.
service_name_ = CreateRandomServiceName();
server_port_.reset(BecomeMachServer(service_name_.c_str()));
- child_process_ = SpawnChild(name);
+ spawn_child_ = SpawnChild(name);
client_port_.reset(ReceiveMachPort(server_port_.get()));
}
@@ -221,7 +221,7 @@ class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
// process.
mac::ScopedMachSendRight client_port_;
- base::Process child_process_;
+ base::SpawnChildResult spawn_child_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
};
@@ -237,7 +237,7 @@ TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
MACH_MSG_TYPE_COPY_SEND);
int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
@@ -277,7 +277,7 @@ TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
SendMachPort(
client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
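
A short sketch (not part of the patch) of the new SpawnChild contract these test updates reflect, assuming a MultiProcessTest fixture with a hypothetical "ChildMain" entry point; the process handle now lives inside the returned struct:

    base::SpawnChildResult result = SpawnChild("ChildMain");
    ASSERT_TRUE(result.process.IsValid());
    int exit_code = 0;
    EXPECT_TRUE(result.process.WaitForExit(&exit_code));
    EXPECT_EQ(0, exit_code);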
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index 3a18faa83dd..bf29e9f02a1 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -15,12 +15,14 @@
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/memory/shared_memory_helper.h"
+#include "base/memory/shared_memory_tracker.h"
#include "base/posix/eintr_wrapper.h"
#include "base/posix/safe_strerror.h"
#include "base/process/process_metrics.h"
#include "base/scoped_generic.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
+#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
@@ -283,8 +285,10 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
if (mmap_succeeded) {
mapped_size_ = bytes;
- DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ DCHECK_EQ(0U,
+ reinterpret_cast<uintptr_t>(memory_) &
+ (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
} else {
memory_ = NULL;
}
@@ -297,6 +301,7 @@ bool SharedMemory::Unmap() {
return false;
munmap(memory_, mapped_size_);
+ SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
memory_ = NULL;
mapped_size_ = 0;
return true;
@@ -390,4 +395,22 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
return true;
}
+bool SharedMemory::GetUniqueId(SharedMemory::UniqueId* id) const {
+ // This function is called just after mmap. fstat is a system call that might
+ // cause I/O. It's safe to call fstat here because mmap for shared memory is
+ // called in two cases:
+ // 1) To handle file-mapped memory
+ // 2) To handle anonymous shared memory
+ // In 1), I/O is already permitted. In 2), the backing store is in the page
+ // cache, so fstat doesn't cause I/O access to the disk. See the discussion
+ // at crbug.com/604726#c41.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ struct stat file_stat;
+ if (HANDLE_EINTR(::fstat(static_cast<int>(handle().fd), &file_stat)) != 0)
+ return false;
+ id->first = file_stat.st_dev;
+ id->second = file_stat.st_ino;
+ return true;
+}
+
} // namespace base
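
For reference, a standalone sketch (not part of the patch) of the (st_dev, st_ino) identity idiom GetUniqueId relies on: the pair identifies a file regardless of which fd or path refers to it.

    #include <sys/stat.h>
    #include <utility>

    bool FileIdentityForFd(int fd, std::pair<dev_t, ino_t>* id) {
      struct stat file_stat;
      if (fstat(fd, &file_stat) != 0)
        return false;
      id->first = file_stat.st_dev;   // Device containing the file.
      id->second = file_stat.st_ino;  // Inode number on that device.
      return true;
    }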
diff --git a/chromium/base/memory/shared_memory_tracker.cc b/chromium/base/memory/shared_memory_tracker.cc
new file mode 100644
index 00000000000..8613f595336
--- /dev/null
+++ b/chromium/base/memory/shared_memory_tracker.cc
@@ -0,0 +1,92 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_tracker.h"
+
+#include "base/memory/shared_memory.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
+
+namespace base {
+
+SharedMemoryTracker::Usage::Usage() = default;
+
+SharedMemoryTracker::Usage::Usage(const Usage& rhs) = default;
+
+SharedMemoryTracker::Usage::~Usage() = default;
+
+// static
+SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
+ static SharedMemoryTracker* instance = new SharedMemoryTracker;
+ return instance;
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+ const SharedMemory& shared_memory) {
+ Usage usage;
+ // |shared_memory|'s unique ID must be generated here; it would be too late
+ // at OnMemoryDump. The ID is derived from the SharedMemoryHandle, and the
+ // handle might already be closed by the time OnMemoryDump runs.
+ // IncrementMemoryUsage is called just after mmap, so the handle is
+ // guaranteed to be alive here. See the discussion at crbug.com/604726#c30.
+ SharedMemory::UniqueId id;
+ if (!shared_memory.GetUniqueId(&id))
+ return;
+ usage.unique_id = id;
+ usage.size = shared_memory.mapped_size();
+ AutoLock hold(usages_lock_);
+ usages_[&shared_memory] = usage;
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+ const SharedMemory& shared_memory) {
+ AutoLock hold(usages_lock_);
+ usages_.erase(&shared_memory);
+}
+
+bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+ trace_event::ProcessMemoryDump* pmd) {
+ std::unordered_map<SharedMemory::UniqueId, size_t, SharedMemory::UniqueIdHash>
+ sizes;
+ {
+ AutoLock hold(usages_lock_);
+ for (const auto& usage : usages_)
+ sizes[usage.second.unique_id] += usage.second.size;
+ }
+ for (auto& size : sizes) {
+ const SharedMemory::UniqueId& id = size.first;
+ std::string dump_name = StringPrintf("%s/%lld.%lld", "shared_memory",
+ static_cast<long long>(id.first),
+ static_cast<long long>(id.second));
+ auto guid = trace_event::MemoryAllocatorDumpGuid(dump_name);
+ trace_event::MemoryAllocatorDump* local_dump =
+ pmd->CreateAllocatorDump(dump_name);
+ // TODO(hajimehoshi): The size recorded here is the virtual size, not the
+ // resident size. Fix this to record the resident size.
+ local_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+ trace_event::MemoryAllocatorDump::kUnitsBytes,
+ size.second);
+ trace_event::MemoryAllocatorDump* global_dump =
+ pmd->CreateSharedGlobalAllocatorDump(guid);
+ global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+ trace_event::MemoryAllocatorDump::kUnitsBytes,
+ size.second);
+ // TODO(hajimehoshi): Detect whether the shared memory comes from the
+ // browser, renderer or GPU process.
+ // TODO(hajimehoshi): Shared memory reported by GPU and discardable is
+ // currently double-counted. Add ownership edges to avoid this.
+ pmd->AddOwnershipEdge(local_dump->guid(), global_dump->guid());
+ }
+ return true;
+}
+
+SharedMemoryTracker::SharedMemoryTracker() {
+ trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "SharedMemoryTracker", nullptr);
+}
+
+SharedMemoryTracker::~SharedMemoryTracker() = default;
+
+} // namespace base
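
A condensed sketch (not part of the patch) of the MemoryDumpProvider pattern the tracker follows; the provider name, dump name, and byte count are illustrative:

    class MyUsageProvider : public base::trace_event::MemoryDumpProvider {
     public:
      MyUsageProvider() {
        base::trace_event::MemoryDumpManager::GetInstance()
            ->RegisterDumpProvider(this, "MyUsageProvider", nullptr);
      }

      // Called by MemoryDumpManager when a memory-infra dump is requested.
      bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                        base::trace_event::ProcessMemoryDump* pmd) override {
        base::trace_event::MemoryAllocatorDump* dump =
            pmd->CreateAllocatorDump("my_component/usage");
        dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                        base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                        bytes_in_use_);
        return true;
      }

     private:
      size_t bytes_in_use_ = 0;
    };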
diff --git a/chromium/base/memory/shared_memory_tracker.h b/chromium/base/memory/shared_memory_tracker.h
new file mode 100644
index 00000000000..fe1a3dd3921
--- /dev/null
+++ b/chromium/base/memory/shared_memory_tracker.h
@@ -0,0 +1,56 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+#define BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+
+#include "base/memory/shared_memory.h"
+#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+namespace trace_event {
+class ProcessMemoryDump;
+}
+
+// SharedMemoryTracker tracks shared memory usage.
+class BASE_EXPORT SharedMemoryTracker
+ : public base::trace_event::MemoryDumpProvider {
+ public:
+ // Returns a singleton instance.
+ static SharedMemoryTracker* GetInstance();
+
+ // Records shared memory usage on mapping.
+ void IncrementMemoryUsage(const SharedMemory& shared_memory);
+
+ // Records shared memory usage on unmapping.
+ void DecrementMemoryUsage(const SharedMemory& shared_memory);
+
+ private:
+ struct Usage {
+ Usage();
+ Usage(const Usage& rhs);
+ ~Usage();
+ SharedMemory::UniqueId unique_id;
+ size_t size;
+ };
+
+ SharedMemoryTracker();
+ ~SharedMemoryTracker() override;
+
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
+ // Used to lock when |usages_| is modified or read.
+ Lock usages_lock_;
+ std::unordered_map<const SharedMemory*, Usage> usages_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index 19dedccb476..d87fad01d3f 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -682,16 +682,16 @@ TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
// Start |kNumTasks| processes, each of which atomically increments the first
// word by 1.
- Process processes[kNumTasks];
+ SpawnChildResult children[kNumTasks];
for (int index = 0; index < kNumTasks; ++index) {
- processes[index] = SpawnChild("SharedMemoryTestMain");
- ASSERT_TRUE(processes[index].IsValid());
+ children[index] = SpawnChild("SharedMemoryTestMain");
+ ASSERT_TRUE(children[index].process.IsValid());
}
// Check that each process exited correctly.
int exit_code = 0;
for (int index = 0; index < kNumTasks; ++index) {
- EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+ EXPECT_TRUE(children[index].process.WaitForExit(&exit_code));
EXPECT_EQ(0, exit_code);
}
diff --git a/chromium/base/memory/shared_memory_win_unittest.cc b/chromium/base/memory/shared_memory_win_unittest.cc
index 5fc132d25a1..d04e840236f 100644
--- a/chromium/base/memory/shared_memory_win_unittest.cc
+++ b/chromium/base/memory/shared_memory_win_unittest.cc
@@ -199,8 +199,10 @@ TEST_F(SharedMemoryWinTest, LowerPermissions) {
base::LaunchOptions options;
options.as_user = lowered_process_token.Get();
- base::Process process = SpawnChildWithOptions("LowerPermissions", options);
- ASSERT_TRUE(process.IsValid());
+
+ base::SpawnChildResult spawn_child =
+ SpawnChildWithOptions("LowerPermissions", options);
+ ASSERT_TRUE(spawn_child.process.IsValid());
SharedMemory memory;
memory.CreateAndMapAnonymous(1001);
@@ -208,15 +210,15 @@ TEST_F(SharedMemoryWinTest, LowerPermissions) {
// Duplicate into child process, giving only FILE_MAP_READ permissions.
HANDLE raw_handle = nullptr;
::DuplicateHandle(::GetCurrentProcess(), memory.handle().GetHandle(),
- process.Handle(), &raw_handle,
+ spawn_child.process.Handle(), &raw_handle,
FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
ASSERT_TRUE(raw_handle);
WriteHandleToPipe(communication_pipe.Get(), raw_handle);
int exit_code;
- EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
- &exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), &exit_code));
EXPECT_EQ(0, exit_code);
}
diff --git a/chromium/base/memory/singleton.h b/chromium/base/memory/singleton.h
index cfdff7831f9..5c58d5fe294 100644
--- a/chromium/base/memory/singleton.h
+++ b/chromium/base/memory/singleton.h
@@ -153,14 +153,17 @@ subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// Example usage:
//
// In your header:
-// template <typename T> struct DefaultSingletonTraits;
+// namespace base {
+// template <typename T>
+// struct DefaultSingletonTraits;
+// }
// class FooClass {
// public:
// static FooClass* GetInstance(); <-- See comment below on this.
// void Bar() { ... }
// private:
// FooClass() { ... }
-// friend struct DefaultSingletonTraits<FooClass>;
+// friend struct base::DefaultSingletonTraits<FooClass>;
//
// DISALLOW_COPY_AND_ASSIGN(FooClass);
// };
@@ -168,7 +171,14 @@ subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// In your source file:
// #include "base/memory/singleton.h"
// FooClass* FooClass::GetInstance() {
-// return Singleton<FooClass>::get();
+// return base::Singleton<FooClass>::get();
+// }
+//
+// Or for leaky singletons:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return base::Singleton<
+// FooClass, base::LeakySingletonTraits<FooClass>>::get();
// }
//
// And to call methods on FooClass:
diff --git a/chromium/base/memory/singleton_objc.h b/chromium/base/memory/singleton_objc.h
deleted file mode 100644
index 6df3f7757ef..00000000000
--- a/chromium/base/memory/singleton_objc.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Support for using the Singleton<T> pattern with Objective-C objects. A
-// SingletonObjC is the same as a Singleton, except the default traits are
-// appropriate for Objective-C objects. A typical Objective-C object of type
-// NSExampleType can be maintained as a singleton and accessed with:
-//
-// NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
-//
-// The first time this is used, it will create exampleSingleton as the result
-// of [[NSExampleType alloc] init]. Subsequent calls will return the same
-// NSExampleType* object. The object will be released by calling
-// -[NSExampleType release] when Singleton's atexit routines run
-// (see singleton.h).
-//
-// For Objective-C objects initialized through means other than the
-// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
-// as needed:
-//
-// struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
-// static Foo* New() {
-// return [[Foo alloc] initWithName:@"selecty"];
-// }
-// };
-// ...
-// Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
-
-#ifndef BASE_MEMORY_SINGLETON_OBJC_H_
-#define BASE_MEMORY_SINGLETON_OBJC_H_
-
-#import <Foundation/Foundation.h>
-#include "base/memory/singleton.h"
-
-// Singleton traits usable to manage traditional Objective-C objects, which
-// are instantiated by sending |alloc| and |init| messages, and are deallocated
-// in a memory-managed environment when their retain counts drop to 0 by
-// sending |release| messages.
-template<typename Type>
-struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
- static Type* New() {
- return [[Type alloc] init];
- }
-
- static void Delete(Type* object) {
- [object release];
- }
-};
-
-// Exactly like Singleton, but without the DefaultSingletonObjCTraits as the
-// default trait class. This makes it straightforward for Objective-C++ code
-// to hold Objective-C objects as singletons.
-template<typename Type,
- typename Traits = DefaultSingletonObjCTraits<Type>,
- typename DifferentiatingType = Type>
-class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
-};
-
-#endif // BASE_MEMORY_SINGLETON_OBJC_H_
diff --git a/chromium/base/memory/weak_ptr_unittest.cc b/chromium/base/memory/weak_ptr_unittest.cc
index 1a4870eab18..d223bd2bf6d 100644
--- a/chromium/base/memory/weak_ptr_unittest.cc
+++ b/chromium/base/memory/weak_ptr_unittest.cc
@@ -32,7 +32,8 @@ class OffThreadObjectCreator {
Thread creator_thread("creator_thread");
creator_thread.Start();
creator_thread.task_runner()->PostTask(
- FROM_HERE, base::Bind(OffThreadObjectCreator::CreateObject, &result));
+ FROM_HERE,
+ base::BindOnce(OffThreadObjectCreator::CreateObject, &result));
}
DCHECK(result); // We synchronized on thread destruction above.
return result;
@@ -73,8 +74,8 @@ class BackgroundThread : public Thread {
WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
- FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromTarget, arrow,
- target, &completion));
+ FROM_HERE, base::BindOnce(&BackgroundThread::DoCreateArrowFromTarget,
+ arrow, target, &completion));
completion.Wait();
}
@@ -82,8 +83,8 @@ class BackgroundThread : public Thread {
WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
- FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromArrow, arrow,
- other, &completion));
+ FROM_HERE, base::BindOnce(&BackgroundThread::DoCreateArrowFromArrow,
+ arrow, other, &completion));
completion.Wait();
}
@@ -92,7 +93,7 @@ class BackgroundThread : public Thread {
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE,
- base::Bind(&BackgroundThread::DoDeleteTarget, object, &completion));
+ base::BindOnce(&BackgroundThread::DoDeleteTarget, object, &completion));
completion.Wait();
}
@@ -100,8 +101,8 @@ class BackgroundThread : public Thread {
WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
- FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrow, object,
- &completion));
+ FROM_HERE, base::BindOnce(&BackgroundThread::DoCopyAndAssignArrow,
+ object, &completion));
completion.Wait();
}
@@ -109,8 +110,8 @@ class BackgroundThread : public Thread {
WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
- FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrowBase,
- object, &completion));
+ FROM_HERE, base::BindOnce(&BackgroundThread::DoCopyAndAssignArrowBase,
+ object, &completion));
completion.Wait();
}
@@ -119,7 +120,7 @@ class BackgroundThread : public Thread {
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(
FROM_HERE,
- base::Bind(&BackgroundThread::DoDeleteArrow, object, &completion));
+ base::BindOnce(&BackgroundThread::DoDeleteArrow, object, &completion));
completion.Wait();
}
@@ -127,8 +128,9 @@ class BackgroundThread : public Thread {
WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
Target* result = nullptr;
- task_runner()->PostTask(FROM_HERE, base::Bind(&BackgroundThread::DoDeRef,
- arrow, &result, &completion));
+ task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&BackgroundThread::DoDeRef, arrow, &result,
+ &completion));
completion.Wait();
return result;
}
diff --git a/chromium/base/message_loop/incoming_task_queue.cc b/chromium/base/message_loop/incoming_task_queue.cc
index c7faa9a5b0f..67796ffa377 100644
--- a/chromium/base/message_loop/incoming_task_queue.cc
+++ b/chromium/base/message_loop/incoming_task_queue.cc
@@ -5,6 +5,7 @@
#include "base/message_loop/incoming_task_queue.h"
#include <limits>
+#include <utility>
#include "base/location.h"
#include "base/message_loop/message_loop.h"
@@ -59,16 +60,19 @@ IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
bool IncomingTaskQueue::AddToIncomingQueue(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay,
bool nestable) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task);
DLOG_IF(WARNING,
delay.InSeconds() > kTaskDelayWarningThresholdInSeconds)
<< "Requesting super-long task delay period of " << delay.InSeconds()
<< " seconds from here: " << from_here.ToString();
- PendingTask pending_task(from_here, task, CalculateDelayedRuntime(delay),
- nestable);
+ PendingTask pending_task(from_here, std::move(task),
+ CalculateDelayedRuntime(delay), nestable);
#if defined(OS_WIN)
// We consider the task needs a high resolution timer if the delay is
// more than 0 and less than 32ms. This caps the relative error to
diff --git a/chromium/base/message_loop/incoming_task_queue.h b/chromium/base/message_loop/incoming_task_queue.h
index 157e47fa14b..17bea07674c 100644
--- a/chromium/base/message_loop/incoming_task_queue.h
+++ b/chromium/base/message_loop/incoming_task_queue.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -35,7 +36,7 @@ class BASE_EXPORT IncomingTaskQueue
// returns false. In all cases, the ownership of |task| is transferred to the
// called method.
bool AddToIncomingQueue(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay,
bool nestable);
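
A minimal sketch (not part of the patch) of what the OnceClosure signature implies for callers; |queue| stands in for a scoped_refptr<internal::IncomingTaskQueue> and DoWork is hypothetical:

    base::OnceClosure task = base::BindOnce(&DoWork);
    queue->AddToIncomingQueue(FROM_HERE, std::move(task), base::TimeDelta(),
                              true /* nestable */);
    // |task| is null after the move; handing a null OnceClosure to
    // AddToIncomingQueue would now trip the CHECK added above.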
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index 8417ce49c0c..2ef4537d339 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -320,6 +320,8 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Runs the specified PendingTask.
void RunTask(PendingTask* pending_task);
+ bool nesting_allowed() const { return allow_nesting_; }
+
// Disallow nesting. After this is called, running a nested RunLoop or calling
// Add/RemoveNestingObserver() on this MessageLoop will crash.
void DisallowNesting() { allow_nesting_ = false; }
diff --git a/chromium/base/message_loop/message_loop_task_runner.cc b/chromium/base/message_loop/message_loop_task_runner.cc
index c9b5ffe3f73..aece087b768 100644
--- a/chromium/base/message_loop/message_loop_task_runner.cc
+++ b/chromium/base/message_loop/message_loop_task_runner.cc
@@ -4,6 +4,8 @@
#include "base/message_loop/message_loop_task_runner.h"
+#include <utility>
+
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/incoming_task_queue.h"
@@ -24,18 +26,20 @@ void MessageLoopTaskRunner::BindToCurrentThread() {
bool MessageLoopTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
- return incoming_queue_->AddToIncomingQueue(from_here, task, delay, true);
+ return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+ true);
}
bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK(!task.is_null()) << from_here.ToString();
- return incoming_queue_->AddToIncomingQueue(from_here, task, delay, false);
+ return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+ false);
}
bool MessageLoopTaskRunner::RunsTasksOnCurrentThread() const {
diff --git a/chromium/base/message_loop/message_loop_task_runner.h b/chromium/base/message_loop/message_loop_task_runner.h
index 5e70b128b20..99a96a711e3 100644
--- a/chromium/base/message_loop/message_loop_task_runner.h
+++ b/chromium/base/message_loop/message_loop_task_runner.h
@@ -6,6 +6,7 @@
#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
@@ -31,10 +32,10 @@ class BASE_EXPORT MessageLoopTaskRunner : public SingleThreadTaskRunner {
// SingleThreadTaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) override;
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ OnceClosure task,
base::TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
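
A small sketch (not part of the patch) of the Bind -> BindOnce migration visible throughout the test updates below, assuming a hypothetical DoWork free function; a copyable Closure still converts implicitly to OnceClosure at these call sites:

    void DoWork();  // Hypothetical.

    void PostBoth(scoped_refptr<base::SingleThreadTaskRunner> runner) {
      // Move-only and single-shot: the preferred form.
      runner->PostTask(FROM_HERE, base::BindOnce(&DoWork));
      // Repeating callbacks continue to work via implicit conversion.
      base::Closure repeating = base::Bind(&DoWork);
      runner->PostTask(FROM_HERE, repeating);
    }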
diff --git a/chromium/base/message_loop/message_loop_task_runner_unittest.cc b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
index 54551daadd2..5fa01f01b06 100644
--- a/chromium/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
@@ -38,8 +38,8 @@ class MessageLoopTaskRunnerTest : public testing::Test {
// Allow us to pause the |task_thread_|'s MessageLoop.
task_thread_.task_runner()->PostTask(
- FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
- Unretained(this)));
+ FROM_HERE, BindOnce(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
+ Unretained(this)));
}
void TearDown() override {
@@ -114,8 +114,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
ASSERT_TRUE(task_thread_.task_runner()->PostTaskAndReply(
- FROM_HERE, Bind(&RecordLoop, task_recorder),
- Bind(&RecordLoopAndQuit, reply_recorder)));
+ FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+ BindOnce(&RecordLoopAndQuit, reply_recorder)));
// Die if base::Bind doesn't retain a reference to the recorders.
task_recorder = NULL;
@@ -127,7 +127,7 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
RunLoop().Run();
EXPECT_EQ(task_thread_.message_loop(), task_run_on);
- EXPECT_EQ(current_loop_.get(), task_deleted_on);
+ EXPECT_EQ(task_thread_.message_loop(), task_deleted_on);
EXPECT_EQ(current_loop_.get(), reply_run_on);
EXPECT_EQ(current_loop_.get(), reply_deleted_on);
EXPECT_LT(task_delete_order, reply_delete_order);
@@ -152,9 +152,9 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
UnblockTaskThread();
task_thread_.Stop();
- ASSERT_FALSE(
- task_runner->PostTaskAndReply(FROM_HERE, Bind(&RecordLoop, task_recorder),
- Bind(&RecordLoopAndQuit, reply_recorder)));
+ ASSERT_FALSE(task_runner->PostTaskAndReply(
+ FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+ BindOnce(&RecordLoopAndQuit, reply_recorder)));
// The relay should have properly deleted its resources leaving us as the only
// reference.
@@ -182,8 +182,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
// Enqueue the relay.
ASSERT_TRUE(current_loop_->task_runner()->PostTaskAndReply(
- FROM_HERE, Bind(&RecordLoop, task_recorder),
- Bind(&RecordLoopAndQuit, reply_recorder)));
+ FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+ BindOnce(&RecordLoopAndQuit, reply_recorder)));
// Die if base::Bind doesn't retain a reference to the recorders.
task_recorder = NULL;
@@ -200,7 +200,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
EXPECT_LT(task_delete_order, reply_delete_order);
}
-TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
+TEST_F(MessageLoopTaskRunnerTest,
+ PostTaskAndReply_DeadReplyTaskRunnerBehavior) {
// Annotate the scope as having memory leaks to suppress heapchecker reports.
ANNOTATE_SCOPED_MEMORY_LEAK;
MessageLoop* task_run_on = NULL;
@@ -217,8 +218,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
// Enqueue the relay.
task_thread_.task_runner()->PostTaskAndReply(
- FROM_HERE, Bind(&RecordLoop, task_recorder),
- Bind(&RecordLoopAndQuit, reply_recorder));
+ FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+ BindOnce(&RecordLoopAndQuit, reply_recorder));
// Die if base::Bind doesn't retain a reference to the recorders.
task_recorder = NULL;
@@ -237,11 +238,13 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
MessageLoop* task_loop = task_thread_.message_loop();
task_thread_.Stop();
+ // Even if the reply task runner is already gone, the original task should
+ // already have been deleted. However, the reply, which hasn't executed yet,
+ // should leak to avoid thread-safety issues.
EXPECT_EQ(task_loop, task_run_on);
- ASSERT_FALSE(task_deleted_on);
+ EXPECT_EQ(task_loop, task_deleted_on);
EXPECT_FALSE(reply_run_on);
ASSERT_FALSE(reply_deleted_on);
- EXPECT_EQ(task_delete_order, reply_delete_order);
// The PostTaskAndReplyRelay is leaked here. Even if we had a reference to
// it, we cannot just delete it because PostTaskAndReplyRelay's destructor
@@ -328,8 +331,8 @@ TEST_F(MessageLoopTaskRunnerThreadingTest, Delete) {
TEST_F(MessageLoopTaskRunnerThreadingTest, PostTask) {
EXPECT_TRUE(file_thread_->task_runner()->PostTask(
- FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
- Unretained(this))));
+ FROM_HERE, BindOnce(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
+ Unretained(this))));
RunLoop().Run();
}
@@ -342,7 +345,7 @@ TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
test_thread->Stop();
bool ret = task_runner->PostTask(
- FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
+ FROM_HERE, BindOnce(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
EXPECT_FALSE(ret);
}
@@ -355,7 +358,7 @@ TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadIsDeleted) {
task_runner = test_thread->task_runner();
}
bool ret = task_runner->PostTask(
- FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
+ FROM_HERE, BindOnce(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
EXPECT_FALSE(ret);
}
diff --git a/chromium/base/message_loop/message_loop_test.cc b/chromium/base/message_loop/message_loop_test.cc
index 6ffb16d05ae..ad9f127390e 100644
--- a/chromium/base/message_loop/message_loop_test.cc
+++ b/chromium/base/message_loop/message_loop_test.cc
@@ -98,21 +98,22 @@ void RunTest_PostTask(MessagePumpFactory factory) {
// Add tests to message loop
scoped_refptr<Foo> foo(new Foo());
std::string a("a"), b("b"), c("c"), d("d");
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&Foo::Test0, foo));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&Foo::Test1ConstRef, foo, a));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&Foo::Test1Ptr, foo, &b));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&Foo::Test1Int, foo, 100));
+ BindOnce(&Foo::Test0, foo));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&Foo::Test2Ptr, foo, &a, &c));
+ BindOnce(&Foo::Test1Ptr, foo, &b));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&Foo::Test2Mixed, foo, a, &d));
+ BindOnce(&Foo::Test1Int, foo, 100));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test2Ptr, foo, &a, &c));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test2Mixed, foo, a, &d));
// After all tests, post a message that will shut down the message loop
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
- Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
+ BindOnce(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
// Now kick things off
RunLoop().Run();
@@ -133,7 +134,7 @@ void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory) {
Time run_time;
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
Time time_before_run = Time::Now();
RunLoop().Run();
@@ -152,12 +153,12 @@ void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory) {
Time run_time1, run_time2;
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
TimeDelta::FromMilliseconds(200));
// If we get a large pause in execution (due to a context switch) here, this
// test could fail.
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(10));
RunLoop().Run();
@@ -184,9 +185,9 @@ void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory) {
Time run_time1, run_time2;
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
RunLoop().Run();
EXPECT_EQ(0, num_tasks);
@@ -206,9 +207,10 @@ void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory) {
int num_tasks = 2;
Time run_time;
- loop.task_runner()->PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
+ loop.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&SlowFunc, kPause, &num_tasks));
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
TimeDelta::FromMilliseconds(10));
Time time_before_run = Time::Now();
@@ -236,10 +238,10 @@ void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory) {
// Clutter the ML with tasks.
for (int i = 1; i < num_tasks; ++i)
loop.task_runner()->PostTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks));
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(1));
RunLoop().Run();
@@ -261,10 +263,10 @@ void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory) {
Time run_time1, run_time2;
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
TimeDelta::FromSeconds(1000));
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
TimeDelta::FromMilliseconds(10));
Time start_time = Time::Now();
@@ -303,7 +305,7 @@ class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
*was_deleted_ = true;
if (post_on_delete_.get())
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_));
+ FROM_HERE, BindOnce(&RecordDeletionProbe::Run, post_on_delete_));
}
scoped_refptr<RecordDeletionProbe> post_on_delete_;
@@ -317,12 +319,13 @@ void RunTest_EnsureDeletion(MessagePumpFactory factory) {
std::unique_ptr<MessagePump> pump(factory());
MessageLoop loop(std::move(pump));
loop.task_runner()->PostTask(
- FROM_HERE, Bind(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &a_was_deleted)));
+ FROM_HERE, BindOnce(&RecordDeletionProbe::Run,
+ new RecordDeletionProbe(NULL, &a_was_deleted)));
// TODO(ajwong): Do we really need 1000ms here?
loop.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &b_was_deleted)),
+ FROM_HERE,
+ BindOnce(&RecordDeletionProbe::Run,
+ new RecordDeletionProbe(NULL, &b_was_deleted)),
TimeDelta::FromMilliseconds(1000));
}
EXPECT_TRUE(a_was_deleted);
@@ -341,7 +344,8 @@ void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
RecordDeletionProbe* a = new RecordDeletionProbe(NULL, &a_was_deleted);
RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
- loop.task_runner()->PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
+ loop.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&RecordDeletionProbe::Run, c));
}
EXPECT_TRUE(a_was_deleted);
EXPECT_TRUE(b_was_deleted);
@@ -352,7 +356,7 @@ void NestingFunc(int* depth) {
if (*depth > 0) {
*depth -= 1;
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&NestingFunc, depth));
+ BindOnce(&NestingFunc, depth));
MessageLoop::current()->SetNestableTasksAllowed(true);
RunLoop().Run();
@@ -366,7 +370,7 @@ void RunTest_Nesting(MessagePumpFactory factory) {
int depth = 100;
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&NestingFunc, &depth));
+ BindOnce(&NestingFunc, &depth));
RunLoop().Run();
EXPECT_EQ(depth, 0);
}
@@ -404,7 +408,7 @@ void RunNestedLoop(TestNestingObserver* observer,
// Verify that by the time the first task is run the observer has seen the
// message loop begin.
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
+ FROM_HERE, BindOnce(&ExpectOneBeginNestedLoop, observer));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, nested_loop.QuitClosure());
nested_loop.Run();
@@ -424,9 +428,9 @@ void RunTest_NestingObserver(MessagePumpFactory factory) {
outer_loop.AddNestingObserver(&nesting_observer);
// Post a task that runs a nested message loop.
- outer_loop.task_runner()->PostTask(FROM_HERE,
- Bind(&RunNestedLoop, &nesting_observer,
- outer_loop.QuitWhenIdleClosure()));
+ outer_loop.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&RunNestedLoop, &nesting_observer,
+ outer_loop.QuitWhenIdleClosure()));
RunLoop().Run();
outer_loop.RemoveNestingObserver(&nesting_observer);
@@ -519,7 +523,7 @@ void RecursiveFunc(TaskList* order, int cookie, int depth,
MessageLoop::current()->SetNestableTasksAllowed(true);
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
- Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
+ BindOnce(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
}
order->RecordEnd(RECURSIVE, cookie);
}
@@ -536,11 +540,11 @@ void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
+ FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2, false));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
+ FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2, false));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ BindOnce(&QuitFunc, &order, 3));
RunLoop().Run();
@@ -580,13 +584,15 @@ void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
+ FROM_HERE, BindOnce(&RecursiveSlowFunc, &order, 1, 2, false));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
+ FROM_HERE, BindOnce(&RecursiveSlowFunc, &order, 2, 2, false));
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 3),
+ TimeDelta::FromMilliseconds(5));
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
+ FROM_HERE, BindOnce(&QuitFunc, &order, 4),
+ TimeDelta::FromMilliseconds(5));
RunLoop().Run();
@@ -616,11 +622,11 @@ void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
TaskList order;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
+ FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2, true));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
+ FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2, true));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ BindOnce(&QuitFunc, &order, 3));
RunLoop().Run();
@@ -650,11 +656,11 @@ void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
TaskList order;
ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 1));
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 1));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
+ BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&QuitFunc, &order, 3));
+ BindOnce(&QuitFunc, &order, 3));
RunLoop().Run();
// FIFO order.
@@ -690,17 +696,18 @@ void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
TaskList order;
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&FuncThatPumps, &order, 1));
+ BindOnce(&FuncThatPumps, &order, 1));
ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 3));
+ BindOnce(&OrderedFunc, &order, 3));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
+ FROM_HERE,
+ BindOnce(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 5));
+ BindOnce(&OrderedFunc, &order, 5));
ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
- FROM_HERE, Bind(&QuitFunc, &order, 6));
+ FROM_HERE, BindOnce(&QuitFunc, &order, 6));
RunLoop().Run();
@@ -742,15 +749,17 @@ void RunTest_QuitNow(MessagePumpFactory factory) {
RunLoop run_loop;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 3));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ BindOnce(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatQuitsNow));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 4)); // never runs
RunLoop().Run();
@@ -776,11 +785,12 @@ void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
RunLoop nested_run_loop;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
outer_run_loop.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
+ BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_run_loop.QuitClosure());
@@ -806,11 +816,12 @@ void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
RunLoop nested_run_loop;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_run_loop.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
+ BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
outer_run_loop.QuitClosure());
@@ -837,11 +848,12 @@ void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
RunLoop bogus_run_loop;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
bogus_run_loop.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
+ BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
outer_run_loop.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
@@ -872,35 +884,35 @@ void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
RunLoop nested_loop4;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 5));
+ BindOnce(&OrderedFunc, &order, 5));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
outer_run_loop.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 6));
+ BindOnce(&OrderedFunc, &order, 6));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_loop1.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 7));
+ BindOnce(&OrderedFunc, &order, 7));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_loop2.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 8));
+ BindOnce(&OrderedFunc, &order, 8));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_loop3.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 9));
+ BindOnce(&OrderedFunc, &order, 9));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_loop4.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 10));
+ BindOnce(&OrderedFunc, &order, 10));
outer_run_loop.Run();
@@ -939,9 +951,9 @@ void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
run_loop.Quit();
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 1)); // never runs
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+ FROM_HERE, BindOnce(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -958,12 +970,12 @@ void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
RunLoop run_loop;
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 1));
+ BindOnce(&OrderedFunc, &order, 1));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop.QuitClosure());
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 2)); // never runs
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+ FROM_HERE, BindOnce(&FuncThatQuitsNow)); // never runs
run_loop.Run();
@@ -984,17 +996,19 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
RunLoop run_loop;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ BindOnce(&FuncThatQuitsNow));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 3));
+ BindOnce(&OrderedFunc, &order, 3));
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, run_loop.QuitClosure()); // has no affect
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&OrderedFunc, &order, 4));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+ BindOnce(&OrderedFunc, &order, 4));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatQuitsNow));
RunLoop outer_run_loop;
outer_run_loop.Run();
@@ -1015,7 +1029,7 @@ void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ FROM_HERE, BindOnce(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
}
@@ -1033,7 +1047,8 @@ void RunTest_RecursivePosts(MessagePumpFactory factory) {
const int kNumTimes = 1 << 17;
std::unique_ptr<MessagePump> pump(factory());
MessageLoop loop(std::move(pump));
- loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
+ loop.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&PostNTasksThenQuit, kNumTimes));
RunLoop().Run();
}
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index 14fe1ee391c..fe3c015f258 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -12,6 +12,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_test.h"
@@ -93,16 +94,19 @@ void AbortMessagePump() {
static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
}
-void RunTest_AbortDontRunMoreTasks(bool delayed) {
- MessageLoop loop(MessageLoop::TYPE_JAVA);
-
+void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- std::unique_ptr<android::JavaHandlerThreadForTesting> java_thread;
- java_thread.reset(new android::JavaHandlerThreadForTesting(
- "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
- &test_done_event));
+ std::unique_ptr<android::JavaHandlerThread> java_thread;
+ if (init_java_first) {
+ java_thread =
+ android::JavaHandlerThreadForTesting::CreateJavaFirst(&test_done_event);
+ } else {
+ java_thread = android::JavaHandlerThreadForTesting::Create(
+ "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
+ &test_done_event);
+ }
java_thread->Start();
if (delayed) {
@@ -121,10 +125,19 @@ void RunTest_AbortDontRunMoreTasks(bool delayed) {
}
TEST(MessageLoopTest, JavaExceptionAbort) {
- RunTest_AbortDontRunMoreTasks(false);
+ constexpr bool delayed = false;
+ constexpr bool init_java_first = false;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
- RunTest_AbortDontRunMoreTasks(true);
+ constexpr bool delayed = true;
+ constexpr bool init_java_first = false;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+}
+TEST(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
+ constexpr bool delayed = false;
+ constexpr bool init_java_first = true;
+ RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
#endif // defined(OS_ANDROID)
@@ -457,7 +470,7 @@ void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
void PostNTasksThenQuit(int posts_remaining) {
if (posts_remaining > 1) {
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ FROM_HERE, BindOnce(&PostNTasksThenQuit, posts_remaining - 1));
} else {
MessageLoop::current()->QuitWhenIdle();
}
@@ -669,7 +682,8 @@ TEST(MessageLoopTest, TaskObserver) {
MessageLoop loop;
loop.AddTaskObserver(&observer);
- loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
+ loop.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&PostNTasksThenQuit, kNumPosts));
RunLoop().Run();
loop.RemoveTaskObserver(&observer);
@@ -846,9 +860,10 @@ TEST(MessageLoopTest, DestructionObserverTest) {
MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
loop->AddDestructionObserver(&observer);
loop->task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&DestructionObserverProbe::Run,
- new DestructionObserverProbe(
- &task_destroyed, &destruction_observer_called)),
+ FROM_HERE,
+ BindOnce(&DestructionObserverProbe::Run,
+ new DestructionObserverProbe(&task_destroyed,
+ &destruction_observer_called)),
kDelay);
delete loop;
EXPECT_TRUE(observer.task_destroyed_before_message_loop());
@@ -865,13 +880,13 @@ TEST(MessageLoopTest, ThreadMainTaskRunner) {
scoped_refptr<Foo> foo(new Foo());
std::string a("a");
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(
- &Foo::Test1ConstRef, foo, a));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
// Post quit task;
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
- Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
+ BindOnce(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
// Now kick things off
RunLoop().Run();
@@ -994,8 +1009,7 @@ TEST(MessageLoopTest, OriginalRunnerWorks) {
loop.SetTaskRunner(new_runner);
scoped_refptr<Foo> foo(new Foo());
- original_runner->PostTask(FROM_HERE,
- Bind(&Foo::Test1ConstRef, foo, "a"));
+ original_runner->PostTask(FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, "a"));
RunLoop().RunUntilIdle();
EXPECT_EQ(1, foo->test_count());
}
diff --git a/chromium/base/message_loop/message_pump_glib_unittest.cc b/chromium/base/message_loop/message_pump_glib_unittest.cc
index 607d3c93d6e..bef0c8f63e1 100644
--- a/chromium/base/message_loop/message_pump_glib_unittest.cc
+++ b/chromium/base/message_loop/message_pump_glib_unittest.cc
@@ -237,13 +237,13 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
// The event queue is empty at first.
for (int i = 0; i < 10; ++i) {
loop()->task_runner()->PostTask(FROM_HERE,
- Bind(&IncrementInt, &task_count));
+ BindOnce(&IncrementInt, &task_count));
}
// After all the previous tasks have executed, enqueue an event that will
// quit.
loop()->task_runner()->PostTask(
- FROM_HERE, Bind(&EventInjector::AddEvent, Unretained(injector()), 0,
- MessageLoop::QuitWhenIdleClosure()));
+ FROM_HERE, BindOnce(&EventInjector::AddEvent, Unretained(injector()), 0,
+ MessageLoop::QuitWhenIdleClosure()));
RunLoop().Run();
ASSERT_EQ(10, task_count);
EXPECT_EQ(1, injector()->processed_events());
@@ -253,7 +253,7 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
task_count = 0;
for (int i = 0; i < 10; ++i) {
loop()->task_runner()->PostDelayedTask(FROM_HERE,
- Bind(&IncrementInt, &task_count),
+ BindOnce(&IncrementInt, &task_count),
TimeDelta::FromMilliseconds(10 * i));
}
// After all the previous tasks have executed, enqueue an event that will
@@ -261,8 +261,9 @@ TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
// This relies on the fact that delayed tasks are executed in delay order.
// That is verified in message_loop_unittest.cc.
loop()->task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&EventInjector::AddEvent, Unretained(injector()), 10,
- MessageLoop::QuitWhenIdleClosure()),
+ FROM_HERE,
+ BindOnce(&EventInjector::AddEvent, Unretained(injector()), 10,
+ MessageLoop::QuitWhenIdleClosure()),
TimeDelta::FromMilliseconds(150));
RunLoop().Run();
ASSERT_EQ(10, task_count);
@@ -312,7 +313,7 @@ class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
MessageLoop::current()->QuitWhenIdle();
} else {
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&ConcurrentHelper::FromTask, this));
+ FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, this));
}
}
@@ -363,9 +364,9 @@ TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) {
// Similarly post 2 tasks.
loop()->task_runner()->PostTask(
- FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper));
+ FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper));
loop()->task_runner()->PostTask(
- FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper));
+ FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper));
RunLoop().Run();
EXPECT_EQ(0, helper->event_count());
@@ -382,8 +383,8 @@ void AddEventsAndDrainGLib(EventInjector* injector) {
injector->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
// Post a couple of dummy tasks
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&DoNothing));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&DoNothing));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&DoNothing));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&DoNothing));
// Drain the events
while (g_main_context_pending(NULL)) {
@@ -396,7 +397,7 @@ void AddEventsAndDrainGLib(EventInjector* injector) {
TEST_F(MessagePumpGLibTest, TestDrainingGLib) {
// Tests that draining events using GLib works.
loop()->task_runner()->PostTask(
- FROM_HERE, Bind(&AddEventsAndDrainGLib, Unretained(injector())));
+ FROM_HERE, BindOnce(&AddEventsAndDrainGLib, Unretained(injector())));
RunLoop().Run();
EXPECT_EQ(3, injector()->processed_events());
@@ -448,18 +449,18 @@ void TestGLibLoopInternal(EventInjector* injector) {
injector->AddDummyEvent(0);
// Post a couple of dummy tasks
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&IncrementInt, &task_count));
+ BindOnce(&IncrementInt, &task_count));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&IncrementInt, &task_count));
+ BindOnce(&IncrementInt, &task_count));
// Delayed events
injector->AddDummyEvent(10);
injector->AddDummyEvent(10);
// Delayed work
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&IncrementInt, &task_count),
+ FROM_HERE, BindOnce(&IncrementInt, &task_count),
TimeDelta::FromMilliseconds(30));
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&GLibLoopRunner::Quit, runner),
+ FROM_HERE, BindOnce(&GLibLoopRunner::Quit, runner),
TimeDelta::FromMilliseconds(40));
// Run a nested, straight GLib message loop.
@@ -481,18 +482,18 @@ void TestGtkLoopInternal(EventInjector* injector) {
injector->AddDummyEvent(0);
// Post a couple of dummy tasks
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&IncrementInt, &task_count));
+ BindOnce(&IncrementInt, &task_count));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- Bind(&IncrementInt, &task_count));
+ BindOnce(&IncrementInt, &task_count));
// Delayed events
injector->AddDummyEvent(10);
injector->AddDummyEvent(10);
// Delayed work
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&IncrementInt, &task_count),
+ FROM_HERE, BindOnce(&IncrementInt, &task_count),
TimeDelta::FromMilliseconds(30));
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&GLibLoopRunner::Quit, runner),
+ FROM_HERE, BindOnce(&GLibLoopRunner::Quit, runner),
TimeDelta::FromMilliseconds(40));
// Run a nested, straight Gtk message loop.
@@ -511,7 +512,7 @@ TEST_F(MessagePumpGLibTest, TestGLibLoop) {
// Note that in this case we don't make strong guarantees about niceness
// between events and posted tasks.
loop()->task_runner()->PostTask(
- FROM_HERE, Bind(&TestGLibLoopInternal, Unretained(injector())));
+ FROM_HERE, BindOnce(&TestGLibLoopInternal, Unretained(injector())));
RunLoop().Run();
}
@@ -521,7 +522,7 @@ TEST_F(MessagePumpGLibTest, TestGtkLoop) {
// Note that in this case we don't make strong guarantees about niceness
// between events and posted tasks.
loop()->task_runner()->PostTask(
- FROM_HERE, Bind(&TestGtkLoopInternal, Unretained(injector())));
+ FROM_HERE, BindOnce(&TestGtkLoopInternal, Unretained(injector())));
RunLoop().Run();
}
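
A minimal sketch of the migration pattern applied throughout this file: a task that runs at most once is bound with BindOnce(), yielding a OnceClosure that PostTask() consumes by move, while Bind()/RepeatingCallback remains for callbacks that may be invoked repeatedly (|task_runner| and |task_count| stand in for the test fixture):

    // Run-once task: moved into the task queue, invoked at most once.
    task_runner->PostTask(FROM_HERE,
                          base::BindOnce(&IncrementInt, &task_count));
    // Repeating callback: may be run any number of times.
    base::RepeatingClosure tick = base::Bind(&IncrementInt, &task_count);
    tick.Run();
    tick.Run();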
diff --git a/chromium/base/message_loop/message_pump_libevent_unittest.cc b/chromium/base/message_loop/message_pump_libevent_unittest.cc
index 3e7a200a235..0a7c4855616 100644
--- a/chromium/base/message_loop/message_pump_libevent_unittest.cc
+++ b/chromium/base/message_loop/message_pump_libevent_unittest.cc
@@ -187,7 +187,7 @@ class NestedPumpWatcher : public MessagePumpLibevent::Watcher {
void OnFileCanReadWithoutBlocking(int /* fd */) override {
RunLoop runloop;
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&QuitMessageLoopAndStart, runloop.QuitClosure()));
+ FROM_HERE, BindOnce(&QuitMessageLoopAndStart, runloop.QuitClosure()));
runloop.Run();
}
@@ -218,7 +218,7 @@ class QuitWatcher : public BaseWatcher {
void OnFileCanReadWithoutBlocking(int /* fd */) override {
// Post a fatal closure to the MessageLoop before we quit it.
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FatalClosure));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&FatalClosure));
// Now quit the MessageLoop.
run_loop_->Quit();
@@ -259,12 +259,12 @@ TEST_F(MessagePumpLibeventTest, QuitWatcher) {
const WaitableEventWatcher::EventCallback write_fd_task =
Bind(&WriteFDWrapper, pipefds_[1], &buf, 1);
io_loop()->task_runner()->PostTask(
- FROM_HERE, Bind(IgnoreResult(&WaitableEventWatcher::StartWatching),
- Unretained(watcher.get()), &event, write_fd_task));
+ FROM_HERE, BindOnce(IgnoreResult(&WaitableEventWatcher::StartWatching),
+ Unretained(watcher.get()), &event, write_fd_task));
// Queue |event| to signal on |loop|.
loop.task_runner()->PostTask(
- FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
+ FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&event)));
// Now run the MessageLoop.
run_loop.Run();
@@ -272,7 +272,7 @@ TEST_F(MessagePumpLibeventTest, QuitWatcher) {
// StartWatching can move |watcher| to IO thread. Release on IO thread.
io_loop()->task_runner()->PostTask(
FROM_HERE,
- Bind(&WaitableEventWatcher::StopWatching, Owned(watcher.release())));
+ BindOnce(&WaitableEventWatcher::StopWatching, Owned(watcher.release())));
}
} // namespace
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index 04a98c23e55..e9629aae72b 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -65,8 +65,8 @@ class ScheduleWorkTest : public testing::Test {
min_batch_times_[index] = minimum;
max_batch_times_[index] = maximum;
target_message_loop()->task_runner()->PostTask(
- FROM_HERE, base::Bind(&ScheduleWorkTest::Increment,
- base::Unretained(this), schedule_calls));
+ FROM_HERE, base::BindOnce(&ScheduleWorkTest::Increment,
+ base::Unretained(this), schedule_calls));
}
void ScheduleWork(MessageLoop::Type target_type, int num_scheduling_threads) {
@@ -101,8 +101,8 @@ class ScheduleWorkTest : public testing::Test {
for (int i = 0; i < num_scheduling_threads; ++i) {
scheduling_threads[i]->task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&ScheduleWorkTest::Schedule, base::Unretained(this), i));
+ FROM_HERE, base::BindOnce(&ScheduleWorkTest::Schedule,
+ base::Unretained(this), i));
}
for (int i = 0; i < num_scheduling_threads; ++i) {
@@ -263,8 +263,8 @@ class PostTaskTest : public testing::Test {
do {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < tasks_per_reload; ++j) {
- queue->AddToIncomingQueue(
- FROM_HERE, base::Bind(&DoNothing), base::TimeDelta(), false);
+ queue->AddToIncomingQueue(FROM_HERE, base::BindOnce(&DoNothing),
+ base::TimeDelta(), false);
num_posted++;
}
TaskQueue loop_local_queue;
diff --git a/chromium/base/metrics/OWNERS b/chromium/base/metrics/OWNERS
index e42c3131a4c..e63738742c0 100644
--- a/chromium/base/metrics/OWNERS
+++ b/chromium/base/metrics/OWNERS
@@ -2,3 +2,5 @@ asvitkine@chromium.org
isherman@chromium.org
mpearson@chromium.org
rkaplow@chromium.org
+
+# COMPONENT: Internals>Metrics
diff --git a/chromium/base/metrics/bucket_ranges.h b/chromium/base/metrics/bucket_ranges.h
index c356195ba78..db82e552b3a 100644
--- a/chromium/base/metrics/bucket_ranges.h
+++ b/chromium/base/metrics/bucket_ranges.h
@@ -24,6 +24,7 @@
#include <limits.h>
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
@@ -58,6 +59,17 @@ class BASE_EXPORT BucketRanges {
// Return true iff |other| object has same ranges_ as |this| object's ranges_.
bool Equals(const BucketRanges* other) const;
+ // Set and get a reference into persistent memory where this bucket data
+ // can be found (and re-used). These calls are internally atomic, with no
+ // guard against overwriting an existing value: though it is wasteful to
+ // have multiple identical persistent records, doing so is still safe.
+ void set_persistent_reference(uint32_t ref) const {
+ subtle::NoBarrier_Store(&persistent_reference_, ref);
+ }
+ uint32_t persistent_reference() const {
+ return subtle::NoBarrier_Load(&persistent_reference_);
+ }
+
private:
// A monotonically increasing list of values which determine which bucket to
// put a sample into. For each index, show the smallest sample that can be
@@ -71,6 +83,12 @@ class BASE_EXPORT BucketRanges {
// noise on UMA dashboard.
uint32_t checksum_;
+ // A reference into a global PersistentMemoryAllocator where the ranges
+ // information is stored. This allows for the record to be created once and
+ // re-used simply by having all histograms with the same ranges use the
+ // same reference.
+ mutable subtle::Atomic32 persistent_reference_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(BucketRanges);
};
diff --git a/chromium/base/metrics/histogram_functions.h b/chromium/base/metrics/histogram_functions.h
index a18c464aadb..5960aca6574 100644
--- a/chromium/base/metrics/histogram_functions.h
+++ b/chromium/base/metrics/histogram_functions.h
@@ -43,7 +43,10 @@ template <typename T>
void UmaHistogramEnumeration(const std::string& name, T sample, T max) {
static_assert(std::is_enum<T>::value,
"Non enum passed to UmaHistogramEnumeration");
- return UmaHistogramExactLinear(name, static_cast<int>(sample), max);
+ DCHECK_LE(static_cast<uintmax_t>(max), static_cast<uintmax_t>(INT_MAX));
+ DCHECK_LE(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(max));
+ return UmaHistogramExactLinear(name, static_cast<int>(sample),
+ static_cast<int>(max));
}
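
A hedged usage sketch for the tightened template (the enum and histogram name here are hypothetical): the new DCHECKs require |max| to fit in an int and |sample| to not exceed |max|.

    enum class NewTabEvent { kOpened, kClosed, kMaxValue };  // hypothetical
    base::UmaHistogramEnumeration("Hypothetical.NewTab.Event",
                                  NewTabEvent::kOpened, NewTabEvent::kMaxValue);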
// For adding boolean sample to histogram.
@@ -59,7 +62,7 @@ BASE_EXPORT void UmaHistogramPercentage(const std::string& name, int percent);
// For adding counts histogram.
// Sample usage:
-// base::UmaHistogramCounts("My.Counts", some_value, 1, 600, 30)
+// base::UmaHistogramCustomCounts("My.Counts", some_value, 1, 600, 30)
BASE_EXPORT void UmaHistogramCustomCounts(const std::string& name,
int sample,
int min,
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index 78473761dd4..d39972a8a12 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -41,10 +41,9 @@
// delete and reused. The value in |sample| must be strictly less than
// |enum_max|.
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
- INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
- name, sample, enum_max, \
- base::HistogramBase::kUmaTargetedHistogramFlag)
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, enum_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Histogram for boolean values.
@@ -68,14 +67,15 @@
// Sample usage:
// UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
- UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Used for capturing basic percentages. This will be 100 buckets of size 1.
// Sample usage:
// UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
-#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
- UMA_HISTOGRAM_ENUMERATION(name, percent_as_int, 101)
+#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+ UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
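
Illustrative calls, assuming hypothetical histogram names and variables: after this change EXACT_LINEAR is the integer-only primitive (its static_asserts reject enum arguments), and PERCENTAGE is simply EXACT_LINEAR with 101 buckets.

    UMA_HISTOGRAM_EXACT_LINEAR("Hypothetical.Queue.Length", queue_length, 50);
    UMA_HISTOGRAM_PERCENTAGE("Hypothetical.Cache.HitPct", hit_pct);
    // Enum samples must now go through UMA_HISTOGRAM_ENUMERATION instead.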
//------------------------------------------------------------------------------
// Count histograms. These are used for collecting numeric data. Note that we
diff --git a/chromium/base/metrics/histogram_macros_internal.h b/chromium/base/metrics/histogram_macros_internal.h
index 53e4f11b75d..c107a4729d2 100644
--- a/chromium/base/metrics/histogram_macros_internal.h
+++ b/chromium/base/metrics/histogram_macros_internal.h
@@ -5,6 +5,11 @@
#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
#include "base/atomicops.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
@@ -96,17 +101,42 @@
base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
// This is a helper macro used by other macros and shouldn't be used directly.
-// For an enumeration with N items, recording values in the range [0, N - 1],
-// this macro creates a linear histogram with N + 1 buckets:
-// [0, 1), [1, 2), ..., [N - 1, N), and an overflow bucket [N, infinity).
+// The bucketing scheme is linear with a bucket size of 1. For N items,
+// recording values in the range [0, N - 1] creates a linear histogram with N +
+// 1 buckets:
+// [0, 1), [1, 2), ..., [N - 1, N)
+// and an overflow bucket [N, infinity).
+//
// Code should never emit to the overflow bucket; only to the other N buckets.
-// This allows future versions of Chrome to safely append new entries to the
-// enumeration. Otherwise, the histogram would have [N - 1, infinity) as its
-// overflow bucket, and so the maximal value (N - 1) would be emitted to this
-// overflow bucket. But, if an additional enumerated value were later added, the
-// bucket label for the value (N - 1) would change to [N - 1, N), which would
-// result in different versions of Chrome using different bucket labels for
-// identical data.
+// This allows future versions of Chrome to safely increase the boundary size.
+// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
+// and so the maximal value (N - 1) would be emitted to this overflow bucket.
+// But, if an additional value were later added, the bucket label for
+// the value (N - 1) would change to [N - 1, N), which would result in different
+// versions of Chrome using different bucket labels for identical data.
+#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary, \
+ flag) \
+ do { \
+ static_assert(!std::is_enum<decltype(sample)>::value, \
+ "|sample| should not be an enum type!"); \
+ static_assert(!std::is_enum<decltype(boundary)>::value, \
+ "|boundary| should not be an enum type!"); \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, Add(sample), \
+ base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+ flag)); \
+ } while (0)
+
+// Similar to the previous macro but intended for enumerations. This delegates
+// the work to the previous macro, but supports scoped enumerations as well by
+// forcing an explicit cast to the HistogramBase::Sample integral type.
+//
+// Note the range checks verify two separate issues:
+// - that the declared enum max isn't out of range of HistogramBase::Sample
+// - that the declared enum max is > 0
+//
+// TODO(dcheng): This should assert that the passed in types are actually enum
+// types.
#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
do { \
static_assert( \
@@ -115,9 +145,14 @@
std::is_same<std::remove_const<decltype(sample)>::type, \
std::remove_const<decltype(boundary)>::type>::value, \
"|sample| and |boundary| shouldn't be of different enums"); \
- STATIC_HISTOGRAM_POINTER_BLOCK( \
- name, Add(sample), base::LinearHistogram::FactoryGet( \
- name, 1, boundary, boundary + 1, flag)); \
+ static_assert( \
+ static_cast<uintmax_t>(boundary) < \
+ static_cast<uintmax_t>( \
+ std::numeric_limits<base::HistogramBase::Sample>::max()), \
+ "|boundary| is out of range of HistogramBase::Sample"); \
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, static_cast<base::HistogramBase::Sample>(sample), \
+ static_cast<base::HistogramBase::Sample>(boundary), flag); \
} while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
diff --git a/chromium/base/metrics/histogram_macros_unittest.cc b/chromium/base/metrics/histogram_macros_unittest.cc
index c5991619a0f..33a9c6e5b2e 100644
--- a/chromium/base/metrics/histogram_macros_unittest.cc
+++ b/chromium/base/metrics/histogram_macros_unittest.cc
@@ -15,4 +15,35 @@ TEST(ScopedHistogramTimer, TwoTimersOneScope) {
SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer1");
}
+// Compile tests for UMA_HISTOGRAM_ENUMERATION with the three different types it
+// accepts:
+// - integral types
+// - unscoped enums
+// - scoped enums
+TEST(HistogramMacro, IntegralPsuedoEnumeration) {
+ UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 10000);
+}
+
+TEST(HistogramMacro, UnscopedEnumeration) {
+ enum TestEnum : char {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
+ MAX_ENTRIES,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.UnscopedEnumeration", SECOND_VALUE,
+ MAX_ENTRIES);
+}
+
+TEST(HistogramMacro, ScopedEnumeration) {
+ enum class TestEnum {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
+ MAX_ENTRIES,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::SECOND_VALUE,
+ TestEnum::MAX_ENTRIES);
+}
+
} // namespace base
diff --git a/chromium/base/metrics/histogram_unittest.nc b/chromium/base/metrics/histogram_unittest.nc
index 0dfe1af050a..6f913e89f14 100644
--- a/chromium/base/metrics/histogram_unittest.nc
+++ b/chromium/base/metrics/histogram_unittest.nc
@@ -17,6 +17,22 @@ void WontCompile() {
UMA_HISTOGRAM_ENUMERATION("", A, B);
}
+#elif defined(NCTEST_NEGATIVE_ENUM_MAX) // [r'static_assert failed "\|boundary\| is out of range of HistogramBase::Sample"']
+
+void WontCompile() {
+ // Buckets for enumeration start from 0, so a boundary < 0 is illegal.
+ enum class TypeA { A = -1 };
+ UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeA::A);
+}
+
+#elif defined(NCTEST_ENUM_MAX_OUT_OF_RANGE) // [r'static_assert failed "\|boundary\| is out of range of HistogramBase::Sample"']
+
+void WontCompile() {
+ // HistogramBase::Sample is an int and can't hold larger values.
+ enum class TypeA : uint32_t { A = 0xffffffff };
+ UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeA::A);
+}
+
#endif
} // namespace base
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index 939174ecaf6..b2dae993ddf 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -340,24 +340,49 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
return nullptr;
}
- size_t ranges_count = bucket_count + 1;
- size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
+ // Since the StatisticsRecorder keeps a global collection of BucketRanges
+ // objects for re-use, it would be dangerous for one to hold a reference
+ // from a persistent allocator that is not the global one (which is
+ // permanent once set). If this stops being the case, this check can
+ // become an "if" condition beside "!ranges_ref" below and before
+ // set_persistent_reference() farther down.
+ DCHECK_EQ(this, GlobalHistogramAllocator::Get());
+
+ // Re-use an existing BucketRanges persistent allocation if one is known;
+ // otherwise, create one.
+ PersistentMemoryAllocator::Reference ranges_ref =
+ bucket_ranges->persistent_reference();
+ if (!ranges_ref) {
+ size_t ranges_count = bucket_count + 1;
+ size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
+ ranges_ref =
+ memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+ if (ranges_ref) {
+ HistogramBase::Sample* ranges_data =
+ memory_allocator_->GetAsArray<HistogramBase::Sample>(
+ ranges_ref, kTypeIdRangesArray, ranges_count);
+ if (ranges_data) {
+ for (size_t i = 0; i < bucket_ranges->size(); ++i)
+ ranges_data[i] = bucket_ranges->range(i);
+ bucket_ranges->set_persistent_reference(ranges_ref);
+ } else {
+ // This should never happen but be tolerant if it does.
+ NOTREACHED();
+ ranges_ref = PersistentMemoryAllocator::kReferenceNull;
+ }
+ }
+ } else {
+ DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
+ }
+
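
A sketch of the benign race permitted here: two threads can both observe a null reference, both allocate, and both publish; the last store wins and the losing record is merely wasted space, never a correctness problem (AllocateRangesRecord() is a hypothetical stand-in for the allocation performed above).

    uint32_t ref = bucket_ranges->persistent_reference();
    if (!ref) {
      ref = AllocateRangesRecord();                  // hypothetical helper
      bucket_ranges->set_persistent_reference(ref);  // last writer wins
    }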
PersistentMemoryAllocator::Reference counts_ref =
memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
- PersistentMemoryAllocator::Reference ranges_ref =
- memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
- HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsArray<HistogramBase::Sample>(
- ranges_ref, kTypeIdRangesArray, ranges_count);
// Only continue here if all allocations were successful. If they weren't,
// there is no way to free the space but that's not really a problem since
// the allocations only fail because the space is full or corrupt and so
// any future attempts will also fail.
- if (counts_ref && ranges_data && histogram_data) {
- for (size_t i = 0; i < bucket_ranges->size(); ++i)
- ranges_data[i] = bucket_ranges->range(i);
-
+ if (counts_ref && ranges_ref && histogram_data) {
histogram_data->minimum = minimum;
histogram_data->maximum = maximum;
// |bucket_count| must fit within 32-bits or the allocation of the counts
@@ -785,24 +810,6 @@ void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
#endif // !defined(OS_NACL)
// static
-void GlobalHistogramAllocator::CreateWithSharedMemory(
- std::unique_ptr<SharedMemory> memory,
- size_t size,
- uint64_t id,
- StringPiece name) {
- if ((!memory->memory() && !memory->Map(size)) ||
- !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
- NOTREACHED();
- return;
- }
-
- DCHECK_LE(memory->mapped_size(), size);
- Set(WrapUnique(
- new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
- std::move(memory), 0, StringPiece(), /*readonly=*/false))));
-}
-
-// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
const SharedMemoryHandle& handle,
size_t size) {
@@ -905,6 +912,8 @@ bool GlobalHistogramAllocator::WriteToPersistentLocation() {
}
void GlobalHistogramAllocator::DeletePersistentLocation() {
+ memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+
#if defined(OS_NACL)
NOTREACHED();
#else
diff --git a/chromium/base/metrics/persistent_histogram_allocator.h b/chromium/base/metrics/persistent_histogram_allocator.h
index 2eb28dfaf5b..851d7ef5a4e 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.h
+++ b/chromium/base/metrics/persistent_histogram_allocator.h
@@ -431,15 +431,6 @@ class BASE_EXPORT GlobalHistogramAllocator
FilePath* out_active_path);
#endif
- // Create a global allocator using a block of shared |memory| of the
- // specified |size|. The allocator takes ownership of the shared memory
- // and releases it upon destruction, though the memory will continue to
- // live if other processes have access to it.
- static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
- size_t size,
- uint64_t id,
- StringPiece name);
-
// Create a global allocator using a block of shared memory accessed
// through the given |handle| and |size|. The allocator takes ownership
// of the handle and closes it upon destruction, though the memory will
diff --git a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
index df250a37b0d..cfd20a8e2cd 100644
--- a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -281,4 +281,41 @@ TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderMergeTest) {
EXPECT_EQ(1, snapshot->GetCount(7));
}
+TEST_F(PersistentHistogramAllocatorTest, RangesDeDuplication) {
+ // This corresponds to the "ranges_ref" field of the PersistentHistogramData
+ // structure defined (privately) inside persistent_histogram_allocator.cc.
+ const int kRangesRefIndex = 5;
+
+ // Create two histograms with the same ranges.
+ HistogramBase* histogram1 =
+ Histogram::FactoryGet("TestHistogram1", 1, 1000, 10, 0);
+ HistogramBase* histogram2 =
+ Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, 0);
+ const uint32_t ranges_ref = static_cast<Histogram*>(histogram1)
+ ->bucket_ranges()
+ ->persistent_reference();
+ ASSERT_NE(0U, ranges_ref);
+ EXPECT_EQ(ranges_ref, static_cast<Histogram*>(histogram2)
+ ->bucket_ranges()
+ ->persistent_reference());
+
+ // Make sure that the persistent data record is also correct. Two histograms
+ // will be fetched; other allocations are not "iterable".
+ PersistentMemoryAllocator::Iterator iter(allocator_);
+ uint32_t type;
+ uint32_t ref1 = iter.GetNext(&type);
+ uint32_t ref2 = iter.GetNext(&type);
+ EXPECT_EQ(0U, iter.GetNext(&type));
+ EXPECT_NE(0U, ref1);
+ EXPECT_NE(0U, ref2);
+ EXPECT_NE(ref1, ref2);
+
+ uint32_t* data1 =
+ allocator_->GetAsArray<uint32_t>(ref1, 0, kRangesRefIndex + 1);
+ uint32_t* data2 =
+ allocator_->GetAsArray<uint32_t>(ref2, 0, kRangesRefIndex + 1);
+ EXPECT_EQ(ranges_ref, data1[kRangesRefIndex]);
+ EXPECT_EQ(ranges_ref, data2[kRangesRefIndex]);
+}
+
} // namespace base
diff --git a/chromium/base/metrics/persistent_memory_allocator.cc b/chromium/base/metrics/persistent_memory_allocator.cc
index 99ea44b8e93..d381d8784d1 100644
--- a/chromium/base/metrics/persistent_memory_allocator.cc
+++ b/chromium/base/metrics/persistent_memory_allocator.cc
@@ -18,6 +18,7 @@
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
+#include "base/threading/thread_restrictions.h"
namespace {
@@ -32,7 +33,7 @@ const uint32_t kGlobalCookie = 0x408305DC;
// The current version of the metadata. If updates are made that change
// the metadata, the version number can be queried to operate in a backward-
// compatible manner until the memory segment is completely re-initialized.
-const uint32_t kGlobalVersion = 1;
+const uint32_t kGlobalVersion = 2;
// Constant values placed in the block headers to indicate its state.
const uint32_t kBlockCookieFree = 0;
@@ -43,7 +44,7 @@ const uint32_t kBlockCookieAllocated = 0xC8799269;
// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
// types rather than combined bitfield.
-// Flags stored in the flags_ field of the SharedMetaData structure below.
+// Flags stored in the flags_ field of the SharedMetadata structure below.
enum : int {
kFlagCorrupt = 1 << 0,
kFlagFull = 1 << 1
@@ -100,7 +101,9 @@ struct PersistentMemoryAllocator::BlockHeader {
};
// The shared metadata exists once at the top of the memory segment to
-// describe the state of the allocator to all processes.
+// describe the state of the allocator to all processes. The size of this
+// structure must be a multiple of 64-bits to ensure compatibility between
+// architectures.
struct PersistentMemoryAllocator::SharedMetadata {
uint32_t cookie; // Some value that indicates complete initialization.
uint32_t size; // Total size of memory segment.
@@ -108,10 +111,15 @@ struct PersistentMemoryAllocator::SharedMetadata {
uint32_t version; // Version code so upgrades don't break.
uint64_t id; // Arbitrary ID number given by creator.
uint32_t name; // Reference to stored name string.
+ uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
// Above is read-only after first construction. Below may be changed and
// so must be marked "volatile" to provide correct inter-process behavior.
+ // State of the memory, plus some padding to keep alignment.
+ volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
+ uint8_t padding2[3];
+
// Bitfield of information flags. Access to this should be done through
// the CheckFlag() and SetFlag() methods defined above.
volatile std::atomic<uint32_t> flags;
@@ -121,6 +129,7 @@ struct PersistentMemoryAllocator::SharedMetadata {
// The "iterable" queue is an M&S Queue as described here, append-only:
// https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+ // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
volatile BlockHeader queue; // Empty block for linked-list head/tail.
};
@@ -312,7 +321,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
// definitions and so cannot be moved to the global scope.
static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
"struct is not portable across different natural word widths");
- static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
+ static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
"struct is not portable across different natural word widths");
static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
@@ -384,12 +393,13 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
if (name_cstr)
memcpy(name_cstr, name.data(), name.length());
}
+
+ shared_meta()->memory_state.store(MEMORY_INITIALIZED,
+ std::memory_order_release);
} else {
- if (shared_meta()->size == 0 ||
- shared_meta()->version == 0 ||
+ if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
- shared_meta()->tailptr == 0 ||
- shared_meta()->queue.cookie == 0 ||
+ shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
SetCorrupt();
}
@@ -470,6 +480,19 @@ void PersistentMemoryAllocator::CreateTrackingHistograms(
HistogramBase::kUmaTargetedHistogramFlag);
}
+void PersistentMemoryAllocator::Flush(bool sync) {
+ FlushPartial(used(), sync);
+}
+
+void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
+ shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
+ FlushPartial(sizeof(SharedMetadata), false);
+}
+
+uint8_t PersistentMemoryAllocator::GetMemoryState() const {
+ return shared_meta()->memory_state.load(std::memory_order_relaxed);
+}
+
size_t PersistentMemoryAllocator::used() const {
return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
mem_size_);
@@ -540,10 +563,19 @@ bool PersistentMemoryAllocator::ChangeType(Reference ref,
return false;
}
- // Clear the memory while the type doesn't match either "from" or "to".
- memset(const_cast<char*>(reinterpret_cast<volatile char*>(block) +
- sizeof(BlockHeader)),
- 0, block->size - sizeof(BlockHeader));
+ // Clear the memory in an atomic manner. Using "release" stores force
+ // every write to be done after the ones before it. This is better than
+ // using memset because (a) it supports "volatile" and (b) it creates a
+ // reliable pattern upon which other threads may rely.
+ volatile std::atomic<int>* data =
+ reinterpret_cast<volatile std::atomic<int>*>(
+ reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
+ const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
+ DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
+ for (uint32_t i = 0; i < words; ++i) {
+ data->store(0, std::memory_order_release);
+ ++data;
+ }
// If the destination type is "transitioning" then skip the final exchange.
if (to_type_id == kTypeIdTransitioning)
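
A hedged caller-side sketch (kTypeIdOld and kTypeIdNew are assumed constants): with |clear| set, concurrent readers observe the old type, the transitioning state with monotonically zeroed words, or the final type, but never a half-initialized record of the new type.

    if (allocator->ChangeType(ref, kTypeIdNew, kTypeIdOld, /*clear=*/true)) {
      // Safe to reinitialize the payload as kTypeIdNew here.
    }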
@@ -807,8 +839,12 @@ const volatile PersistentMemoryAllocator::BlockHeader*
PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const {
+ // Handle special cases.
+ if (ref == kReferenceQueue && queue_ok)
+ return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+
// Validation of parameters.
- if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+ if (ref < sizeof(SharedMetadata))
return nullptr;
if (ref % kAllocAlignment != 0)
return nullptr;
@@ -818,17 +854,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
// Validation of referenced block-header.
if (!free_ok) {
- uint32_t freeptr = std::min(
- shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
- if (ref + size > freeptr)
- return nullptr;
const volatile BlockHeader* const block =
reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
- if (block->size < size)
+ if (block->cookie != kBlockCookieAllocated)
return nullptr;
- if (ref + block->size > freeptr)
+ if (block->size < size)
return nullptr;
- if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
+ if (ref + block->size > mem_size_)
return nullptr;
if (type_id != 0 &&
block->type_id.load(std::memory_order_relaxed) != type_id) {
@@ -840,6 +872,13 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}
+void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
+ // Generally there is nothing to do as every write is done through volatile
+ // memory with atomic instructions to guarantee consistency. This (virtual)
+ // method exists so that derivced classes can do special things, such as
+ // tell the OS to write changes to disk now rather than when convenient.
+}
+
void PersistentMemoryAllocator::RecordError(int error) const {
if (errors_histogram_)
errors_histogram_->Add(error);
@@ -980,7 +1019,12 @@ FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
id,
name,
read_only),
- mapped_file_(std::move(file)) {}
+ mapped_file_(std::move(file)) {
+ // Ensure the disk-copy of the data reflects the fully-initialized memory as
+ // there is no guarantee as to what order the pages might be auto-flushed by
+ // the OS in the future.
+ Flush(true);
+}
FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
@@ -990,6 +1034,33 @@ bool FilePersistentMemoryAllocator::IsFileAcceptable(
bool read_only) {
return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
}
+
+void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
+ if (sync)
+ ThreadRestrictions::AssertIOAllowed();
+ if (IsReadonly())
+ return;
+
+#if defined(OS_WIN)
+ // Windows doesn't support a synchronous flush.
+ BOOL success = ::FlushViewOfFile(data(), length);
+ DPCHECK(success);
+#elif defined(OS_MACOSX)
+ // On OSX, "invalidate" removes all cached pages, forcing a re-read from
+ // disk. That's not applicable to "flush" so omit it.
+ int result =
+ ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
+ DCHECK_NE(EINVAL, result);
+#elif defined(OS_POSIX)
+ // On POSIX, "invalidate" forces _other_ processes to recognize what has
+ // been written to disk and so is applicable to "flush".
+ int result = ::msync(const_cast<void*>(data()), length,
+ MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
+ DCHECK_NE(EINVAL, result);
+#else
+#error Unsupported OS.
+#endif
+}
#endif // !defined(OS_NACL)
} // namespace base
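
A usage sketch under the stated restrictions: the asynchronous form is a cheap hint that returns immediately, while the synchronous form blocks and is asserted to run only on a thread permitted to do I/O.

    allocator->Flush(/*sync=*/false);  // Hint the OS; flushed when convenient.
    // Only on a thread allowed to block on I/O:
    allocator->Flush(/*sync=*/true);   // Blocks until the data reaches disk.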
diff --git a/chromium/base/metrics/persistent_memory_allocator.h b/chromium/base/metrics/persistent_memory_allocator.h
index e0ac867578a..94a7744bfb2 100644
--- a/chromium/base/metrics/persistent_memory_allocator.h
+++ b/chromium/base/metrics/persistent_memory_allocator.h
@@ -96,6 +96,29 @@ class BASE_EXPORT PersistentMemoryAllocator {
public:
typedef uint32_t Reference;
+ // These states are used to indicate the overall condition of the memory
+ // segment irrespective of what is stored within it. Because the data is
+ // often persistent and thus needs to be readable by different versions of
+ // a program, these values are fixed and can never change.
+ enum MemoryState : uint8_t {
+ // Persistent memory starts all zeros and so shows "uninitialized".
+ MEMORY_UNINITIALIZED = 0,
+
+ // The header has been written and the memory is ready for use.
+ MEMORY_INITIALIZED = 1,
+
+ // The data should be considered deleted. This would be set when the
+ // allocator is being cleaned up. If file-backed, the file is likely
+ // to be deleted but since deletion can fail for a variety of reasons,
+ // having this extra status means a future reader can realize what
+ // should have happened.
+ MEMORY_DELETED = 2,
+
+ // Outside code can create states starting with this number; these too
+ // must also never change between code versions.
+ MEMORY_USER_DEFINED = 100,
+ };
+
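
A hedged reader-side sketch: a future process mapping this segment can honor a deletion that the filesystem failed to carry out.

    if (allocator->GetMemoryState() ==
        PersistentMemoryAllocator::MEMORY_DELETED) {
      return;  // Treat the segment as if the backing file were gone.
    }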
// Iterator for going through all iterable memory records in an allocator.
// Like the allocator itself, iterators are lock-free and thread-secure.
// That means that multiple threads can share an iterator and the same
@@ -217,6 +240,9 @@ class BASE_EXPORT PersistentMemoryAllocator {
};
enum : uint32_t {
+ // A value that will match any type when doing lookups.
+ kTypeIdAny = 0x00000000,
+
// A value indicating that the type is in transition. Work is being done
// on the contents to prepare it for a new type to come.
kTypeIdTransitioning = 0xFFFFFFFF,
@@ -277,7 +303,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
const char* Name() const;
// Is this segment open only for read?
- bool IsReadonly() { return readonly_; }
+ bool IsReadonly() const { return readonly_; }
+
+ // Manage the saved state of the memory.
+ void SetMemoryState(uint8_t memory_state);
+ uint8_t GetMemoryState() const;
// Create internal histograms for tracking memory use and allocation sizes
// for allocator of |name| (which can simply be the result of Name()). This
@@ -290,6 +320,17 @@ class BASE_EXPORT PersistentMemoryAllocator {
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(base::StringPiece name);
+ // Flushes the persistent memory to any backing store. This typically does
+ // nothing but is used by the FilePersistentMemoryAllocator to inform the
+ // OS that all the data should be sent to the disk immediately. This is
+ // useful in the rare case where something has just been stored that needs
+ // to survive a hard shutdown of the machine like from a power failure.
+ // The |sync| parameter indicates if this call should block until the flush
+ // is complete but is only advisory and may or may not have an effect
+ // depending on the capabilities of the OS. Synchronous flushes are allowed
+ // only from threads that are allowed to do I/O.
+ void Flush(bool sync);
+
// Direct access to underlying memory segment. If the segment is shared
// across threads or processes, reading data through these values does
// not guarantee consistency. Use with care. Do not write.
@@ -400,7 +441,8 @@ class BASE_EXPORT PersistentMemoryAllocator {
// Changing the type doesn't mean the data is compatible with the new type.
// Passing true for |clear| will zero the memory after the type has been
// changed away from |from_type_id| but before it becomes |to_type_id| meaning
- // that it is done in a manner that is thread-safe.
+ // that it is done in a manner that is thread-safe. Memory is guaranteed to
+ // be zeroed atomically by machine-word in a monotonically increasing order.
//
// It will likely be necessary to reconstruct the type before it can be used.
// Changing the type WILL NOT invalidate existing pointers to the data, either
@@ -576,6 +618,9 @@ class BASE_EXPORT PersistentMemoryAllocator {
uint64_t id, base::StringPiece name,
bool readonly);
+ // Implementation of Flush that accepts how much to flush.
+ virtual void FlushPartial(size_t length, bool sync);
+
volatile char* const mem_base_; // Memory base. (char so sizeof guaranteed 1)
const MemoryType mem_type_; // Type of memory allocation.
const uint32_t mem_size_; // Size of entire memory segment.
@@ -711,6 +756,10 @@ class BASE_EXPORT FilePersistentMemoryAllocator
// the rest.
static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
+ protected:
+ // PersistentMemoryAllocator:
+ void FlushPartial(size_t length, bool sync) override;
+
private:
std::unique_ptr<MemoryMappedFile> mapped_file_;
diff --git a/chromium/base/metrics/persistent_memory_allocator_unittest.cc b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
index d12e00f6d6b..c3027ecc121 100644
--- a/chromium/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
@@ -100,6 +100,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_TRUE(allocator_->used_histogram_);
EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
allocator_->used_histogram_->histogram_name());
+ EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
+ allocator_->GetMemoryState());
// Get base memory info for later comparison.
PersistentMemoryAllocator::MemoryInfo meminfo0;
@@ -254,6 +256,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
allocator_->Delete(obj2);
PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
+
+ // Ensure that the memory state can be set.
+ allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+ EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
+ allocator_->GetMemoryState());
}
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -691,8 +698,8 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
const size_t mmlength = mmfile->length();
EXPECT_GE(meminfo1.total, mmlength);
- FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
- EXPECT_TRUE(file.IsReadonly());
+ FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
+ EXPECT_FALSE(file.IsReadonly());
EXPECT_EQ(TEST_ID, file.Id());
EXPECT_FALSE(file.IsFull());
EXPECT_FALSE(file.IsCorrupt());
@@ -713,6 +720,11 @@ TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
EXPECT_GE(meminfo1.free, meminfo2.free);
EXPECT_EQ(mmlength, meminfo2.total);
EXPECT_EQ(0U, meminfo2.free);
+
+ // There's no way of knowing if Flush actually does anything but at least
+ // verify that it runs without CHECK violations.
+ file.Flush(false);
+ file.Flush(true);
}
TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc
index ba2101bccf5..409e6374be1 100644
--- a/chromium/base/metrics/statistics_recorder.cc
+++ b/chromium/base/metrics/statistics_recorder.cc
@@ -431,8 +431,24 @@ size_t StatisticsRecorder::GetHistogramCount() {
// static
void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
- if (histograms_)
- histograms_->erase(name);
+ if (!histograms_)
+ return;
+
+ HistogramMap::iterator found = histograms_->find(name);
+ if (found == histograms_->end())
+ return;
+
+ HistogramBase* base = found->second;
+ if (base->GetHistogramType() != SPARSE_HISTOGRAM) {
+ // When forgetting a histogram, it's likely that other information is
+ // also becoming invalid. Clear the persistent reference that may no
+ // longer be valid. There's no danger in this as, at worst, duplicates
+ // will be created in persistent memory.
+ Histogram* histogram = static_cast<Histogram*>(base);
+ histogram->bucket_ranges()->set_persistent_reference(0);
+ }
+
+ histograms_->erase(found);
}
// static
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index 193bad79703..40c4bd090b1 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -217,7 +217,7 @@ class BASE_EXPORT StatisticsRecorder {
// |bucket_ranges_|.
typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
- friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
+ friend struct LazyInstanceTraitsBase<StatisticsRecorder>;
friend class StatisticsRecorderTest;
// Imports histograms from global persistent memory. The global lock must
diff --git a/chromium/base/metrics/user_metrics.cc b/chromium/base/metrics/user_metrics.cc
index 169a0634e40..9fcc9e8a18a 100644
--- a/chromium/base/metrics/user_metrics.cc
+++ b/chromium/base/metrics/user_metrics.cc
@@ -17,10 +17,10 @@
namespace base {
namespace {
-LazyInstance<std::vector<ActionCallback>> g_callbacks =
- LAZY_INSTANCE_INITIALIZER;
-LazyInstance<scoped_refptr<SingleThreadTaskRunner>> g_task_runner =
+LazyInstance<std::vector<ActionCallback>>::DestructorAtExit g_callbacks =
LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>>::DestructorAtExit
+ g_task_runner = LAZY_INSTANCE_INITIALIZER;
} // namespace
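
A sketch of the new spelling this change requires (MyRegistry is hypothetical): the destructor policy must now be named explicitly rather than defaulted.

    base::LazyInstance<MyRegistry>::DestructorAtExit g_registry =
        LAZY_INSTANCE_INITIALIZER;
    // The leaky variant skips exit-time destruction entirely:
    base::LazyInstance<MyRegistry>::Leaky g_leaky = LAZY_INSTANCE_INITIALIZER;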
@@ -36,7 +36,7 @@ void RecordComputedAction(const std::string& action) {
if (!g_task_runner.Get()->BelongsToCurrentThread()) {
g_task_runner.Get()->PostTask(FROM_HERE,
- Bind(&RecordComputedAction, action));
+ BindOnce(&RecordComputedAction, action));
return;
}
diff --git a/chromium/base/native_library.h b/chromium/base/native_library.h
index 02eae1d5081..e2b9ca7e6d1 100644
--- a/chromium/base/native_library.h
+++ b/chromium/base/native_library.h
@@ -91,16 +91,6 @@ BASE_EXPORT NativeLibrary LoadNativeLibraryWithOptions(
const NativeLibraryOptions& options,
NativeLibraryLoadError* error);
-#if defined(OS_WIN)
-// Loads a native library from disk. Release it with UnloadNativeLibrary when
-// you're done.
-// This function retrieves the LoadLibrary function exported from kernel32.dll
-// and calls it instead of directly calling the LoadLibrary function via the
-// import table.
-BASE_EXPORT NativeLibrary LoadNativeLibraryDynamically(
- const FilePath& library_path);
-#endif // OS_WIN
-
// Unloads a native library.
BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
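
With the dynamic-LoadLibrary variant gone, callers funnel through the options-based API; a minimal sketch (the DLL name is hypothetical):

    base::NativeLibraryLoadError error;
    base::NativeLibrary lib = base::LoadNativeLibraryWithOptions(
        base::FilePath(FILE_PATH_LITERAL("example.dll")),
        base::NativeLibraryOptions(), &error);
    if (!lib)
      LOG(ERROR) << "Load failed: " << error.ToString();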
diff --git a/chromium/base/native_library_unittest.cc b/chromium/base/native_library_unittest.cc
index 5fc9d1cf06a..320442bdd11 100644
--- a/chromium/base/native_library_unittest.cc
+++ b/chromium/base/native_library_unittest.cc
@@ -100,7 +100,9 @@ TEST(NativeLibraryTest, LoadLibrary) {
// Android dlopen() requires further investigation, as it might vary across
// versions with respect to symbol resolution scope.
-#if !defined(OS_ANDROID)
+// TSan and MSan error out on RTLD_DEEPBIND, https://crbug.com/705255
+#if !defined(OS_ANDROID) && !defined(THREAD_SANITIZER) && \
+ !defined(MEMORY_SANITIZER)
// Verifies that the |prefer_own_symbols| option satisfies its guarantee that
// a loaded library will always prefer local symbol resolution before
diff --git a/chromium/base/native_library_win.cc b/chromium/base/native_library_win.cc
index 64c7380f173..68ff3d1f95d 100644
--- a/chromium/base/native_library_win.cc
+++ b/chromium/base/native_library_win.cc
@@ -7,6 +7,7 @@
#include <windows.h>
#include "base/files/file_util.h"
+#include "base/metrics/histogram_macros.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
@@ -14,16 +15,108 @@
namespace base {
-typedef HMODULE (WINAPI* LoadLibraryFunction)(const wchar_t* file_name);
+using AddDllDirectory = HMODULE (*)(PCWSTR new_directory);
namespace {
+// This enum is used to back an UMA histogram, and should therefore be treated
+// as append-only.
+enum LoadLibraryResult {
+ // LoadLibraryExW API/flags are available and the call succeeds.
+ SUCCEED = 0,
+ // LoadLibraryExW API/flags are available to use but the call fails, then
+ // LoadLibraryW is used and succeeds.
+ FAIL_AND_SUCCEED,
+ // LoadLibraryExW API/flags are available to use but the call fails, then
+ // LoadLibraryW is used but fails as well.
+ FAIL_AND_FAIL,
+ // LoadLibraryExW API/flags are unavailable to use, then LoadLibraryW is used
+ // and succeeds.
+ UNAVAILABLE_AND_SUCCEED,
+ // LoadLibraryExW API/flags are unavailable to use, then LoadLibraryW is used
+ // but fails.
+ UNAVAILABLE_AND_FAIL,
+ // Add new items before this one, always keep this one at the end.
+ END
+};
+
+// A helper method to log library loading result to UMA.
+void LogLibrarayLoadResultToUMA(LoadLibraryResult result) {
+ UMA_HISTOGRAM_ENUMERATION("LibraryLoader.LoadNativeLibraryWindows", result,
+ LoadLibraryResult::END);
+}
+
+// A helper method to check if AddDllDirectory method is available, thus
+// LOAD_LIBRARY_SEARCH_* flags are available on systems.
+bool AreSearchFlagsAvailable() {
+ // The LOAD_LIBRARY_SEARCH_* flags are available on systems that have
+ // KB2533623 installed. To determine whether the flags are available, use
+ // GetProcAddress to get the address of the AddDllDirectory,
+ // RemoveDllDirectory, or SetDefaultDllDirectories function. If GetProcAddress
+ // succeeds, the LOAD_LIBRARY_SEARCH_* flags can be used with LoadLibraryEx.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx
+ // The LOAD_LIBRARY_SEARCH_* flags are used in the LoadNativeLibraryHelper
+ // method.
+ auto add_dll_dir_func = reinterpret_cast<AddDllDirectory>(
+ GetProcAddress(GetModuleHandle(L"kernel32.dll"), "AddDllDirectory"));
+ return !!add_dll_dir_func;
+}
+
+// A helper method to encode the library loading result to enum
+// LoadLibraryResult.
+LoadLibraryResult GetLoadLibraryResult(bool are_search_flags_available,
+ bool has_load_library_succeeded) {
+ LoadLibraryResult result;
+ if (are_search_flags_available) {
+ if (has_load_library_succeeded)
+ result = LoadLibraryResult::FAIL_AND_SUCCEED;
+ else
+ result = LoadLibraryResult::FAIL_AND_FAIL;
+ } else if (has_load_library_succeeded) {
+ result = LoadLibraryResult::UNAVAILABLE_AND_SUCCEED;
+ } else {
+ result = LoadLibraryResult::UNAVAILABLE_AND_FAIL;
+ }
+ return result;
+}
NativeLibrary LoadNativeLibraryHelper(const FilePath& library_path,
- LoadLibraryFunction load_library_api,
NativeLibraryLoadError* error) {
// LoadLibrary() opens the file off disk.
ThreadRestrictions::AssertIOAllowed();
+ HMODULE module = nullptr;
+
+ // This variable records the library loading result.
+ LoadLibraryResult load_library_result = LoadLibraryResult::SUCCEED;
+
+ bool are_search_flags_available = AreSearchFlagsAvailable();
+ if (are_search_flags_available) {
+ // LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR flag is needed to search the library
+ // directory as the library may have dependencies on DLLs in this
+ // directory.
+ module = ::LoadLibraryExW(
+ library_path.value().c_str(), nullptr,
+ LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
+ // If LoadLibraryExW succeeds, log this metric and return.
+ if (module) {
+ LogLibrarayLoadResultToUMA(load_library_result);
+ return module;
+ }
+ // GetLastError() needs to be called immediately after
+ // LoadLibraryExW call.
+ if (error)
+ error->code = GetLastError();
+ }
+
+ // If LoadLibraryExW API/flags are unavailable or API call fails, try
+ // LoadLibraryW API.
+ // TODO(chengx): Currently, if LoadLibraryExW API call fails, LoadLibraryW is
+ // still tried. We should strictly prefer the LoadLibraryExW over the
+ // LoadLibraryW if LoadLibraryW is statistically showing no extra benefits. If
+ // UMA metric shows that FAIL_AND_FAIL is the primary failure mode and/or
+ // FAIL_AND_SUCCEED is close to zero, we should remove this fallback.
+ // (http://crbug.com/701944)
+
// Switch the current directory to the library directory as the library
// may have dependencies on DLLs in this directory.
bool restore_directory = false;
@@ -36,18 +129,21 @@ NativeLibrary LoadNativeLibraryHelper(const FilePath& library_path,
}
}
- HMODULE module = (*load_library_api)(library_path.value().c_str());
- if (!module && error) {
- // GetLastError() needs to be called immediately after |load_library_api|.
+ module = ::LoadLibraryW(library_path.value().c_str());
+
+ // GetLastError() needs to be called immediately after LoadLibraryW call.
+ if (!module && error)
error->code = GetLastError();
- }
if (restore_directory)
SetCurrentDirectory(current_directory);
+ // Get the library loading result and log it to UMA.
+ LogLibrarayLoadResultToUMA(
+ GetLoadLibraryResult(are_search_flags_available, !!module));
+
return module;
}
-
} // namespace
std::string NativeLibraryLoadError::ToString() const {
@@ -58,16 +154,7 @@ std::string NativeLibraryLoadError::ToString() const {
NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
- return LoadNativeLibraryHelper(library_path, LoadLibraryW, error);
-}
-
-NativeLibrary LoadNativeLibraryDynamically(const FilePath& library_path) {
- typedef HMODULE (WINAPI* LoadLibraryFunction)(const wchar_t* file_name);
-
- LoadLibraryFunction load_library = reinterpret_cast<LoadLibraryFunction>(
- GetProcAddress(GetModuleHandle(L"kernel32.dll"), "LoadLibraryW"));
-
- return LoadNativeLibraryHelper(library_path, load_library, NULL);
+ return LoadNativeLibraryHelper(library_path, error);
}
// static
diff --git a/chromium/base/nix/xdg_util.cc b/chromium/base/nix/xdg_util.cc
index f76c0cb38b1..41a89147b9e 100644
--- a/chromium/base/nix/xdg_util.cc
+++ b/chromium/base/nix/xdg_util.cc
@@ -11,6 +11,7 @@
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/path_service.h"
+#include "base/strings/string_util.h"
#include "base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h"
namespace {
@@ -57,7 +58,8 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
std::string xdg_current_desktop;
if (env->GetVar("XDG_CURRENT_DESKTOP", &xdg_current_desktop)) {
// Not all desktop environments set this env var as of this writing.
- if (xdg_current_desktop == "Unity") {
+ if (base::StartsWith(xdg_current_desktop, "Unity",
+ base::CompareCase::SENSITIVE)) {
// gnome-fallback sessions set XDG_CURRENT_DESKTOP to Unity
// DESKTOP_SESSION can be gnome-fallback or gnome-fallback-compiz
std::string desktop_session;
diff --git a/chromium/base/nix/xdg_util_unittest.cc b/chromium/base/nix/xdg_util_unittest.cc
index c8e53616ccd..1219bba82c5 100644
--- a/chromium/base/nix/xdg_util_unittest.cc
+++ b/chromium/base/nix/xdg_util_unittest.cc
@@ -35,6 +35,8 @@ const char* const kDesktopXFCE = "xfce";
const char* const kXdgDesktopGNOME = "GNOME";
const char* const kXdgDesktopKDE = "KDE";
const char* const kXdgDesktopUnity = "Unity";
+const char* const kXdgDesktopUnity7 = "Unity:Unity7";
+const char* const kXdgDesktopUnity8 = "Unity:Unity8";
const char* const kKDESessionKDE5 = "5";
const char kDesktopSession[] = "DESKTOP_SESSION";
@@ -138,5 +140,23 @@ TEST(XDGUtilTest, GetXdgDesktopUnity) {
EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
}
+TEST(XDGUtilTest, GetXdgDesktopUnity7) {
+ MockEnvironment getter;
+ EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopUnity7), Return(true)));
+
+ EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopUnity8) {
+ MockEnvironment getter;
+ EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopUnity8), Return(true)));
+
+ EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
+}
+
} // namespace nix
} // namespace base
diff --git a/chromium/base/numerics/safe_conversions_impl.h b/chromium/base/numerics/safe_conversions_impl.h
index 24357fd6a57..020cac0539a 100644
--- a/chromium/base/numerics/safe_conversions_impl.h
+++ b/chromium/base/numerics/safe_conversions_impl.h
@@ -517,38 +517,16 @@ struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
static const bool is_contained = false;
};
-// This hacks around libstdc++ 4.6 missing stuff in type_traits.
-#if defined(__GLIBCXX__)
-#define PRIV_GLIBCXX_4_7_0 20120322
-#define PRIV_GLIBCXX_4_5_4 20120702
-#define PRIV_GLIBCXX_4_6_4 20121127
-#if (__GLIBCXX__ < PRIV_GLIBCXX_4_7_0 || __GLIBCXX__ == PRIV_GLIBCXX_4_5_4 || \
- __GLIBCXX__ == PRIV_GLIBCXX_4_6_4)
-#define PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#undef PRIV_GLIBCXX_4_7_0
-#undef PRIV_GLIBCXX_4_5_4
-#undef PRIV_GLIBCXX_4_6_4
-#endif
-#endif
-
// Extracts the underlying type from an enum.
template <typename T, bool is_enum = std::is_enum<T>::value>
struct ArithmeticOrUnderlyingEnum;
template <typename T>
struct ArithmeticOrUnderlyingEnum<T, true> {
-#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
- using type = __underlying_type(T);
-#else
using type = typename std::underlying_type<T>::type;
-#endif
static const bool value = std::is_arithmetic<type>::value;
};
-#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
-#undef PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#endif
-
template <typename T>
struct ArithmeticOrUnderlyingEnum<T, false> {
using type = T;
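
An illustrative check under the simplified trait (Flags is hypothetical; the struct lives in an internal namespace):

    enum class Flags : uint8_t { kNone = 0 };
    static_assert(
        std::is_same<ArithmeticOrUnderlyingEnum<Flags>::type, uint8_t>::value,
        "scoped enum decays to its underlying type");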
diff --git a/chromium/base/numerics/saturated_arithmetic.h b/chromium/base/numerics/saturated_arithmetic.h
index 7e24fe3617b..74fbba808d6 100644
--- a/chromium/base/numerics/saturated_arithmetic.h
+++ b/chromium/base/numerics/saturated_arithmetic.h
@@ -56,6 +56,12 @@ ALWAYS_INLINE int32_t SaturatedNegative(int32_t a) {
return -a;
}
+ALWAYS_INLINE int32_t SaturatedAbsolute(int32_t a) {
+ if (a >= 0)
+ return a;
+ return SaturatedNegative(a);
+}
+
ALWAYS_INLINE int GetMaxSaturatedSetResultForTesting(int fractional_shift) {
// For C version the set function maxes out to max int, this differs from
// the ARM asm version, see saturated_arithmetic_arm.h for the equivalent asm
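
A sketch of the behavior the new helper inherits from SaturatedNegative (values assumed): negating INT_MIN would overflow, so the result pins at INT_MAX instead of invoking undefined behavior.

    int32_t v = std::numeric_limits<int32_t>::min();
    int32_t a = SaturatedAbsolute(v);    // INT_MAX, saturated
    int32_t b = SaturatedAbsolute(-42);  // 42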
diff --git a/chromium/base/numerics/saturated_arithmetic_arm.h b/chromium/base/numerics/saturated_arithmetic_arm.h
index e5017e4ea37..732f5f2c1f5 100644
--- a/chromium/base/numerics/saturated_arithmetic_arm.h
+++ b/chromium/base/numerics/saturated_arithmetic_arm.h
@@ -33,6 +33,12 @@ inline int32_t SaturatedNegative(int32_t a) {
return SaturatedSubtraction(0, a);
}
+inline int32_t SaturatedAbsolute(int32_t a) {
+ if (a >= 0)
+ return a;
+ return SaturatedNegative(a);
+}
+
inline int GetMaxSaturatedSetResultForTesting(int fractional_shift) {
// For ARM Asm version the set function maxes out to the biggest
// possible integer part with the fractional part zero'd out.
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index afb1010b67c..c175c1787da 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -5,52 +5,49 @@
#ifndef BASE_OBSERVER_LIST_THREADSAFE_H_
#define BASE_OBSERVER_LIST_THREADSAFE_H_
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <tuple>
+#include <unordered_map>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/observer_list.h"
+#include "base/sequenced_task_runner.h"
+#include "base/stl_util.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_local.h"
+#include "build/build_config.h"
+
+// TODO(fdoray): Removing these includes causes IWYU failures in other headers,
+// remove them in a follow-up CL.
+#include "base/memory/ptr_util.h"
#include "base/single_thread_task_runner.h"
-#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
///////////////////////////////////////////////////////////////////////////////
//
// OVERVIEW:
//
-// A thread-safe container for a list of observers.
-// This is similar to the observer_list (see observer_list.h), but it
-// is more robust for multi-threaded situations.
+// A thread-safe container for a list of observers. This is similar to the
+// observer_list (see observer_list.h), but it is more robust for multi-
+// threaded situations.
//
// The following use cases are supported:
-// * Observers can register for notifications from any thread.
-// Callbacks to the observer will occur on the same thread where
-// the observer initially called AddObserver() from.
-// * Any thread may trigger a notification via Notify().
-// * Observers can remove themselves from the observer list inside
-// of a callback.
-// * If one thread is notifying observers concurrently with an observer
-// removing itself from the observer list, the notifications will
-// be silently dropped.
-//
-// The drawback of the threadsafe observer list is that notifications
-// are not as real-time as the non-threadsafe version of this class.
-// Notifications will always be done via PostTask() to another thread,
-// whereas with the non-thread-safe observer_list, notifications happen
-// synchronously and immediately.
+// * Observers can register for notifications from any sequence. They are
+// always notified on the sequence from which they were registered.
+// * Any sequence may trigger a notification via Notify().
+// * Observers can remove themselves from the observer list inside of a
+// callback.
+// * If one sequence is notifying observers concurrently with an observer
+// removing itself from the observer list, the notifications will be
+// silently dropped.
//
-// IMPLEMENTATION NOTES
-// The ObserverListThreadSafe maintains an ObserverList for each thread
-// which uses the ThreadSafeObserver. When Notifying the observers,
-// we simply call PostTask to each registered thread, and then each thread
-// will notify its regular ObserverList.
+// The drawback of the threadsafe observer list is that notifications are not
+// as real-time as the non-threadsafe version of this class. Notifications
+// will always be done via PostTask() to another sequence, whereas with the
+// non-thread-safe observer_list, notifications happen synchronously.
//
///////////////////////////////////////////////////////////////////////////////
@@ -77,68 +74,63 @@ class ObserverListThreadSafe
using NotificationType =
typename ObserverList<ObserverType>::NotificationType;
- ObserverListThreadSafe()
- : type_(ObserverListBase<ObserverType>::NOTIFY_ALL) {}
+ ObserverListThreadSafe() = default;
explicit ObserverListThreadSafe(NotificationType type) : type_(type) {}
- // Add an observer to the list. An observer should not be added to
- // the same list more than once.
- void AddObserver(ObserverType* obs) {
- // If there is no ThreadTaskRunnerHandle, it is impossible to notify on it,
- // so do not add the observer.
- if (!ThreadTaskRunnerHandle::IsSet())
+ // Adds |observer| to the list. |observer| must not already be in the list.
+ void AddObserver(ObserverType* observer) {
+ // TODO(fdoray): Change this to a DCHECK once all call sites have a
+ // SequencedTaskRunnerHandle.
+ if (!SequencedTaskRunnerHandle::IsSet())
return;
- ObserverList<ObserverType>* list = nullptr;
- PlatformThreadId thread_id = PlatformThread::CurrentId();
- {
- AutoLock lock(list_lock_);
- if (observer_lists_.find(thread_id) == observer_lists_.end()) {
- observer_lists_[thread_id] =
- base::MakeUnique<ObserverListContext>(type_);
+ AutoLock auto_lock(lock_);
+
+ // Add |observer| to the list of observers.
+ DCHECK(!ContainsKey(observers_, observer));
+ const scoped_refptr<SequencedTaskRunner> task_runner =
+ SequencedTaskRunnerHandle::Get();
+ observers_[observer] = task_runner;
+
+ // If this is called while a notification is being dispatched on this thread
+ // and |type_| is NOTIFY_ALL, |observer| must be notified (if a notification
+ // is being dispatched on another thread in parallel, the notification may
+ // or may not make it to |observer| depending on the outcome of the race to
+ // |lock_|).
+ if (type_ == NotificationType::NOTIFY_ALL) {
+ const NotificationData* current_notification =
+ tls_current_notification_.Get();
+ if (current_notification) {
+ task_runner->PostTask(
+ current_notification->from_here,
+ Bind(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+ observer, *current_notification));
}
- list = &(observer_lists_[thread_id]->list);
}
- list->AddObserver(obs);
}
// Remove an observer from the list if it is in the list.
- // If there are pending notifications in-transit to the observer, they will
- // be aborted.
- // If the observer to be removed is in the list, RemoveObserver MUST
- // be called from the same thread which called AddObserver.
- void RemoveObserver(ObserverType* obs) {
- PlatformThreadId thread_id = PlatformThread::CurrentId();
- {
- AutoLock lock(list_lock_);
- auto it = observer_lists_.find(thread_id);
- if (it == observer_lists_.end()) {
- // This will happen if we try to remove an observer on a thread
- // we never added an observer for.
- return;
- }
- ObserverList<ObserverType>& list = it->second->list;
-
- list.RemoveObserver(obs);
-
- // If that was the last observer in the list, remove the ObserverList
- // entirely.
- if (list.size() == 0)
- observer_lists_.erase(it);
- }
+ //
+ // If a notification was sent to the observer but hasn't started to run yet,
+ // it will be aborted. If a notification has started to run, removing the
+ // observer won't stop it.
+ void RemoveObserver(ObserverType* observer) {
+ AutoLock auto_lock(lock_);
+ observers_.erase(observer);
}
// Verifies that the list is currently empty (i.e. there are no observers).
void AssertEmpty() const {
- AutoLock lock(list_lock_);
- DCHECK(observer_lists_.empty());
+#if DCHECK_IS_ON()
+ AutoLock auto_lock(lock_);
+ DCHECK(observers_.empty());
+#endif
}
- // Notify methods.
- // Make a thread-safe callback to each Observer in the list.
- // Note, these calls are effectively asynchronous. You cannot assume
- // that at the completion of the Notify call that all Observers have
- // been Notified. The notification may still be pending delivery.
+ // Asynchronously invokes a callback on all observers, on their registration
+ // sequence. You cannot assume that at the completion of the Notify call that
+ // all Observers have been Notified. The notification may still be pending
+ // delivery.
template <typename Method, typename... Params>
void Notify(const tracked_objects::Location& from_here,
Method m, Params&&... params) {
@@ -146,79 +138,71 @@ class ObserverListThreadSafe
Bind(&internal::Dispatcher<ObserverType, Method>::Run,
m, std::forward<Params>(params)...);
- AutoLock lock(list_lock_);
- for (const auto& entry : observer_lists_) {
- ObserverListContext* context = entry.second.get();
- context->task_runner->PostTask(
+ AutoLock lock(lock_);
+ for (const auto& observer : observers_) {
+ observer.second->PostTask(
from_here,
- Bind(&ObserverListThreadSafe<ObserverType>::NotifyWrapper,
- this, context, method));
+ BindOnce(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+ observer.first, NotificationData(from_here, method)));
}
}
private:
friend class RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
- struct ObserverListContext {
- explicit ObserverListContext(NotificationType type)
- : task_runner(ThreadTaskRunnerHandle::Get()), list(type) {}
-
- scoped_refptr<SingleThreadTaskRunner> task_runner;
- ObserverList<ObserverType> list;
+ struct NotificationData {
+ NotificationData(const tracked_objects::Location& from_here_in,
+ const Callback<void(ObserverType*)>& method_in)
+ : from_here(from_here_in), method(method_in) {}
- private:
- DISALLOW_COPY_AND_ASSIGN(ObserverListContext);
+ tracked_objects::Location from_here;
+ Callback<void(ObserverType*)> method;
};
- ~ObserverListThreadSafe() {
- }
+ ~ObserverListThreadSafe() = default;
- // Wrapper which is called to fire the notifications for each thread's
- // ObserverList. This function MUST be called on the thread which owns
- // the unsafe ObserverList.
- void NotifyWrapper(ObserverListContext* context,
- const Callback<void(ObserverType*)>& method) {
- // Check that this list still needs notifications.
+ void NotifyWrapper(ObserverType* observer,
+ const NotificationData& notification) {
{
- AutoLock lock(list_lock_);
- auto it = observer_lists_.find(PlatformThread::CurrentId());
-
- // The ObserverList could have been removed already. In fact, it could
- // have been removed and then re-added! If the master list's loop
- // does not match this one, then we do not need to finish this
- // notification.
- if (it == observer_lists_.end() || it->second.get() != context)
- return;
- }
+ AutoLock auto_lock(lock_);
- for (auto& observer : context->list) {
- method.Run(&observer);
+ // Check whether the observer still needs a notification.
+ auto it = observers_.find(observer);
+ if (it == observers_.end())
+ return;
+ DCHECK(it->second->RunsTasksOnCurrentThread());
}
- // If there are no more observers on the list, we can now delete it.
- if (context->list.size() == 0) {
- {
- AutoLock lock(list_lock_);
- // Remove |list| if it's not already removed.
- // This can happen if multiple observers got removed in a notification.
- // See http://crbug.com/55725.
- auto it = observer_lists_.find(PlatformThread::CurrentId());
- if (it != observer_lists_.end() && it->second.get() == context)
- observer_lists_.erase(it);
- }
- }
+ // Keep track of the notification being dispatched on the current thread.
+ // This will be used if the callback below calls AddObserver().
+ //
+ // Note: |tls_current_notification_| may not be nullptr if this runs in a
+ // nested loop started by a notification callback. In that case, it is
+ // important to save the previous value to restore it later.
+ const NotificationData* const previous_notification =
+ tls_current_notification_.Get();
+ tls_current_notification_.Set(&notification);
+
+ // Invoke the callback.
+ notification.method.Run(observer);
+
+ // Reset the notification being dispatched on the current thread to its
+ // previous value.
+ tls_current_notification_.Set(previous_notification);
}
- mutable Lock list_lock_; // Protects the observer_lists_.
+ const NotificationType type_ = NotificationType::NOTIFY_ALL;
+
+ // Synchronizes access to |observers_|.
+ mutable Lock lock_;
- // Key by PlatformThreadId because in tests, clients can attempt to remove
- // observers without a SingleThreadTaskRunner. If this were keyed by
- // SingleThreadTaskRunner, that operation would be silently ignored, leaving
- // garbage in the ObserverList.
- std::map<PlatformThreadId, std::unique_ptr<ObserverListContext>>
- observer_lists_;
+ // Keys are observers. Values are the SequencedTaskRunners on which they must
+ // be notified.
+ std::unordered_map<ObserverType*, scoped_refptr<SequencedTaskRunner>>
+ observers_;
- const NotificationType type_;
+ // Notification being dispatched on the current thread.
+ ThreadLocalPointer<const NotificationData> tls_current_notification_;
DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
};
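
A minimal usage sketch of the reworked class; the observer interface and call
sites are hypothetical, but the sequence semantics follow the comments above:

  class NetworkObserver {
   public:
    virtual void OnConnectionChanged(bool online) = 0;
  };

  scoped_refptr<ObserverListThreadSafe<NetworkObserver>> observers =
      new ObserverListThreadSafe<NetworkObserver>();

  // On sequence A (requires a SequencedTaskRunnerHandle); |this| will be
  // notified on sequence A from now on.
  observers->AddObserver(this);

  // From any sequence; delivery is asynchronous via PostTask().
  observers->Notify(FROM_HERE, &NetworkObserver::OnConnectionChanged, true);

  // Can be called from any sequence; notifications already posted to |this|
  // but not yet started are dropped.
  observers->RemoveObserver(this);
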
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index c5e556bd9da..d0248c90faf 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -5,13 +5,21 @@
#include "base/observer_list.h"
#include "base/observer_list_threadsafe.h"
+#include <utility>
#include <vector>
+#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
+#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/scoped_task_scheduler.h"
#include "base/threading/platform_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -65,20 +73,6 @@ class Disrupter : public Foo {
bool remove_self_;
};
-class ThreadSafeDisrupter : public Foo {
- public:
- ThreadSafeDisrupter(ObserverListThreadSafe<Foo>* list, Foo* doomed)
- : list_(list),
- doomed_(doomed) {
- }
- ~ThreadSafeDisrupter() override {}
- void Observe(int x) override { list_->RemoveObserver(doomed_); }
-
- private:
- ObserverListThreadSafe<Foo>* list_;
- Foo* doomed_;
-};
-
template <typename ObserverListType>
class AddInObserve : public Foo {
public:
@@ -124,7 +118,7 @@ class AddRemoveThread : public PlatformThread::Delegate,
loop_ = new MessageLoop(); // Fire up a message loop.
loop_->task_runner()->PostTask(
FROM_HERE,
- base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+ base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
RunLoop().Run();
delete loop_;
loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
@@ -152,7 +146,7 @@ class AddRemoveThread : public PlatformThread::Delegate,
loop_->task_runner()->PostTask(
FROM_HERE,
- base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+ base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
}
void Quit() {
@@ -276,7 +270,6 @@ TEST(ObserverListThreadSafeTest, BasicTest) {
Adder b(-1);
Adder c(1);
Adder d(-1);
- ThreadSafeDisrupter evil(observer_list.get(), &c);
observer_list->AddObserver(&a);
observer_list->AddObserver(&b);
@@ -284,11 +277,11 @@ TEST(ObserverListThreadSafeTest, BasicTest) {
observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
RunLoop().RunUntilIdle();
- observer_list->AddObserver(&evil);
observer_list->AddObserver(&c);
observer_list->AddObserver(&d);
observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+ observer_list->RemoveObserver(&c);
RunLoop().RunUntilIdle();
EXPECT_EQ(20, a.total);
@@ -329,18 +322,18 @@ TEST(ObserverListThreadSafeTest, RemoveObserver) {
EXPECT_EQ(0, b.total);
}
-TEST(ObserverListThreadSafeTest, WithoutMessageLoop) {
+TEST(ObserverListThreadSafeTest, WithoutSequence) {
scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
new ObserverListThreadSafe<Foo>);
Adder a(1), b(1), c(1);
- // No MessageLoop, so these should not be added.
+ // No sequence, so these should not be added.
observer_list->AddObserver(&a);
observer_list->AddObserver(&b);
{
- // Add c when there's a loop.
+ // Add c when there's a sequence.
MessageLoop loop;
observer_list->AddObserver(&c);
@@ -351,10 +344,10 @@ TEST(ObserverListThreadSafeTest, WithoutMessageLoop) {
EXPECT_EQ(0, b.total);
EXPECT_EQ(10, c.total);
- // Now add a when there's a loop.
+ // Now add a when there's a sequence.
observer_list->AddObserver(&a);
- // Remove c when there's a loop.
+ // Remove c when there's a sequence.
observer_list->RemoveObserver(&c);
// Notify again.
@@ -366,7 +359,7 @@ TEST(ObserverListThreadSafeTest, WithoutMessageLoop) {
EXPECT_EQ(10, c.total);
}
- // Removing should always succeed with or without a loop.
+ // Removing should always succeed with or without a sequence.
observer_list->RemoveObserver(&a);
// Notifying should not fail but should also be a no-op.
@@ -491,6 +484,135 @@ TEST(ObserverListThreadSafeTest, OutlivesMessageLoop) {
observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
}
+namespace {
+
+class SequenceVerificationObserver : public Foo {
+ public:
+ explicit SequenceVerificationObserver(
+ scoped_refptr<SequencedTaskRunner> task_runner)
+ : task_runner_(std::move(task_runner)) {}
+ ~SequenceVerificationObserver() override = default;
+
+ void Observe(int x) override {
+ called_on_valid_sequence_ = task_runner_->RunsTasksOnCurrentThread();
+ }
+
+ bool called_on_valid_sequence() const { return called_on_valid_sequence_; }
+
+ private:
+ const scoped_refptr<SequencedTaskRunner> task_runner_;
+ bool called_on_valid_sequence_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceVerificationObserver);
+};
+
+} // namespace
+
+// Verify that observers are notified on the correct sequence.
+TEST(ObserverListThreadSafeTest, NotificationOnValidSequence) {
+ test::ScopedTaskEnvironment scoped_task_environment;
+
+ auto task_runner_1 = CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ auto task_runner_2 = CreateSequencedTaskRunnerWithTraits(TaskTraits());
+
+ auto observer_list = make_scoped_refptr(new ObserverListThreadSafe<Foo>());
+
+ SequenceVerificationObserver observer_1(task_runner_1);
+ SequenceVerificationObserver observer_2(task_runner_2);
+
+ task_runner_1->PostTask(
+ FROM_HERE, Bind(&ObserverListThreadSafe<Foo>::AddObserver, observer_list,
+ Unretained(&observer_1)));
+ task_runner_2->PostTask(
+ FROM_HERE, Bind(&ObserverListThreadSafe<Foo>::AddObserver, observer_list,
+ Unretained(&observer_2)));
+
+ TaskScheduler::GetInstance()->FlushForTesting();
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+
+ TaskScheduler::GetInstance()->FlushForTesting();
+
+ EXPECT_TRUE(observer_1.called_on_valid_sequence());
+ EXPECT_TRUE(observer_2.called_on_valid_sequence());
+}
+
+// Verify that when an observer is added to a NOTIFY_ALL ObserverListThreadSafe
+// from a notification, it is itself notified.
+TEST(ObserverListThreadSafeTest, AddObserverFromNotificationNotifyAll) {
+ test::ScopedTaskEnvironment scoped_task_environment;
+ auto observer_list = make_scoped_refptr(new ObserverListThreadSafe<Foo>());
+
+ Adder observer_added_from_notification(1);
+
+ AddInObserve<ObserverListThreadSafe<Foo>> initial_observer(
+ observer_list.get());
+ initial_observer.SetToAdd(&observer_added_from_notification);
+ observer_list->AddObserver(&initial_observer);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(1, observer_added_from_notification.GetValue());
+}
+
+namespace {
+
+class RemoveWhileNotificationIsRunningObserver : public Foo {
+ public:
+ RemoveWhileNotificationIsRunningObserver()
+ : notification_running_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ barrier_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+ ~RemoveWhileNotificationIsRunningObserver() override = default;
+
+ void Observe(int x) override {
+ notification_running_.Signal();
+ barrier_.Wait();
+ }
+
+ void WaitForNotificationRunning() { notification_running_.Wait(); }
+ void Unblock() { barrier_.Signal(); }
+
+ private:
+ WaitableEvent notification_running_;
+ WaitableEvent barrier_;
+
+ DISALLOW_COPY_AND_ASSIGN(RemoveWhileNotificationIsRunningObserver);
+};
+
+} // namespace
+
+// Verify that there is no crash when an observer is removed while it is being
+// notified.
+TEST(ObserverListThreadSafeTest, RemoveWhileNotificationIsRunning) {
+ auto observer_list = make_scoped_refptr(new ObserverListThreadSafe<Foo>());
+ RemoveWhileNotificationIsRunningObserver observer;
+
+ WaitableEvent task_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent barrier(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ // This must be after the declaration of |barrier| so that tasks posted to
+ // TaskScheduler can safely use |barrier|.
+ test::ScopedTaskEnvironment scoped_task_environment;
+
+ CreateSequencedTaskRunnerWithTraits(TaskTraits().WithBaseSyncPrimitives())
+ ->PostTask(FROM_HERE,
+ base::Bind(&ObserverListThreadSafe<Foo>::AddObserver,
+ observer_list, Unretained(&observer)));
+ TaskScheduler::GetInstance()->FlushForTesting();
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+ observer.WaitForNotificationRunning();
+ observer_list->RemoveObserver(&observer);
+
+ observer.Unblock();
+}
+
TEST(ObserverListTest, Existing) {
ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
Adder a(1);
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index cf65ad7dac1..2be2f821754 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -8,7 +8,6 @@
#include <type_traits>
#include "base/logging.h"
-#include "base/template_util.h"
namespace base {
@@ -32,7 +31,7 @@ constexpr nullopt_t nullopt(0);
namespace internal {
-template <typename T, bool = base::is_trivially_destructible<T>::value>
+template <typename T, bool = std::is_trivially_destructible<T>::value>
struct OptionalStorage {
// Initializing |empty_| here instead of using default member initialization
// to avoid errors in g++ 4.8.
diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc
index 83025e8bdad..8b9a1ae2a39 100644
--- a/chromium/base/optional_unittest.cc
+++ b/chromium/base/optional_unittest.cc
@@ -89,11 +89,11 @@ class NonTriviallyDestructible {
} // anonymous namespace
-static_assert(is_trivially_destructible<Optional<int>>::value,
+static_assert(std::is_trivially_destructible<Optional<int>>::value,
"OptionalIsTriviallyDestructible");
static_assert(
- !is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
+ !std::is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
"OptionalIsTriviallyDestructible");
TEST(OptionalTest, DefaultConstructor) {
diff --git a/chromium/base/pickle.cc b/chromium/base/pickle.cc
index 02f39b57b7b..7c8420ba583 100644
--- a/chromium/base/pickle.cc
+++ b/chromium/base/pickle.cc
@@ -308,7 +308,6 @@ Pickle::~Pickle() {
Pickle& Pickle::operator=(const Pickle& other) {
if (this == &other) {
- NOTREACHED();
return *this;
}
if (capacity_after_header_ == kCapacityReadOnly) {
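
With the NOTREACHED() dropped, self-assignment becomes a silent no-op rather
than a debug-build failure; a small sketch:

  Pickle p;
  p.WriteInt(42);
  p = p;  // Previously hit NOTREACHED() in debug builds; now just returns *this.
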
diff --git a/chromium/base/posix/unix_domain_socket_linux.cc b/chromium/base/posix/unix_domain_socket_linux.cc
index 8b3094eedfc..6b445c6615b 100644
--- a/chromium/base/posix/unix_domain_socket_linux.cc
+++ b/chromium/base/posix/unix_domain_socket_linux.cc
@@ -26,10 +26,7 @@ namespace base {
const size_t UnixDomainSocket::kMaxFileDescriptors = 16;
#if !defined(OS_NACL_NONSFI)
-// Creates a connected pair of UNIX-domain SOCK_SEQPACKET sockets, and passes
-// ownership of the newly allocated file descriptors to |one| and |two|.
-// Returns true on success.
-static bool CreateSocketPair(ScopedFD* one, ScopedFD* two) {
+bool CreateSocketPair(ScopedFD* one, ScopedFD* two) {
int raw_socks[2];
if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, raw_socks) == -1)
return false;
diff --git a/chromium/base/posix/unix_domain_socket_linux.h b/chromium/base/posix/unix_domain_socket_linux.h
index 2ba739e1083..84e7d160093 100644
--- a/chromium/base/posix/unix_domain_socket_linux.h
+++ b/chromium/base/posix/unix_domain_socket_linux.h
@@ -19,6 +19,13 @@ namespace base {
class Pickle;
+#if !defined(OS_NACL_NONSFI)
+// Creates a connected pair of UNIX-domain SOCK_SEQPACKET sockets, and passes
+// ownership of the newly allocated file descriptors to |one| and |two|.
+// Returns true on success.
+bool BASE_EXPORT CreateSocketPair(ScopedFD* one, ScopedFD* two);
+#endif
+
class BASE_EXPORT UnixDomainSocket {
public:
// Maximum number of file descriptors that can be read by RecvMsg().
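
With the helper now exported, callers outside the .cc can use it directly; a
minimal caller sketch (error handling abbreviated):

  base::ScopedFD parent_end, child_end;
  if (!base::CreateSocketPair(&parent_end, &child_end)) {
    PLOG(ERROR) << "socketpair";
    return false;
  }
  // Both ends are connected SOCK_SEQPACKET sockets; the ScopedFDs own and
  // eventually close the descriptors.
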
diff --git a/chromium/base/posix/unix_domain_socket_linux_unittest.cc b/chromium/base/posix/unix_domain_socket_linux_unittest.cc
index 3f5173cfc21..47ba6222c43 100644
--- a/chromium/base/posix/unix_domain_socket_linux_unittest.cc
+++ b/chromium/base/posix/unix_domain_socket_linux_unittest.cc
@@ -36,9 +36,9 @@ TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
// Have the thread send a synchronous message via the socket.
Pickle request;
message_thread.task_runner()->PostTask(
- FROM_HERE,
- Bind(IgnoreResult(&UnixDomainSocket::SendRecvMsg), fds[1],
- static_cast<uint8_t*>(NULL), 0U, static_cast<int*>(NULL), request));
+ FROM_HERE, BindOnce(IgnoreResult(&UnixDomainSocket::SendRecvMsg), fds[1],
+ static_cast<uint8_t*>(NULL), 0U,
+ static_cast<int*>(NULL), request));
// Receive the message.
std::vector<ScopedFD> message_fds;
@@ -55,7 +55,7 @@ TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
message_thread.task_runner()->PostTask(
- FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
+ FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&event)));
ASSERT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(5000)));
}
diff --git a/chromium/base/post_task_and_reply_with_result_internal.h b/chromium/base/post_task_and_reply_with_result_internal.h
index 1456129324a..6f50de8b860 100644
--- a/chromium/base/post_task_and_reply_with_result_internal.h
+++ b/chromium/base/post_task_and_reply_with_result_internal.h
@@ -16,16 +16,15 @@ namespace internal {
// Adapts a function that produces a result via a return value to
// one that returns via an output parameter.
template <typename ReturnType>
-void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
- ReturnType* result) {
- *result = func.Run();
+void ReturnAsParamAdapter(OnceCallback<ReturnType()> func, ReturnType* result) {
+ *result = std::move(func).Run();
}
// Adapts a T* result to a callback that expects a T.
template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+void ReplyAdapter(OnceCallback<void(ReplyArgType)> callback,
TaskReturnType* result) {
- callback.Run(std::move(*result));
+ std::move(callback).Run(std::move(*result));
}
} // namespace internal
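
These adapters are the plumbing behind base::PostTaskAndReplyWithResult(); a
usage sketch with hypothetical ComputeValue()/UseValue() functions:

  // Runs ComputeValue() on |task_runner|, then UseValue(result) back on the
  // posting sequence. The move-only OnceCallback plumbing above carries the
  // result between the two.
  base::PostTaskAndReplyWithResult(
      task_runner.get(), FROM_HERE,
      base::Bind(&ComputeValue),   // returns e.g. int
      base::Bind(&UseValue));      // takes int
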
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index be8f6e73b9f..99a7280cb35 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -262,6 +262,11 @@ BASE_EXPORT bool GetAppOutput(const StringPiece16& cl, std::string* output);
BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
std::string* output);
+// Like the above POSIX-specific version of GetAppOutput, but also includes
+// stderr.
+BASE_EXPORT bool GetAppOutputAndError(const std::vector<std::string>& argv,
+ std::string* output);
+
// A version of |GetAppOutput()| which also returns the exit code of the
// executed command. Returns true if the application runs and exits cleanly. If
// this is the case the exit code of the application is available in
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index 44eafcfb05a..2184051552d 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -663,6 +663,14 @@ bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
return result && exit_code == EXIT_SUCCESS;
}
+bool GetAppOutputAndError(const std::vector<std::string>& argv,
+ std::string* output) {
+ int exit_code;
+ bool result =
+ GetAppOutputInternal(argv, nullptr, true, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
+}
+
bool GetAppOutputWithExitCode(const CommandLine& cl,
std::string* output,
int* exit_code) {
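
A sketch of the new argv overload (command line hypothetical); like the
CommandLine version, it returns true only for a clean EXIT_SUCCESS exit:

  std::vector<std::string> argv = {"/bin/sh", "-c", "echo out; echo err 1>&2"};
  std::string output;
  if (base::GetAppOutputAndError(argv, &output)) {
    // |output| interleaves stdout and stderr: "out\nerr\n".
  }
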
diff --git a/chromium/base/process/launch_win.cc b/chromium/base/process/launch_win.cc
index 97b59a5bfe4..f55c9684150 100644
--- a/chromium/base/process/launch_win.cc
+++ b/chromium/base/process/launch_win.cc
@@ -17,6 +17,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/debug/activity_tracker.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
@@ -94,7 +95,12 @@ bool GetAppOutputInternal(const StringPiece16& cl,
NOTREACHED() << "Failed to start process";
return false;
}
+
base::win::ScopedProcessInformation proc_info(temp_process_info);
+ base::debug::GlobalActivityTracker* tracker =
+ base::debug::GlobalActivityTracker::Get();
+ if (tracker)
+ tracker->RecordProcessLaunch(proc_info.process_id(), cl.as_string());
// Close our writing end of pipe now. Otherwise later read would not be able
// to detect end of child's output.
@@ -119,6 +125,8 @@ bool GetAppOutputInternal(const StringPiece16& cl,
int exit_code;
base::TerminationStatus status = GetTerminationStatus(
proc_info.process_handle(), &exit_code);
+ base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(
+ proc_info.process_id(), exit_code);
return status != base::TERMINATION_STATUS_PROCESS_CRASHED &&
status != base::TERMINATION_STATUS_ABNORMAL_TERMINATION;
}
@@ -324,6 +332,8 @@ Process LaunchProcess(const string16& cmdline,
if (options.wait)
WaitForSingleObject(process_info.process_handle(), INFINITE);
+ base::debug::GlobalActivityTracker::RecordProcessLaunchIfEnabled(
+ process_info.process_id(), cmdline);
return Process(process_info.TakeProcessHandle());
}
@@ -351,6 +361,8 @@ Process LaunchElevatedProcess(const CommandLine& cmdline,
if (options.wait)
WaitForSingleObject(shex_info.hProcess, INFINITE);
+ base::debug::GlobalActivityTracker::RecordProcessLaunchIfEnabled(
+ GetProcessId(shex_info.hProcess), file, arguments);
return Process(shex_info.hProcess);
}
diff --git a/chromium/base/process/memory_unittest.cc b/chromium/base/process/memory_unittest.cc
index ecf0b871ecb..a097897c0b0 100644
--- a/chromium/base/process/memory_unittest.cc
+++ b/chromium/base/process/memory_unittest.cc
@@ -79,6 +79,10 @@ TEST(ProcessMemoryTest, MacTerminateOnHeapCorruption) {
#else
ADD_FAILURE() << "This test is not supported in this build configuration.";
#endif
+
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ base::allocator::UninterceptMallocZonesForTesting();
+#endif
}
#endif // defined(OS_MACOSX)
@@ -91,6 +95,10 @@ TEST(MemoryTest, AllocatorShimWorking) {
base::allocator::InterceptAllocationsMac();
#endif
ASSERT_TRUE(base::allocator::IsAllocatorInitialized());
+
+#if defined(OS_MACOSX)
+ base::allocator::UninterceptMallocZonesForTesting();
+#endif
}
// OpenBSD does not support these tests. Don't test these on ASan/TSan/MSan
@@ -146,6 +154,12 @@ class OutOfMemoryDeathTest : public OutOfMemoryTest {
// should be done inside of the ASSERT_DEATH.
base::EnableTerminationOnOutOfMemory();
}
+
+#if defined(OS_MACOSX)
+ void TearDown() override {
+ base::allocator::UninterceptMallocZonesForTesting();
+ }
+#endif
};
TEST_F(OutOfMemoryDeathTest, New) {
@@ -422,6 +436,12 @@ class OutOfMemoryHandledTest : public OutOfMemoryTest {
// properly by-pass this in order to allow the caller to handle OOM.
base::EnableTerminationOnOutOfMemory();
}
+
+ void TearDown() override {
+#if defined(OS_MACOSX)
+ base::allocator::UninterceptMallocZonesForTesting();
+#endif
+ }
};
#if defined(OS_WIN)
diff --git a/chromium/base/process/process_info_linux.cc b/chromium/base/process/process_info_linux.cc
index 7cec8f4e4ec..2f227484f5f 100644
--- a/chromium/base/process/process_info_linux.cc
+++ b/chromium/base/process/process_info_linux.cc
@@ -17,10 +17,12 @@ namespace base {
const Time CurrentProcessInfo::CreationTime() {
int64_t start_ticks =
internal::ReadProcSelfStatsAndGetFieldAsInt64(internal::VM_STARTTIME);
- DCHECK(start_ticks);
+ if (!start_ticks)
+ return Time();
TimeDelta start_offset = internal::ClockTicksToTimeDelta(start_ticks);
Time boot_time = internal::GetBootTime();
- DCHECK(!boot_time.is_null());
+ if (boot_time.is_null())
+ return Time();
return Time(boot_time + start_offset);
}
diff --git a/chromium/base/process/process_info_unittest.cc b/chromium/base/process/process_info_unittest.cc
new file mode 100644
index 00000000000..a757774fdad
--- /dev/null
+++ b/chromium/base/process/process_info_unittest.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+#if !defined(OS_IOS)
+TEST(ProcessInfoTest, CreationTime) {
+ Time creation_time = CurrentProcessInfo::CreationTime();
+ ASSERT_FALSE(creation_time.is_null());
+}
+#endif // !defined(OS_IOS)
+
+} // namespace base
diff --git a/chromium/base/process/process_linux.cc b/chromium/base/process/process_linux.cc
index 2973ef3cbcc..f98bb4c0554 100644
--- a/chromium/base/process/process_linux.cc
+++ b/chromium/base/process/process_linux.cc
@@ -60,9 +60,12 @@ struct CGroups {
foreground_type == FILE_SYSTEM_CGROUP &&
background_type == FILE_SYSTEM_CGROUP;
}
-};
-base::LazyInstance<CGroups> g_cgroups = LAZY_INSTANCE_INITIALIZER;
+ static CGroups& Get() {
+ static auto& groups = *new CGroups;
+ return groups;
+ }
+};
#else
const int kBackgroundPriority = 5;
#endif // defined(OS_CHROMEOS)
@@ -86,12 +89,12 @@ struct CheckForNicePermission {
// static
bool Process::CanBackgroundProcesses() {
#if defined(OS_CHROMEOS)
- if (g_cgroups.Get().enabled)
+ if (CGroups::Get().enabled)
return true;
#endif // defined(OS_CHROMEOS)
- static LazyInstance<CheckForNicePermission> check_for_nice_permission =
- LAZY_INSTANCE_INITIALIZER;
+ static LazyInstance<CheckForNicePermission>::DestructorAtExit
+ check_for_nice_permission = LAZY_INSTANCE_INITIALIZER;
return check_for_nice_permission.Get().can_reraise_priority;
}
@@ -99,7 +102,7 @@ bool Process::IsProcessBackgrounded() const {
DCHECK(IsValid());
#if defined(OS_CHROMEOS)
- if (g_cgroups.Get().enabled) {
+ if (CGroups::Get().enabled) {
// Used to allow reading the process priority from proc on thread launch.
base::ThreadRestrictions::ScopedAllowIO allow_io;
std::string proc;
@@ -118,11 +121,10 @@ bool Process::SetProcessBackgrounded(bool background) {
DCHECK(IsValid());
#if defined(OS_CHROMEOS)
- if (g_cgroups.Get().enabled) {
+ if (CGroups::Get().enabled) {
std::string pid = IntToString(process_);
- const base::FilePath file =
- background ?
- g_cgroups.Get().background_file : g_cgroups.Get().foreground_file;
+ const base::FilePath file = background ? CGroups::Get().background_file
+ : CGroups::Get().foreground_file;
return base::WriteFile(file, pid.c_str(), pid.size()) > 0;
}
#endif // defined(OS_CHROMEOS)
diff --git a/chromium/base/process/process_metrics.cc b/chromium/base/process/process_metrics.cc
index a38930a2088..ad555aedffe 100644
--- a/chromium/base/process/process_metrics.cc
+++ b/chromium/base/process/process_metrics.cc
@@ -12,6 +12,11 @@
namespace base {
+SystemMemoryInfoKB::SystemMemoryInfoKB() = default;
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+ default;
+
SystemMetrics::SystemMetrics() {
committed_memory_ = 0;
}
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 2448a715ace..6e5e796a7c8 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -25,6 +25,14 @@
#if defined(OS_MACOSX)
#include <mach/mach.h>
#include "base/process/port_provider_mac.h"
+
+#if !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#endif
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
#endif
namespace base {
@@ -63,8 +71,12 @@ struct IoCounters {
// shareable: 0
// swapped Pages swapped out to zram.
//
-// On OS X: TODO(thakis): Revise.
-// priv: Memory.
+// On macOS:
+// priv: Resident size (RSS) including shared memory. Warning: This
+// does not include compressed size and does not always
+// accurately account for shared memory due to things like
+// copy-on-write. TODO(erikchen): Revamp this with something
+// more accurate.
// shared: 0
// shareable: 0
//
@@ -136,8 +148,7 @@ class BASE_EXPORT ProcessMetrics {
// memory currently allocated to a process that cannot be shared. Returns
// false on platform specific error conditions. Note: |private_bytes|
// returns 0 on unsupported OSes: prior to XP SP2.
- bool GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes);
+ bool GetMemoryBytes(size_t* private_bytes, size_t* shared_bytes) const;
// Fills a CommittedKBytes with both resident and paged
// memory usage as per definition of CommittedBytes.
void GetCommittedKBytes(CommittedKBytes* usage) const;
@@ -155,6 +166,19 @@ class BASE_EXPORT ProcessMetrics {
// system call.
bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
WorkingSetKBytes* ws_usage) const;
+
+ // Returns the physical footprint, only available on macOS 10.11+. This
+ // measures anonymous, non-discardable memory. Returns 0 on error, or if the
+ // measurement was unavailable.
+ size_t GetPhysicalFootprint() const;
+
+ // Returns private, shared, and total resident bytes. |locked_bytes| refers to
+ // bytes that must stay resident. |locked_bytes| only counts bytes locked by
+ // this task, not bytes locked by the kernel.
+ bool GetMemoryBytes(size_t* private_bytes,
+ size_t* shared_bytes,
+ size_t* resident_bytes,
+ size_t* locked_bytes) const;
#endif
// Returns the CPU usage in percent since the last time this method or
@@ -185,6 +209,10 @@ class BASE_EXPORT ProcessMetrics {
// Returns the number of file descriptors currently open by the process, or
// -1 on error.
int GetOpenFdCount() const;
+
+ // Returns the soft limit of file descriptors that can be opened by the
+ // process, or -1 on error.
+ int GetOpenFdSoftLimit() const;
#endif // defined(OS_LINUX)
private:
@@ -206,7 +234,11 @@ class BASE_EXPORT ProcessMetrics {
int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
#endif
+#if defined(OS_WIN)
+ win::ScopedHandle process_;
+#else
ProcessHandle process_;
+#endif
int processor_count_;
@@ -261,11 +293,13 @@ BASE_EXPORT void SetFdLimit(unsigned int max_descriptors);
// Data about system-wide memory consumption. Values are in KB. Available on
// Windows, Mac, Linux, Android and Chrome OS.
//
-// Total/free memory are available on all platforms that implement
+// Total memory is available on all platforms that implement
// GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
// except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
-// inactive_file/dirty/pswpin/pswpout/pgmajfault are available on
+// inactive_file/dirty/reclaimable/pswpin/pswpout/pgmajfault are available on
// Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
+// Speculative/file_backed/purgeable are Mac and iOS only.
+// Free is absent on Windows (see "avail_phys" below).
struct BASE_EXPORT SystemMemoryInfoKB {
SystemMemoryInfoKB();
SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
@@ -273,44 +307,64 @@ struct BASE_EXPORT SystemMemoryInfoKB {
// Serializes the platform specific fields to value.
std::unique_ptr<Value> ToValue() const;
- int total;
- int free;
+ int total = 0;
-#if defined(OS_LINUX)
+#if !defined(OS_WIN)
+ int free = 0;
+#endif
+
+#if defined(OS_WIN)
+ // "This is the amount of physical memory that can be immediately reused
+ // without having to write its contents to disk first. It is the sum of the
+ // size of the standby, free, and zero lists." (MSDN).
+  // Standby: unmodified pages of physical RAM (file-backed memory) that are
+  // not actively being used.
+ int avail_phys = 0;
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
// This provides an estimate of available memory as described here:
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
// NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
// be 0 in earlier kernel versions.
- int available;
+ // Note: it includes _all_ file-backed memory (active + inactive).
+ int available = 0;
#endif
#if !defined(OS_MACOSX)
- int swap_total;
- int swap_free;
+ int swap_total = 0;
+ int swap_free = 0;
#endif
#if defined(OS_ANDROID) || defined(OS_LINUX)
- int buffers;
- int cached;
- int active_anon;
- int inactive_anon;
- int active_file;
- int inactive_file;
- int dirty;
+ int buffers = 0;
+ int cached = 0;
+ int active_anon = 0;
+ int inactive_anon = 0;
+ int active_file = 0;
+ int inactive_file = 0;
+ int dirty = 0;
+ int reclaimable = 0;
// vmstats data.
- unsigned long pswpin;
- unsigned long pswpout;
- unsigned long pgmajfault;
+ unsigned long pswpin = 0;
+ unsigned long pswpout = 0;
+ unsigned long pgmajfault = 0;
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
#if defined(OS_CHROMEOS)
- int shmem;
- int slab;
+ int shmem = 0;
+ int slab = 0;
// Gem data will be -1 if not supported.
- int gem_objects;
- long long gem_size;
+ int gem_objects = -1;
+ long long gem_size = -1;
#endif // defined(OS_CHROMEOS)
+
+#if defined(OS_MACOSX)
+ int speculative = 0;
+ int file_backed = 0;
+ int purgeable = 0;
+#endif // defined(OS_MACOSX)
};
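
A reading sketch under the new per-platform layout; fields not listed for a
platform simply don't exist there:

  base::SystemMemoryInfoKB info;
  if (base::GetSystemMemoryInfo(&info)) {
    int total_kb = info.total;           // all platforms
  #if defined(OS_LINUX) || defined(OS_ANDROID)
    int available_kb = info.available;   // 0 on kernels older than 3.14
  #elif defined(OS_WIN)
    int avail_kb = info.avail_phys;      // standby + free + zero lists
  #endif
  }
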
// On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
@@ -434,6 +488,42 @@ class SystemMetrics {
#endif
};
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+enum class MachVMRegionResult {
+ // There were no more memory regions between |address| and the end of the
+ // virtual address space.
+ Finished,
+
+ // All output parameters are invalid.
+ Error,
+
+ // All output parameters are filled in.
+ Success
+};
+
+// Returns info on the first memory region at or after |address|, including
+// resident memory and share mode. On Success, |size| reflects the size of the
+// memory region.
+// |size| and |info| are output parameters, only valid on Success.
+// |address| is an in-out parameter that represents both the address at which
+// to start looking and the start address of the memory region.
+BASE_EXPORT MachVMRegionResult GetTopInfo(mach_port_t task,
+ mach_vm_size_t* size,
+ mach_vm_address_t* address,
+ vm_region_top_info_data_t* info);
+
+// Returns info on the first memory region at or after |address|, including
+// protection values. On Success, |size| reflects the size of the memory
+// region.
+// |size| and |info| are output parameters, only valid on Success.
+BASE_EXPORT MachVMRegionResult GetBasicInfo(mach_port_t task,
+ mach_vm_size_t* size,
+ mach_vm_address_t* address,
+ vm_region_basic_info_64* info);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
} // namespace base
#endif // BASE_PROCESS_PROCESS_METRICS_H_
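
A minimal iteration sketch over the new helpers, mirroring the loop in
process_metrics_mac.cc (|task| is assumed to be a valid task port):

  mach_vm_size_t size = 0;
  mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
  for (;;) {
    vm_region_top_info_data_t info;
    base::MachVMRegionResult r = base::GetTopInfo(task, &size, &address, &info);
    if (r == base::MachVMRegionResult::Error)
      break;  // Outputs are invalid; treat as failure.
    if (r == base::MachVMRegionResult::Finished)
      break;  // Walked off the end of the address space.
    // |address| now holds the region start; |size| its length.
    address += size;  // Advance past this region before the next query.
  }
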
diff --git a/chromium/base/process/process_metrics_freebsd.cc b/chromium/base/process/process_metrics_freebsd.cc
index 686f6338724..4f5adf790f6 100644
--- a/chromium/base/process/process_metrics_freebsd.cc
+++ b/chromium/base/process/process_metrics_freebsd.cc
@@ -58,7 +58,7 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
}
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes) const {
WorkingSetKBytes ws_usage;
if (!GetWorkingSetKBytes(&ws_usage))
return false;
diff --git a/chromium/base/process/process_metrics_ios.cc b/chromium/base/process/process_metrics_ios.cc
index e6b01192b59..2ed65ab37f9 100644
--- a/chromium/base/process/process_metrics_ios.cc
+++ b/chromium/base/process/process_metrics_ios.cc
@@ -9,7 +9,9 @@
#include <stddef.h>
#include "base/logging.h"
+#include "base/mac/scoped_mach_port.h"
#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
namespace base {
@@ -26,11 +28,6 @@ bool GetTaskInfo(task_basic_info_64* task_info_data) {
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
-
ProcessMetrics::ProcessMetrics(ProcessHandle process) {}
ProcessMetrics::~ProcessMetrics() {}
@@ -91,11 +88,40 @@ size_t GetSystemCommitCharge() {
return 0;
}
-// Bytes committed by the system.
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
- // Unimplemented. Must enable unittest for IOS when this gets implemented.
- NOTIMPLEMENTED();
- return false;
+ struct host_basic_info hostinfo;
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+ base::mac::ScopedMachSendRight host(mach_host_self());
+ int result = host_info(host.get(), HOST_BASIC_INFO,
+ reinterpret_cast<host_info_t>(&hostinfo), &count);
+ if (result != KERN_SUCCESS)
+ return false;
+
+ DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+ meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
+
+ vm_statistics64_data_t vm_info;
+ count = HOST_VM_INFO64_COUNT;
+
+ if (host_statistics64(host.get(), HOST_VM_INFO64,
+ reinterpret_cast<host_info64_t>(&vm_info),
+ &count) != KERN_SUCCESS) {
+ return false;
+ }
+ DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
+
+ // Check that PAGE_SIZE is divisible by 1024 (2^10).
+ CHECK_EQ(PAGE_SIZE, (PAGE_SIZE >> 10) << 10);
+ meminfo->free = saturated_cast<int>(
+ PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+ meminfo->speculative =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
+ meminfo->file_backed =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
+ meminfo->purgeable =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
+
+ return true;
}
} // namespace base
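
The operand order in these conversions is deliberate: dividing PAGE_SIZE by
1024 first keeps the intermediate value small regardless of the promoted
operand width. A worked example:

  // With PAGE_SIZE = 4096 and free_count = 2,000,000 pages:
  //   free_count * PAGE_SIZE        = 8,192,000,000  -> overflows 32 bits
  //   PAGE_SIZE / 1024 * free_count = 4 * 2,000,000  = 8,000,000 KB  (fits)
  // saturated_cast<int> then clamps, rather than wraps, if the KB value
  // still exceeds INT_MAX.
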
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index b14aa210bd5..ba0dfa76b96 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -192,7 +192,7 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
}
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes) const {
WorkingSetKBytes ws_usage;
if (!GetWorkingSetKBytes(&ws_usage))
return false;
@@ -311,6 +311,32 @@ int ProcessMetrics::GetOpenFdCount() const {
return total_count;
}
+
+int ProcessMetrics::GetOpenFdSoftLimit() const {
+ // Use /proc/<pid>/limits to read the open fd limit.
+ FilePath fd_path = internal::GetProcPidDir(process_).Append("limits");
+
+ std::string limits_contents;
+ if (!ReadFileToString(fd_path, &limits_contents))
+ return -1;
+
+ for (const auto& line :
+ base::SplitStringPiece(limits_contents, "\n", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY)) {
+ if (line.starts_with("Max open files")) {
+ auto tokens = base::SplitStringPiece(line, " ", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ if (tokens.size() > 3) {
+ int limit = -1;
+ if (StringToInt(tokens[3], &limit))
+ return limit;
+ return -1;
+ }
+ }
+ }
+ return -1;
+}
+
#endif // defined(OS_LINUX)
ProcessMetrics::ProcessMetrics(ProcessHandle process)
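
For reference, the line GetOpenFdSoftLimit() parses looks like this in
/proc/<pid>/limits (values illustrative); after the whitespace split,
tokens[3] is the soft limit:

  Limit                     Soft Limit           Hard Limit           Units
  Max open files            1024                 4096                 files
  // tokens: "Max", "open", "files", "1024" (soft), "4096" (hard), "files"
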
@@ -532,45 +558,12 @@ const size_t kDiskWeightedIOTime = 13;
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB() {
- total = 0;
- free = 0;
-#if defined(OS_LINUX)
- available = 0;
-#endif
- buffers = 0;
- cached = 0;
- active_anon = 0;
- inactive_anon = 0;
- active_file = 0;
- inactive_file = 0;
- swap_total = 0;
- swap_free = 0;
- dirty = 0;
-
- pswpin = 0;
- pswpout = 0;
- pgmajfault = 0;
-
-#ifdef OS_CHROMEOS
- shmem = 0;
- slab = 0;
- gem_objects = -1;
- gem_size = -1;
-#endif
-}
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
-
std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
std::unique_ptr<DictionaryValue> res(new DictionaryValue());
res->SetInteger("total", total);
res->SetInteger("free", free);
-#if defined(OS_LINUX)
res->SetInteger("available", available);
-#endif
res->SetInteger("buffers", buffers);
res->SetInteger("cached", cached);
res->SetInteger("active_anon", active_anon);
@@ -581,6 +574,7 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("swap_free", swap_free);
res->SetInteger("swap_used", swap_total - swap_free);
res->SetInteger("dirty", dirty);
+ res->SetInteger("reclaimable", reclaimable);
res->SetInteger("pswpin", pswpin);
res->SetInteger("pswpout", pswpout);
res->SetInteger("pgmajfault", pgmajfault);
@@ -628,10 +622,8 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->total;
else if (tokens[0] == "MemFree:")
target = &meminfo->free;
-#if defined(OS_LINUX)
else if (tokens[0] == "MemAvailable:")
target = &meminfo->available;
-#endif
else if (tokens[0] == "Buffers:")
target = &meminfo->buffers;
else if (tokens[0] == "Cached:")
@@ -650,6 +642,8 @@ bool ParseProcMeminfo(const std::string& meminfo_data,
target = &meminfo->swap_free;
else if (tokens[0] == "Dirty:")
target = &meminfo->dirty;
+ else if (tokens[0] == "SReclaimable:")
+ target = &meminfo->reclaimable;
#if defined(OS_CHROMEOS)
// Chrome OS has a tweaked kernel that allows us to query Shmem, which is
// usually video memory otherwise invisible to the OS.
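
Sample /proc/meminfo input for the parser above (values illustrative); the new
SReclaimable branch feeds the |reclaimable| field:

  MemTotal:       16326648 kB
  MemFree:         1138316 kB
  MemAvailable:   10644216 kB
  Dirty:               396 kB
  SReclaimable:     456212 kB
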
diff --git a/chromium/base/process/process_metrics_mac.cc b/chromium/base/process/process_metrics_mac.cc
index a3c2d6a14aa..19428fafef7 100644
--- a/chromium/base/process/process_metrics_mac.cc
+++ b/chromium/base/process/process_metrics_mac.cc
@@ -13,32 +13,50 @@
#include "base/containers/hash_tables.h"
#include "base/logging.h"
+#include "base/mac/mac_util.h"
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_port.h"
#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
#include "base/sys_info.h"
-#if !defined(TASK_POWER_INFO)
-// Doesn't exist in the 10.6 or 10.7 SDKs.
-#define TASK_POWER_INFO 21
-struct task_power_info {
- uint64_t total_user;
- uint64_t total_system;
- uint64_t task_interrupt_wakeups;
- uint64_t task_platform_idle_wakeups;
- uint64_t task_timer_wakeups_bin_1;
- uint64_t task_timer_wakeups_bin_2;
-};
-typedef struct task_power_info task_power_info_data_t;
-typedef struct task_power_info *task_power_info_t;
-#define TASK_POWER_INFO_COUNT ((mach_msg_type_number_t) \
- (sizeof (task_power_info_data_t) / sizeof (natural_t)))
-#endif
-
namespace base {
namespace {
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
+// The |phys_footprint| field was introduced in 10.11.
+struct ChromeTaskVMInfo {
+ mach_vm_size_t virtual_size;
+ integer_t region_count;
+ integer_t page_size;
+ mach_vm_size_t resident_size;
+ mach_vm_size_t resident_size_peak;
+ mach_vm_size_t device;
+ mach_vm_size_t device_peak;
+ mach_vm_size_t internal;
+ mach_vm_size_t internal_peak;
+ mach_vm_size_t external;
+ mach_vm_size_t external_peak;
+ mach_vm_size_t reusable;
+ mach_vm_size_t reusable_peak;
+ mach_vm_size_t purgeable_volatile_pmap;
+ mach_vm_size_t purgeable_volatile_resident;
+ mach_vm_size_t purgeable_volatile_virtual;
+ mach_vm_size_t compressed;
+ mach_vm_size_t compressed_peak;
+ mach_vm_size_t compressed_lifetime;
+ mach_vm_size_t phys_footprint;
+};
+mach_msg_type_number_t ChromeTaskVMInfoCount =
+ sizeof(ChromeTaskVMInfo) / sizeof(natural_t);
+#else
+using ChromeTaskVMInfo = task_vm_info;
+mach_msg_type_number_t ChromeTaskVMInfoCount = TASK_VM_INFO_REV1_COUNT;
+#endif // MAC_OS_X_VERSION_10_11
+
bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
if (task == MACH_PORT_NULL)
return false;
@@ -78,12 +96,17 @@ bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
}
}
-} // namespace
-
-SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
+MachVMRegionResult ParseOutputFromMachVMRegion(kern_return_t kr) {
+ if (kr == KERN_INVALID_ADDRESS) {
+ // We're at the end of the address space.
+ return MachVMRegionResult::Finished;
+ } else if (kr != KERN_SUCCESS) {
+ return MachVMRegionResult::Error;
+ }
+ return MachVMRegionResult::Success;
+}
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
+} // namespace
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to do these (and spinning
@@ -110,26 +133,31 @@ size_t ProcessMetrics::GetPeakPagefileUsage() const {
}
size_t ProcessMetrics::GetWorkingSetSize() const {
- task_basic_info_64 task_info_data;
- if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
+ size_t resident_bytes = 0;
+ if (!GetMemoryBytes(nullptr, nullptr, &resident_bytes, nullptr))
return 0;
- return task_info_data.resident_size;
+ return resident_bytes;
}
size_t ProcessMetrics::GetPeakWorkingSetSize() const {
return 0;
}
+bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
+ size_t* shared_bytes) const {
+ return GetMemoryBytes(private_bytes, shared_bytes, nullptr, nullptr);
+}
+
// This is a rough approximation of the algorithm that libtop uses.
// private_bytes is the size of private resident memory.
// shared_bytes is the size of shared resident memory.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes,
+ size_t* resident_bytes,
+ size_t* locked_bytes) const {
size_t private_pages_count = 0;
size_t shared_pages_count = 0;
-
- if (!private_bytes && !shared_bytes)
- return true;
+ size_t wired_pages_count = 0;
mach_port_t task = TaskForPid(process_);
if (task == MACH_PORT_NULL) {
@@ -157,29 +185,31 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
// See libtop_update_vm_regions in
// http://www.opensource.apple.com/source/top/top-67/libtop.c
mach_vm_size_t size = 0;
- for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
+ mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
+ while (true) {
+ base::CheckedNumeric<mach_vm_address_t> next_address(address);
+ next_address += size;
+ if (!next_address.IsValid())
+ return false;
+ address = next_address.ValueOrDie();
+
+ mach_vm_address_t address_copy = address;
vm_region_top_info_data_t info;
- mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
- mach_port_t object_name;
- kern_return_t kr = mach_vm_region(task,
- &address,
- &size,
- VM_REGION_TOP_INFO,
- reinterpret_cast<vm_region_info_t>(&info),
- &info_count,
- &object_name);
- if (kr == KERN_INVALID_ADDRESS) {
- // We're at the end of the address space.
+ MachVMRegionResult result = GetTopInfo(task, &size, &address, &info);
+ if (result == MachVMRegionResult::Error)
+ return false;
+ if (result == MachVMRegionResult::Finished)
break;
- } else if (kr != KERN_SUCCESS) {
- MACH_DLOG(ERROR, kr) << "mach_vm_region";
+
+ vm_region_basic_info_64 basic_info;
+ mach_vm_size_t dummy_size = 0;
+ result = GetBasicInfo(task, &dummy_size, &address_copy, &basic_info);
+ if (result == MachVMRegionResult::Error)
return false;
- }
+ if (result == MachVMRegionResult::Finished)
+ break;
- // The kernel always returns a null object for VM_REGION_TOP_INFO, but
- // balance it with a deallocate in case this ever changes. See 10.9.2
- // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
- mach_port_deallocate(mach_task_self(), object_name);
+ bool is_wired = basic_info.user_wired_count > 0;
if (IsAddressInSharedRegion(address, cpu_type) &&
info.share_mode != SM_PRIVATE)
@@ -189,6 +219,7 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
info.share_mode = SM_PRIVATE;
switch (info.share_mode) {
+ case SM_LARGE_PAGE:
case SM_PRIVATE:
private_pages_count += info.private_pages_resident;
private_pages_count += info.shared_pages_resident;
@@ -197,6 +228,9 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
private_pages_count += info.private_pages_resident;
// Fall through
case SM_SHARED:
+ case SM_PRIVATE_ALIASED:
+ case SM_TRUESHARED:
+ case SM_SHARED_ALIASED:
if (seen_objects.count(info.obj_id) == 0) {
// Only count the first reference to this region.
seen_objects.insert(info.obj_id);
@@ -206,12 +240,20 @@ bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
default:
break;
}
+ if (is_wired) {
+ wired_pages_count +=
+ info.private_pages_resident + info.shared_pages_resident;
+ }
}
if (private_bytes)
*private_bytes = private_pages_count * PAGE_SIZE;
if (shared_bytes)
*shared_bytes = shared_pages_count * PAGE_SIZE;
+ if (resident_bytes)
+ *resident_bytes = (private_pages_count + shared_pages_count) * PAGE_SIZE;
+ if (locked_bytes)
+ *locked_bytes = wired_pages_count * PAGE_SIZE;
return true;
}
@@ -246,6 +288,20 @@ bool ProcessMetrics::GetCommittedAndWorkingSetKBytes(
return true;
}
+size_t ProcessMetrics::GetPhysicalFootprint() const {
+ if (mac::IsAtMostOS10_11())
+ return 0;
+
+ ChromeTaskVMInfo task_vm_info;
+ mach_msg_type_number_t count = ChromeTaskVMInfoCount;
+ kern_return_t result =
+ task_info(TaskForPid(process_), TASK_VM_INFO,
+ reinterpret_cast<task_info_t>(&task_vm_info), &count);
+ if (result != KERN_SUCCESS)
+ return 0;
+ return task_vm_info.phys_footprint;
+}
+
#define TIME_VALUE_TO_TIMEVAL(a, r) do { \
(r)->tv_sec = (a)->seconds; \
(r)->tv_usec = (a)->microseconds; \
@@ -377,7 +433,6 @@ size_t GetSystemCommitCharge() {
return (data.active_count * PAGE_SIZE) / 1024;
}
-// On Mac, We only get total memory and free memory from the system.
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
struct host_basic_info hostinfo;
mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
@@ -390,19 +445,61 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
- vm_statistics_data_t vm_info;
- count = HOST_VM_INFO_COUNT;
+ vm_statistics64_data_t vm_info;
+ count = HOST_VM_INFO64_COUNT;
- if (host_statistics(host.get(), HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
+ if (host_statistics64(host.get(), HOST_VM_INFO64,
+ reinterpret_cast<host_info64_t>(&vm_info),
+ &count) != KERN_SUCCESS) {
return false;
}
-
- meminfo->free = static_cast<int>(
- (vm_info.free_count - vm_info.speculative_count) * PAGE_SIZE / 1024);
+ DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
+
+ static_assert(PAGE_SIZE % 1024 == 0, "Invalid page size");
+ meminfo->free = saturated_cast<int>(
+ PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+ meminfo->speculative =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
+ meminfo->file_backed =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
+ meminfo->purgeable =
+ saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
return true;
}
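The operand order above is deliberate: PAGE_SIZE / 1024 is computed first (exact, per the static_assert), so the page counts are never multiplied by the full page size in a narrow intermediate. An illustrative sketch of the difference, assuming 4096-byte pages and a hypothetical page count:

    uint32_t page_count = 2000000;  // Illustrative only: ~7.6 GiB of pages.
    // page_count * PAGE_SIZE = 8192000000 would overflow a 32-bit
    // intermediate before any division; dividing first stays small:
    int kb = saturated_cast<int>(PAGE_SIZE / 1024 * page_count);  // 8000000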
+// Both |size| and |address| are in-out parameters.
+// |info| is an output parameter, valid only on success.
+MachVMRegionResult GetTopInfo(mach_port_t task,
+ mach_vm_size_t* size,
+ mach_vm_address_t* address,
+ vm_region_top_info_data_t* info) {
+ mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
+ mach_port_t object_name;
+ kern_return_t kr = mach_vm_region(task, address, size, VM_REGION_TOP_INFO,
+ reinterpret_cast<vm_region_info_t>(info),
+ &info_count, &object_name);
+ // The kernel always returns a null object for VM_REGION_TOP_INFO, but
+ // balance it with a deallocate in case this ever changes. See 10.9.2
+ // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+ mach_port_deallocate(task, object_name);
+ return ParseOutputFromMachVMRegion(kr);
+}
+
+MachVMRegionResult GetBasicInfo(mach_port_t task,
+ mach_vm_size_t* size,
+ mach_vm_address_t* address,
+ vm_region_basic_info_64* info) {
+ mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
+ mach_port_t object_name;
+ kern_return_t kr = mach_vm_region(
+ task, address, size, VM_REGION_BASIC_INFO_64,
+ reinterpret_cast<vm_region_info_t>(info), &info_count, &object_name);
+ // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
+ // balance it with a deallocate in case this ever changes. See 10.9.2
+ // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+ mach_port_deallocate(task, object_name);
+ return ParseOutputFromMachVMRegion(kr);
+}
+
} // namespace base
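For context, a sketch of the iteration pattern these helpers support (mirroring the reworked loop in GetMemoryBytes above; MachVMRegionResult and ParseOutputFromMachVMRegion are assumed to be declared earlier in this file):

    mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
    mach_vm_size_t size = 0;
    while (true) {
      vm_region_top_info_data_t info;
      MachVMRegionResult result = GetTopInfo(task, &size, &address, &info);
      if (result == MachVMRegionResult::Error)
        return false;
      if (result == MachVMRegionResult::Finished)
        break;
      // ... account for |info| here ...
      address += size;  // Advance past the region just inspected.
    }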
diff --git a/chromium/base/process/process_metrics_openbsd.cc b/chromium/base/process/process_metrics_openbsd.cc
index 58033aef62e..d8fbe7e3710 100644
--- a/chromium/base/process/process_metrics_openbsd.cc
+++ b/chromium/base/process/process_metrics_openbsd.cc
@@ -64,7 +64,7 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
}
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes) const {
WorkingSetKBytes ws_usage;
if (!GetWorkingSetKBytes(&ws_usage))
diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc
index 3e059b48a45..8abdb264075 100644
--- a/chromium/base/process/process_metrics_unittest.cc
+++ b/chromium/base/process/process_metrics_unittest.cc
@@ -17,12 +17,17 @@
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
#include "base/test/multiprocess_test.h"
#include "base/threading/thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
+#if defined(OS_MACOSX)
+#include <sys/mman.h>
+#endif
+
namespace base {
namespace debug {
@@ -52,6 +57,42 @@ class SystemMetricsTest : public testing::Test {
/////////////////////////////////////////////////////////////////////////////
+#if defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+TEST_F(SystemMetricsTest, LockedBytes) {
+ ProcessHandle handle = GetCurrentProcessHandle();
+ std::unique_ptr<ProcessMetrics> metrics(
+ ProcessMetrics::CreateProcessMetrics(handle, nullptr));
+
+ size_t initial_locked_bytes;
+ bool result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &initial_locked_bytes);
+ ASSERT_TRUE(result);
+
+ size_t size = 8 * 1024 * 1024;
+ std::unique_ptr<char[]> memory(new char[size]);
+ int r = mlock(memory.get(), size);
+ ASSERT_EQ(0, r);
+
+ size_t new_locked_bytes;
+ result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
+ ASSERT_TRUE(result);
+
+ // There should be around |size| more locked bytes, but multi-threading might
+ // cause noise.
+ EXPECT_LT(initial_locked_bytes + size / 2, new_locked_bytes);
+ EXPECT_GT(initial_locked_bytes + size * 1.5, new_locked_bytes);
+
+ r = munlock(memory.get(), size);
+ ASSERT_EQ(0, r);
+
+ result =
+ metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
+ ASSERT_TRUE(result);
+ EXPECT_EQ(initial_locked_bytes, new_locked_bytes);
+}
+#endif // defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
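For reference, the mlock/munlock calls the test leans on follow the usual POSIX pattern; a hedged sketch with error reporting (real callers should expect ENOMEM or EPERM under RLIMIT_MEMLOCK):

    #include <sys/mman.h>

    // Pins |size| bytes of |buffer| so they count as wired/locked memory.
    bool PinBuffer(void* buffer, size_t size) {
      if (mlock(buffer, size) != 0) {
        PLOG(ERROR) << "mlock";
        return false;
      }
      return true;
    }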
+
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST_F(SystemMetricsTest, IsValidDiskName) {
std::string invalid_input1 = "";
@@ -106,6 +147,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
std::string valid_input1 =
"MemTotal: 3981504 kB\n"
"MemFree: 140764 kB\n"
+ "MemAvailable: 535413 kB\n"
"Buffers: 116480 kB\n"
"Cached: 406160 kB\n"
"SwapCached: 21304 kB\n"
@@ -171,6 +213,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
EXPECT_TRUE(ParseProcMeminfo(valid_input1, &meminfo));
EXPECT_EQ(meminfo.total, 3981504);
EXPECT_EQ(meminfo.free, 140764);
+ EXPECT_EQ(meminfo.available, 535413);
EXPECT_EQ(meminfo.buffers, 116480);
EXPECT_EQ(meminfo.cached, 406160);
EXPECT_EQ(meminfo.active_anon, 2972352);
@@ -180,18 +223,29 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
EXPECT_EQ(meminfo.swap_total, 5832280);
EXPECT_EQ(meminfo.swap_free, 3672368);
EXPECT_EQ(meminfo.dirty, 184);
+ EXPECT_EQ(meminfo.reclaimable, 30936);
#if defined(OS_CHROMEOS)
EXPECT_EQ(meminfo.shmem, 140204);
EXPECT_EQ(meminfo.slab, 54212);
#endif
+ EXPECT_EQ(355725,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+ // Simulate as if there is no MemAvailable.
+ meminfo.available = 0;
+ EXPECT_EQ(374448,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+ meminfo = {};
EXPECT_TRUE(ParseProcMeminfo(valid_input2, &meminfo));
EXPECT_EQ(meminfo.total, 255908);
EXPECT_EQ(meminfo.free, 69936);
+ EXPECT_EQ(meminfo.available, 0);
EXPECT_EQ(meminfo.buffers, 15812);
EXPECT_EQ(meminfo.cached, 115124);
EXPECT_EQ(meminfo.swap_total, 524280);
EXPECT_EQ(meminfo.swap_free, 524200);
EXPECT_EQ(meminfo.dirty, 4);
+ EXPECT_EQ(69936,
+ base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
}
TEST_F(SystemMetricsTest, ParseVmstat) {
@@ -323,9 +377,9 @@ TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
std::vector<std::string> vec2;
std::vector<std::string> vec3;
- thread1.task_runner()->PostTask(FROM_HERE, Bind(&BusyWork, &vec1));
- thread2.task_runner()->PostTask(FROM_HERE, Bind(&BusyWork, &vec2));
- thread3.task_runner()->PostTask(FROM_HERE, Bind(&BusyWork, &vec3));
+ thread1.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec1));
+ thread2.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec2));
+ thread3.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec3));
EXPECT_GE(metrics->GetCPUUsage(), 0.0);
@@ -341,15 +395,19 @@ TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
-#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
- defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
SystemMemoryInfoKB info;
EXPECT_TRUE(GetSystemMemoryInfo(&info));
// Ensure each field received a value.
EXPECT_GT(info.total, 0);
+#if defined(OS_WIN)
+ EXPECT_GT(info.avail_phys, 0);
+#else
EXPECT_GT(info.free, 0);
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_GT(info.buffers, 0);
EXPECT_GT(info.cached, 0);
@@ -360,7 +418,10 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
// All the values should be less than the total amount of memory.
+#if !defined(OS_WIN) && !defined(OS_IOS)
+ // TODO(crbug.com/711450): re-enable the following assertion on iOS.
EXPECT_LT(info.free, info.total);
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_LT(info.buffers, info.total);
EXPECT_LT(info.cached, info.total);
@@ -370,6 +431,10 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
EXPECT_LT(info.inactive_file, info.total);
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_MACOSX) || defined(OS_IOS)
+ EXPECT_GT(info.file_backed, 0);
+#endif
+
#if defined(OS_CHROMEOS)
// Chrome OS exposes shmem.
EXPECT_GT(info.shmem, 0);
@@ -378,8 +443,8 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
// and gem_size cannot be tested here.
#endif
}
-#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) ||
- // defined(OS_LINUX) || defined(OS_ANDROID)
+#endif // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+ // defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST(ProcessMetricsTest, ParseProcStatCPU) {
@@ -491,15 +556,15 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
const FilePath temp_path = temp_dir.GetPath();
CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
- Process child = SpawnMultiProcessTestChild(
+ SpawnChildResult spawn_child = SpawnMultiProcessTestChild(
ChildMainString, child_command_line, LaunchOptions());
- ASSERT_TRUE(child.IsValid());
+ ASSERT_TRUE(spawn_child.process.IsValid());
WaitForEvent(temp_path, kSignalClosed);
std::unique_ptr<ProcessMetrics> metrics(
- ProcessMetrics::CreateProcessMetrics(child.Handle()));
+ ProcessMetrics::CreateProcessMetrics(spawn_child.process.Handle()));
EXPECT_EQ(0, metrics->GetOpenFdCount());
- ASSERT_TRUE(child.Terminate(0, true));
+ ASSERT_TRUE(spawn_child.process.Terminate(0, true));
}
#endif // defined(OS_LINUX)
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index d2f0c935531..f5b191dfc08 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -31,12 +31,6 @@ typedef NTSTATUS(WINAPI* NTQUERYSYSTEMINFORMATION)(
} // namespace
-SystemMemoryInfoKB::SystemMemoryInfoKB()
- : total(0), free(0), swap_total(0), swap_free(0) {}
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
- default;
-
ProcessMetrics::~ProcessMetrics() { }
// static
@@ -47,7 +41,7 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
size_t ProcessMetrics::GetPagefileUsage() const {
PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_, &pmc, sizeof(pmc))) {
+ if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
return pmc.PagefileUsage;
}
return 0;
@@ -56,7 +50,7 @@ size_t ProcessMetrics::GetPagefileUsage() const {
// Returns the peak space allocated for the pagefile, in bytes.
size_t ProcessMetrics::GetPeakPagefileUsage() const {
PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_, &pmc, sizeof(pmc))) {
+ if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
return pmc.PeakPagefileUsage;
}
return 0;
@@ -65,7 +59,7 @@ size_t ProcessMetrics::GetPeakPagefileUsage() const {
// Returns the current working set size, in bytes.
size_t ProcessMetrics::GetWorkingSetSize() const {
PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_, &pmc, sizeof(pmc))) {
+ if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
return pmc.WorkingSetSize;
}
return 0;
@@ -74,21 +68,21 @@ size_t ProcessMetrics::GetWorkingSetSize() const {
// Returns the peak working set size, in bytes.
size_t ProcessMetrics::GetPeakWorkingSetSize() const {
PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_, &pmc, sizeof(pmc))) {
+ if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
return pmc.PeakWorkingSetSize;
}
return 0;
}
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) {
+ size_t* shared_bytes) const {
  // PROCESS_MEMORY_COUNTERS_EX is not supported until XP SP2;
  // GetProcessMemoryInfo() simply fails on earlier OS versions, so the
  // requested information is unavailable there and we return 0. Unlike most
  // Win32 APIs, we don't need to initialize the "cb" member.
PROCESS_MEMORY_COUNTERS_EX pmcx;
if (private_bytes &&
- GetProcessMemoryInfo(process_,
+ GetProcessMemoryInfo(process_.Get(),
reinterpret_cast<PROCESS_MEMORY_COUNTERS*>(&pmcx),
sizeof(pmcx))) {
*private_bytes = pmcx.PrivateUsage;
@@ -111,8 +105,8 @@ void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
size_t committed_mapped = 0;
size_t committed_image = 0;
void* base_address = NULL;
- while (VirtualQueryEx(process_, base_address, &mbi, sizeof(mbi)) ==
- sizeof(mbi)) {
+ while (VirtualQueryEx(process_.Get(), base_address, &mbi, sizeof(mbi)) ==
+ sizeof(mbi)) {
if (mbi.State == MEM_COMMIT) {
if (mbi.Type == MEM_PRIVATE) {
committed_private += mbi.RegionSize;
@@ -160,7 +154,7 @@ class WorkingSetInformationBuffer {
size_t GetPageEntryCount() const { return number_of_entries; }
  // Gets the page entries for a process, growing the buffer as needed.
- bool QueryPageEntries(const ProcessHandle& process_) {
+ bool QueryPageEntries(const ProcessHandle& process) {
int retries = 5;
number_of_entries = 4096; // Just a guess.
@@ -173,9 +167,9 @@ class WorkingSetInformationBuffer {
return false;
// On success, |buffer_| is populated with info about the working set of
- // |process_|. On ERROR_BAD_LENGTH failure, increase the size of the
+ // |process|. On ERROR_BAD_LENGTH failure, increase the size of the
// buffer and try again.
- if (QueryWorkingSet(process_, buffer_, buffer_size))
+ if (QueryWorkingSet(process, buffer_, buffer_size))
break; // Success
if (GetLastError() != ERROR_BAD_LENGTH)
@@ -232,7 +226,7 @@ bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
memset(ws_usage, 0, sizeof(*ws_usage));
WorkingSetInformationBuffer buffer;
- if (!buffer.QueryPageEntries(process_))
+ if (!buffer.QueryPageEntries(process_.Get()))
return false;
size_t num_page_entries = buffer.GetPageEntryCount();
@@ -258,7 +252,7 @@ bool ProcessMetrics::GetProportionalSetSizeBytes(uint64_t* pss_bytes) const {
double ws_pss = 0.0;
WorkingSetInformationBuffer buffer;
- if (!buffer.QueryPageEntries(process_))
+ if (!buffer.QueryPageEntries(process_.Get()))
return false;
size_t num_page_entries = buffer.GetPageEntryCount();
@@ -287,8 +281,8 @@ double ProcessMetrics::GetCPUUsage() {
FILETIME kernel_time;
FILETIME user_time;
- if (!GetProcessTimes(process_, &creation_time, &exit_time,
- &kernel_time, &user_time)) {
+ if (!GetProcessTimes(process_.Get(), &creation_time, &exit_time, &kernel_time,
+ &user_time)) {
// We don't assert here because in some cases (such as in the Task Manager)
// we may call this function on a process that has just exited but we have
// not yet received the notification.
@@ -321,13 +315,20 @@ double ProcessMetrics::GetCPUUsage() {
}
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
- return GetProcessIoCounters(process_, io_counters) != FALSE;
+ return GetProcessIoCounters(process_.Get(), io_counters) != FALSE;
}
ProcessMetrics::ProcessMetrics(ProcessHandle process)
- : process_(process),
- processor_count_(SysInfo::NumberOfProcessors()),
- last_system_time_(0) {}
+ : processor_count_(SysInfo::NumberOfProcessors()), last_system_time_(0) {
+ if (process) {
+ HANDLE duplicate_handle;
+ BOOL result = ::DuplicateHandle(::GetCurrentProcess(), process,
+ ::GetCurrentProcess(), &duplicate_handle,
+ PROCESS_QUERY_INFORMATION, FALSE, 0);
+ DCHECK(result);
+ process_.Set(duplicate_handle);
+ }
+}
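Judging by the .Get() calls above, process_ is now a scoped handle owning the duplicate, so the metrics object survives the caller closing its original handle. A sketch of what that ownership permits (handle names are hypothetical):

    std::unique_ptr<base::ProcessMetrics> metrics(
        base::ProcessMetrics::CreateProcessMetrics(process_handle));
    ::CloseHandle(process_handle);  // Safe: |metrics| holds its own duplicate.
    size_t working_set = metrics->GetWorkingSetSize();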
size_t GetSystemCommitCharge() {
// Get the System Page Size.
@@ -349,7 +350,7 @@ size_t GetPageSize() {
// This function uses the following mapping between MEMORYSTATUSEX and
// SystemMemoryInfoKB:
// ullTotalPhys ==> total
-// ullAvailPhys ==> free
+// ullAvailPhys ==> avail_phys
// ullTotalPageFile ==> swap_total
// ullAvailPageFile ==> swap_free
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
@@ -359,7 +360,7 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
return false;
meminfo->total = mem_status.ullTotalPhys / 1024;
- meminfo->free = mem_status.ullAvailPhys / 1024;
+ meminfo->avail_phys = mem_status.ullAvailPhys / 1024;
meminfo->swap_total = mem_status.ullTotalPageFile / 1024;
meminfo->swap_free = mem_status.ullAvailPageFile / 1024;
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index 9b94891dd96..bbb342c4cf4 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -225,7 +225,6 @@ Process::Process(Process&& other) : process_(other.process_) {
}
Process& Process::operator=(Process&& other) {
- DCHECK_NE(this, &other);
process_ = other.process_;
other.Close();
return *this;
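With the DCHECK gone, self-move-assignment no longer crashes debug builds; it simply leaves the Process closed (valid but unspecified, as for standard types). If a no-op were wanted instead, a guarded variant would look like this sketch:

    Process& Process::operator=(Process&& other) {
      if (this != &other) {  // Treat self-move as a no-op.
        process_ = other.process_;
        other.Close();
      }
      return *this;
    }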
diff --git a/chromium/base/process/process_unittest.cc b/chromium/base/process/process_unittest.cc
index 619d22b5d95..839ec1359ff 100644
--- a/chromium/base/process/process_unittest.cc
+++ b/chromium/base/process/process_unittest.cc
@@ -42,11 +42,11 @@ class ProcessTest : public MultiProcessTest {
};
TEST_F(ProcessTest, Create) {
- Process process(SpawnChild("SimpleChildProcess"));
- ASSERT_TRUE(process.IsValid());
- ASSERT_FALSE(process.is_current());
- process.Close();
- ASSERT_FALSE(process.IsValid());
+ SpawnChildResult spawn_child = SpawnChild("SimpleChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
+ ASSERT_FALSE(spawn_child.process.is_current());
+ spawn_child.process.Close();
+ ASSERT_FALSE(spawn_child.process.IsValid());
}
TEST_F(ProcessTest, CreateCurrent) {
@@ -58,7 +58,8 @@ TEST_F(ProcessTest, CreateCurrent) {
}
TEST_F(ProcessTest, Move) {
- Process process1(SpawnChild("SimpleChildProcess"));
+ SpawnChildResult spawn_result = SpawnChild("SimpleChildProcess");
+ Process& process1 = spawn_result.process;
EXPECT_TRUE(process1.IsValid());
Process process2;
@@ -77,7 +78,8 @@ TEST_F(ProcessTest, Move) {
}
TEST_F(ProcessTest, Duplicate) {
- Process process1(SpawnChild("SimpleChildProcess"));
+ SpawnChildResult spawn_result = SpawnChild("SimpleChildProcess");
+ Process& process1 = spawn_result.process;
ASSERT_TRUE(process1.IsValid());
Process process2 = process1.Duplicate();
@@ -107,7 +109,8 @@ TEST_F(ProcessTest, DuplicateCurrent) {
}
TEST_F(ProcessTest, DeprecatedGetProcessFromHandle) {
- Process process1(SpawnChild("SimpleChildProcess"));
+ SpawnChildResult spawn_result = SpawnChild("SimpleChildProcess");
+ Process& process1 = spawn_result.process;
ASSERT_TRUE(process1.IsValid());
Process process2 = Process::DeprecatedGetProcessFromHandle(process1.Handle());
@@ -127,7 +130,8 @@ MULTIPROCESS_TEST_MAIN(SleepyChildProcess) {
}
TEST_F(ProcessTest, Terminate) {
- Process process(SpawnChild("SleepyChildProcess"));
+ SpawnChildResult spawn_result = SpawnChild("SleepyChildProcess");
+ Process& process = spawn_result.process;
ASSERT_TRUE(process.IsValid());
const int kDummyExitCode = 42;
@@ -173,11 +177,12 @@ MULTIPROCESS_TEST_MAIN(TerminateCurrentProcessImmediatelyWithCode0) {
}
TEST_F(ProcessTest, TerminateCurrentProcessImmediatelyWithZeroExitCode) {
- Process process(SpawnChild("TerminateCurrentProcessImmediatelyWithCode0"));
- ASSERT_TRUE(process.IsValid());
+ SpawnChildResult spawn_child =
+ SpawnChild("TerminateCurrentProcessImmediatelyWithCode0");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- ASSERT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
- &exit_code));
+ ASSERT_TRUE(spawn_child.process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), &exit_code));
EXPECT_EQ(0, exit_code);
}
@@ -188,11 +193,12 @@ MULTIPROCESS_TEST_MAIN(TerminateCurrentProcessImmediatelyWithCode250) {
}
TEST_F(ProcessTest, TerminateCurrentProcessImmediatelyWithNonZeroExitCode) {
- Process process(SpawnChild("TerminateCurrentProcessImmediatelyWithCode250"));
- ASSERT_TRUE(process.IsValid());
+ SpawnChildResult spawn_child =
+ SpawnChild("TerminateCurrentProcessImmediatelyWithCode250");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- ASSERT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
- &exit_code));
+ ASSERT_TRUE(spawn_child.process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), &exit_code));
EXPECT_EQ(250, exit_code);
}
@@ -202,26 +208,26 @@ MULTIPROCESS_TEST_MAIN(FastSleepyChildProcess) {
}
TEST_F(ProcessTest, WaitForExit) {
- Process process(SpawnChild("FastSleepyChildProcess"));
- ASSERT_TRUE(process.IsValid());
+ SpawnChildResult spawn_child = SpawnChild("FastSleepyChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
const int kDummyExitCode = 42;
int exit_code = kDummyExitCode;
- EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExit(&exit_code));
EXPECT_EQ(0, exit_code);
}
TEST_F(ProcessTest, WaitForExitWithTimeout) {
- Process process(SpawnChild("SleepyChildProcess"));
- ASSERT_TRUE(process.IsValid());
+ SpawnChildResult spawn_child = SpawnChild("SleepyChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
const int kDummyExitCode = 42;
int exit_code = kDummyExitCode;
TimeDelta timeout = TestTimeouts::tiny_timeout();
- EXPECT_FALSE(process.WaitForExitWithTimeout(timeout, &exit_code));
+ EXPECT_FALSE(spawn_child.process.WaitForExitWithTimeout(timeout, &exit_code));
EXPECT_EQ(kDummyExitCode, exit_code);
- process.Terminate(kDummyExitCode, false);
+ spawn_child.process.Terminate(kDummyExitCode, false);
}
// Ensure that the priority of a process is restored correctly after
@@ -231,13 +237,13 @@ TEST_F(ProcessTest, WaitForExitWithTimeout) {
TEST_F(ProcessTest, SetProcessBackgrounded) {
if (!Process::CanBackgroundProcesses())
return;
- Process process(SpawnChild("SimpleChildProcess"));
- int old_priority = process.GetPriority();
+ SpawnChildResult spawn_child = SpawnChild("SimpleChildProcess");
+ int old_priority = spawn_child.process.GetPriority();
#if defined(OS_WIN)
- EXPECT_TRUE(process.SetProcessBackgrounded(true));
- EXPECT_TRUE(process.IsProcessBackgrounded());
- EXPECT_TRUE(process.SetProcessBackgrounded(false));
- EXPECT_FALSE(process.IsProcessBackgrounded());
+ EXPECT_TRUE(spawn_child.process.SetProcessBackgrounded(true));
+ EXPECT_TRUE(spawn_child.process.IsProcessBackgrounded());
+ EXPECT_TRUE(spawn_child.process.SetProcessBackgrounded(false));
+ EXPECT_FALSE(spawn_child.process.IsProcessBackgrounded());
#elif defined(OS_MACOSX)
// On the Mac, backgrounding a process requires a port to that process.
// In the browser it's available through the MachBroker class, which is not
@@ -246,16 +252,16 @@ TEST_F(ProcessTest, SetProcessBackgrounded) {
// the ability to background/foreground a process, we can use the current
// process's port instead.
FakePortProvider provider;
- EXPECT_TRUE(process.SetProcessBackgrounded(&provider, true));
- EXPECT_TRUE(process.IsProcessBackgrounded(&provider));
- EXPECT_TRUE(process.SetProcessBackgrounded(&provider, false));
- EXPECT_FALSE(process.IsProcessBackgrounded(&provider));
+ EXPECT_TRUE(spawn_child.process.SetProcessBackgrounded(&provider, true));
+ EXPECT_TRUE(spawn_child.process.IsProcessBackgrounded(&provider));
+ EXPECT_TRUE(spawn_child.process.SetProcessBackgrounded(&provider, false));
+ EXPECT_FALSE(spawn_child.process.IsProcessBackgrounded(&provider));
#else
- process.SetProcessBackgrounded(true);
- process.SetProcessBackgrounded(false);
+ spawn_child.process.SetProcessBackgrounded(true);
+ spawn_child.process.SetProcessBackgrounded(false);
#endif
- int new_priority = process.GetPriority();
+ int new_priority = spawn_child.process.GetPriority();
EXPECT_EQ(old_priority, new_priority);
}
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 7031706e0b7..3f28b89b827 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -160,11 +160,11 @@ MULTIPROCESS_TEST_MAIN(SimpleChildProcess) {
// TODO(viettrungluu): This should be in a "MultiProcessTestTest".
TEST_F(ProcessUtilTest, SpawnChild) {
- base::Process process = SpawnChild("SimpleChildProcess");
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child = SpawnChild("SimpleChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code;
- EXPECT_TRUE(process.WaitForExitWithTimeout(
- TestTimeouts::action_max_timeout(), &exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), &exit_code));
}
MULTIPROCESS_TEST_MAIN(SlowChildProcess) {
@@ -176,12 +176,12 @@ TEST_F(ProcessUtilTest, KillSlowChild) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileSlow);
remove(signal_file.c_str());
- base::Process process = SpawnChild("SlowChildProcess");
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child = SpawnChild("SlowChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
SignalChildren(signal_file.c_str());
int exit_code;
- EXPECT_TRUE(process.WaitForExitWithTimeout(
- TestTimeouts::action_max_timeout(), &exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), &exit_code));
remove(signal_file.c_str());
}
@@ -190,18 +190,19 @@ TEST_F(ProcessUtilTest, DISABLED_GetTerminationStatusExit) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileSlow);
remove(signal_file.c_str());
- base::Process process = SpawnChild("SlowChildProcess");
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child = SpawnChild("SlowChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(
+ base::TERMINATION_STATUS_STILL_RUNNING,
+ base::GetTerminationStatus(spawn_child.process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
base::TerminationStatus status =
- WaitForChildTermination(process.Handle(), &exit_code);
+ WaitForChildTermination(spawn_child.process.Handle(), &exit_code);
EXPECT_EQ(base::TERMINATION_STATUS_NORMAL_TERMINATION, status);
EXPECT_EQ(kSuccess, exit_code);
remove(signal_file.c_str());
@@ -235,11 +236,12 @@ TEST_F(ProcessUtilTest, CurrentDirectory) {
base::LaunchOptions options;
options.current_directory = tmp_dir;
- base::Process process(SpawnChildWithOptions("CheckCwdProcess", options));
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child =
+ SpawnChildWithOptions("CheckCwdProcess", options);
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExit(&exit_code));
EXPECT_EQ(kSuccess, exit_code);
}
#endif // !defined(OS_ANDROID)
@@ -249,9 +251,9 @@ TEST_F(ProcessUtilTest, CurrentDirectory) {
TEST_F(ProcessUtilTest, GetProcId) {
base::ProcessId id1 = base::GetProcId(GetCurrentProcess());
EXPECT_NE(0ul, id1);
- base::Process process = SpawnChild("SimpleChildProcess");
- ASSERT_TRUE(process.IsValid());
- base::ProcessId id2 = process.Pid();
+ base::SpawnChildResult spawn_child = SpawnChild("SimpleChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
+ base::ProcessId id2 = spawn_child.process.Pid();
EXPECT_NE(0ul, id2);
EXPECT_NE(id1, id2);
}
@@ -295,18 +297,19 @@ TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusCrash) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileCrash);
remove(signal_file.c_str());
- base::Process process = SpawnChild("CrashingChildProcess");
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child = SpawnChild("CrashingChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(
+ base::TERMINATION_STATUS_STILL_RUNNING,
+ base::GetTerminationStatus(spawn_child.process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
base::TerminationStatus status =
- WaitForChildTermination(process.Handle(), &exit_code);
+ WaitForChildTermination(spawn_child.process.Handle(), &exit_code);
EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_CRASHED, status);
#if defined(OS_WIN)
@@ -350,18 +353,19 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigKill) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileKill);
remove(signal_file.c_str());
- base::Process process = SpawnChild("KilledChildProcess");
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child = SpawnChild("KilledChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(
+ base::TERMINATION_STATUS_STILL_RUNNING,
+ base::GetTerminationStatus(spawn_child.process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
base::TerminationStatus status =
- WaitForChildTermination(process.Handle(), &exit_code);
+ WaitForChildTermination(spawn_child.process.Handle(), &exit_code);
#if defined(OS_CHROMEOS)
EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM, status);
#else
@@ -384,18 +388,19 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigTerm) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileTerm);
remove(signal_file.c_str());
- base::Process process = SpawnChild("TerminatedChildProcess");
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child = SpawnChild("TerminatedChildProcess");
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(
+ base::TERMINATION_STATUS_STILL_RUNNING,
+ base::GetTerminationStatus(spawn_child.process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
base::TerminationStatus status =
- WaitForChildTermination(process.Handle(), &exit_code);
+ WaitForChildTermination(spawn_child.process.Handle(), &exit_code);
EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
int signaled = WIFSIGNALED(exit_code);
@@ -627,9 +632,9 @@ int ProcessUtilTest::CountOpenFDsInChild() {
fd_mapping_vec.push_back(std::pair<int, int>(fds[1], kChildPipe));
base::LaunchOptions options;
options.fds_to_remap = &fd_mapping_vec;
- base::Process process =
+ base::SpawnChildResult spawn_child =
SpawnChildWithOptions("ProcessUtilsLeakFDChildProcess", options);
- CHECK(process.IsValid());
+ CHECK(spawn_child.process.IsValid());
int ret = IGNORE_EINTR(close(fds[1]));
DPCHECK(ret == 0);
@@ -646,7 +651,7 @@ int ProcessUtilTest::CountOpenFDsInChild() {
base::TimeDelta timeout = base::TimeDelta::FromSeconds(1);
#endif
int exit_code;
- CHECK(process.WaitForExitWithTimeout(timeout, &exit_code));
+ CHECK(spawn_child.process.WaitForExitWithTimeout(timeout, &exit_code));
ret = IGNORE_EINTR(close(fds[0]));
DPCHECK(ret == 0);
@@ -869,15 +874,16 @@ bool IsProcessDead(base::ProcessHandle child) {
}
TEST_F(ProcessUtilTest, DelayedTermination) {
- base::Process child_process = SpawnChild("process_util_test_never_die");
- ASSERT_TRUE(child_process.IsValid());
- base::EnsureProcessTerminated(child_process.Duplicate());
+ base::SpawnChildResult spawn_child =
+ SpawnChild("process_util_test_never_die");
+ ASSERT_TRUE(spawn_child.process.IsValid());
+ base::EnsureProcessTerminated(spawn_child.process.Duplicate());
int exit_code;
- child_process.WaitForExitWithTimeout(base::TimeDelta::FromSeconds(5),
- &exit_code);
+ spawn_child.process.WaitForExitWithTimeout(base::TimeDelta::FromSeconds(5),
+ &exit_code);
// Check that process was really killed.
- EXPECT_TRUE(IsProcessDead(child_process.Handle()));
+ EXPECT_TRUE(IsProcessDead(spawn_child.process.Handle()));
}
MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
@@ -888,14 +894,15 @@ MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
}
TEST_F(ProcessUtilTest, ImmediateTermination) {
- base::Process child_process = SpawnChild("process_util_test_die_immediately");
- ASSERT_TRUE(child_process.IsValid());
+ base::SpawnChildResult spawn_child =
+ SpawnChild("process_util_test_die_immediately");
+ ASSERT_TRUE(spawn_child.process.IsValid());
// Give it time to die.
sleep(2);
- base::EnsureProcessTerminated(child_process.Duplicate());
+ base::EnsureProcessTerminated(spawn_child.process.Duplicate());
// Check that process was really killed.
- EXPECT_TRUE(IsProcessDead(child_process.Handle()));
+ EXPECT_TRUE(IsProcessDead(spawn_child.process.Handle()));
}
MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
@@ -934,14 +941,15 @@ TEST_F(ProcessUtilTest, PreExecHook) {
base::LaunchOptions options;
options.fds_to_remap = &fds_to_remap;
options.pre_exec_delegate = &read_from_pipe_delegate;
- base::Process process(SpawnChildWithOptions("SimpleChildProcess", options));
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child =
+ SpawnChildWithOptions("SimpleChildProcess", options);
+ ASSERT_TRUE(spawn_child.process.IsValid());
read_fd.reset();
ASSERT_EQ(1, HANDLE_EINTR(write(write_fd.get(), &kPipeValue, 1)));
int exit_code = 42;
- EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExit(&exit_code));
EXPECT_EQ(0, exit_code);
}
#endif // !defined(OS_ANDROID)
@@ -969,11 +977,12 @@ TEST_F(ProcessUtilTest, CloneFlags) {
base::LaunchOptions options;
options.clone_flags = CLONE_NEWUSER | CLONE_NEWPID;
- base::Process process(SpawnChildWithOptions("CheckPidProcess", options));
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child =
+ SpawnChildWithOptions("CheckPidProcess", options);
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = 42;
- EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExit(&exit_code));
EXPECT_EQ(kSuccess, exit_code);
}
#endif // defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
@@ -1010,11 +1019,12 @@ TEST_F(ProcessUtilTest, InvalidCurrentDirectory) {
base::LaunchOptions options;
options.current_directory = base::FilePath("/dev/null");
- base::Process process(SpawnChildWithOptions("SimpleChildProcess", options));
- ASSERT_TRUE(process.IsValid());
+ base::SpawnChildResult spawn_child =
+ SpawnChildWithOptions("SimpleChildProcess", options);
+ ASSERT_TRUE(spawn_child.process.IsValid());
int exit_code = kSuccess;
- EXPECT_TRUE(process.WaitForExit(&exit_code));
+ EXPECT_TRUE(spawn_child.process.WaitForExit(&exit_code));
EXPECT_NE(kSuccess, exit_code);
}
#endif // defined(OS_LINUX)
diff --git a/chromium/base/process/process_win.cc b/chromium/base/process/process_win.cc
index 623212654ee..9232c6dbcac 100644
--- a/chromium/base/process/process_win.cc
+++ b/chromium/base/process/process_win.cc
@@ -139,6 +139,10 @@ bool Process::Terminate(int exit_code, bool wait) const {
} else if (!result) {
DPLOG(ERROR) << "Unable to terminate process";
}
+ if (result) {
+ base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(Pid(),
+ exit_code);
+ }
return result;
}
@@ -162,6 +166,9 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
if (exit_code)
*exit_code = temp_code;
+
+ base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(
+ Pid(), static_cast<int>(temp_code));
return true;
}
diff --git a/chromium/base/run_loop_unittest.cc b/chromium/base/run_loop_unittest.cc
index a87ced09885..5be97d4a7d0 100644
--- a/chromium/base/run_loop_unittest.cc
+++ b/chromium/base/run_loop_unittest.cc
@@ -35,11 +35,11 @@ void RunNestedLoopTask(int* counter) {
// This task should quit |nested_run_loop| but not the main RunLoop.
ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&QuitWhenIdleTask, Unretained(&nested_run_loop),
- Unretained(counter)));
+ FROM_HERE, BindOnce(&QuitWhenIdleTask, Unretained(&nested_run_loop),
+ Unretained(counter)));
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+ FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
MessageLoop::ScopedNestableTaskAllower allower(MessageLoop::current());
nested_run_loop.Run();
@@ -63,12 +63,12 @@ class RunLoopTest : public testing::Test {
TEST_F(RunLoopTest, QuitWhenIdle) {
message_loop_.task_runner()->PostTask(
- FROM_HERE,
- Bind(&QuitWhenIdleTask, Unretained(&run_loop_), Unretained(&counter_)));
+ FROM_HERE, BindOnce(&QuitWhenIdleTask, Unretained(&run_loop_),
+ Unretained(&counter_)));
message_loop_.task_runner()->PostTask(
- FROM_HERE, Bind(&ShouldRunTask, Unretained(&counter_)));
+ FROM_HERE, BindOnce(&ShouldRunTask, Unretained(&counter_)));
message_loop_.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+ FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
run_loop_.Run();
EXPECT_EQ(2, counter_);
@@ -76,14 +76,14 @@ TEST_F(RunLoopTest, QuitWhenIdle) {
TEST_F(RunLoopTest, QuitWhenIdleNestedLoop) {
message_loop_.task_runner()->PostTask(
- FROM_HERE, Bind(&RunNestedLoopTask, Unretained(&counter_)));
+ FROM_HERE, BindOnce(&RunNestedLoopTask, Unretained(&counter_)));
message_loop_.task_runner()->PostTask(
- FROM_HERE,
- Bind(&QuitWhenIdleTask, Unretained(&run_loop_), Unretained(&counter_)));
+ FROM_HERE, BindOnce(&QuitWhenIdleTask, Unretained(&run_loop_),
+ Unretained(&counter_)));
message_loop_.task_runner()->PostTask(
- FROM_HERE, Bind(&ShouldRunTask, Unretained(&counter_)));
+ FROM_HERE, BindOnce(&ShouldRunTask, Unretained(&counter_)));
message_loop_.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+ FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
run_loop_.Run();
EXPECT_EQ(4, counter_);
@@ -93,9 +93,9 @@ TEST_F(RunLoopTest, QuitWhenIdleClosure) {
message_loop_.task_runner()->PostTask(FROM_HERE,
run_loop_.QuitWhenIdleClosure());
message_loop_.task_runner()->PostTask(
- FROM_HERE, Bind(&ShouldRunTask, Unretained(&counter_)));
+ FROM_HERE, BindOnce(&ShouldRunTask, Unretained(&counter_)));
message_loop_.task_runner()->PostDelayedTask(
- FROM_HERE, Bind(&ShouldNotRunTask), TimeDelta::FromDays(1));
+ FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
run_loop_.Run();
EXPECT_EQ(1, counter_);
diff --git a/chromium/base/sequence_checker_impl.cc b/chromium/base/sequence_checker_impl.cc
index df2a8cb24fe..6a9b5b2d0f5 100644
--- a/chromium/base/sequence_checker_impl.cc
+++ b/chromium/base/sequence_checker_impl.cc
@@ -26,7 +26,7 @@ class SequenceCheckerImpl::Core {
~Core() = default;
- bool CalledOnValidThread() const {
+ bool CalledOnValidSequence() const {
if (sequence_token_.IsValid())
return sequence_token_ == SequenceToken::GetForCurrentThread();
@@ -58,7 +58,7 @@ bool SequenceCheckerImpl::CalledOnValidSequence() const {
AutoLock auto_lock(lock_);
if (!core_)
core_ = MakeUnique<Core>();
- return core_->CalledOnValidThread();
+ return core_->CalledOnValidSequence();
}
void SequenceCheckerImpl::DetachFromSequence() {
diff --git a/chromium/base/sequence_checker_unittest.cc b/chromium/base/sequence_checker_unittest.cc
index 86e9298d979..41fd77be63f 100644
--- a/chromium/base/sequence_checker_unittest.cc
+++ b/chromium/base/sequence_checker_unittest.cc
@@ -250,8 +250,9 @@ TEST_F(SequenceCheckerTest,
SequencedWorkerPoolOwner second_pool_owner(kNumWorkerThreads, "test2");
second_pool_owner.pool()->PostNamedSequencedWorkerTask(
- "A", FROM_HERE, base::Bind(&ExpectNotCalledOnValidSequence,
- base::Unretained(&sequence_checker)));
+ "A", FROM_HERE,
+ base::BindOnce(&ExpectNotCalledOnValidSequence,
+ base::Unretained(&sequence_checker)));
second_pool_owner.pool()->FlushForTesting();
}
diff --git a/chromium/base/sequenced_task_runner.cc b/chromium/base/sequenced_task_runner.cc
index dc11ebc3f12..ff859c8ceee 100644
--- a/chromium/base/sequenced_task_runner.cc
+++ b/chromium/base/sequenced_task_runner.cc
@@ -4,21 +4,24 @@
#include "base/sequenced_task_runner.h"
+#include <utility>
+
#include "base/bind.h"
namespace base {
bool SequencedTaskRunner::PostNonNestableTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
+ OnceClosure task) {
+ return PostNonNestableDelayedTask(from_here, std::move(task),
+ base::TimeDelta());
}
bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
const tracked_objects::Location& from_here,
void (*deleter)(const void*),
const void* object) {
- return PostNonNestableTask(from_here, Bind(deleter, object));
+ return PostNonNestableTask(from_here, BindOnce(deleter, object));
}
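Since the parameter is now a move-only OnceClosure, callers hand the task off with BindOnce and std::move; a minimal usage sketch (the task_runner name is hypothetical):

    base::OnceClosure task = base::BindOnce([] { /* runs at most once */ });
    task_runner->PostNonNestableTask(FROM_HERE, std::move(task));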
OnTaskRunnerDeleter::OnTaskRunnerDeleter(
diff --git a/chromium/base/sequenced_task_runner.h b/chromium/base/sequenced_task_runner.h
index 6b2726ed4fe..b29153927e8 100644
--- a/chromium/base/sequenced_task_runner.h
+++ b/chromium/base/sequenced_task_runner.h
@@ -6,6 +6,7 @@
#define BASE_SEQUENCED_TASK_RUNNER_H_
#include "base/base_export.h"
+#include "base/callback.h"
#include "base/sequenced_task_runner_helpers.h"
#include "base/task_runner.h"
@@ -109,11 +110,11 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
// below.
bool PostNonNestableTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
virtual bool PostNonNestableDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay) = 0;
// Submits a non-nestable task to delete the given object. Returns
diff --git a/chromium/base/sequenced_task_runner_unittest.cc b/chromium/base/sequenced_task_runner_unittest.cc
index b999ffc8cd8..93ac20f7190 100644
--- a/chromium/base/sequenced_task_runner_unittest.cc
+++ b/chromium/base/sequenced_task_runner_unittest.cc
@@ -47,16 +47,12 @@ TEST(SequencedTaskRunnerTest, OnTaskRunnerDeleter) {
OnTaskRunnerDeleter(current_thread));
EXPECT_EQ(0, counter);
foreign_thread->PostTask(
- FROM_HERE,
- Bind([](SequenceBoundUniquePtr) {},
- Passed(&ptr)));
+ FROM_HERE, BindOnce([](SequenceBoundUniquePtr) {}, Passed(&ptr)));
{
RunLoop run_loop;
- foreign_thread->PostTaskAndReply(
- FROM_HERE,
- Bind([]{}),
- run_loop.QuitClosure());
+ foreign_thread->PostTaskAndReply(FROM_HERE, BindOnce([] {}),
+ run_loop.QuitClosure());
run_loop.Run();
}
EXPECT_EQ(1, counter);
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index 016bb156450..b0670b295eb 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -8,15 +8,37 @@
#define BASE_STL_UTIL_H_
#include <algorithm>
+#include <deque>
+#include <forward_list>
#include <functional>
#include <iterator>
+#include <list>
+#include <map>
+#include <set>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "base/logging.h"
namespace base {
+namespace internal {
+
+// Calls erase on iterators of matching elements.
+template <typename Container, typename Predicate>
+void IterateAndEraseIf(Container& container, Predicate pred) {
+ for (auto it = container.begin(); it != container.end();) {
+ if (pred(*it))
+ it = container.erase(it);
+ else
+ ++it;
+ }
+}
+
+} // namespace internal
+
// Clears internal memory of an STL object.
// STL clear()/reserve(0) does not always free internal memory allocated
// This function uses swap/destructor to ensure the internal memory is freed.
@@ -126,6 +148,145 @@ bool STLIncludes(const Arg1& a1, const Arg2& a2) {
a2.begin(), a2.end());
}
+// Erase/EraseIf are based on the Library Fundamentals TS v2 erase/erase_if:
+// http://en.cppreference.com/w/cpp/experimental/lib_extensions_2
+// They provide a generic way to erase elements from a container.
+// The functions here implement these for the standard containers until those
+// functions are available in the C++ standard.
+// For Chromium containers, overloads should be defined in their own headers
+// (as is done for the standard containers here).
+// Note: there is no std::erase for the standard associative containers, so we
+// don't provide one either.
+
+template <typename CharT, typename Traits, typename Allocator, typename Value>
+void Erase(std::basic_string<CharT, Traits, Allocator>& container,
+ const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <typename CharT, typename Traits, typename Allocator, class Predicate>
+void EraseIf(std::basic_string<CharT, Traits, Allocator>& container,
+ Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::deque<T, Allocator>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::deque<T, Allocator>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::vector<T, Allocator>& container, const Value& value) {
+ container.erase(std::remove(container.begin(), container.end(), value),
+ container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::vector<T, Allocator>& container, Predicate pred) {
+ container.erase(std::remove_if(container.begin(), container.end(), pred),
+ container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::forward_list<T, Allocator>& container, const Value& value) {
+ // Unlike std::forward_list::remove, this function template accepts
+ // heterogeneous types and does not force a conversion to the container's
+ // value type before invoking the == operator.
+ container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
+ container.remove_if(pred);
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::list<T, Allocator>& container, const Value& value) {
+ // Unlike std::list::remove, this function template accepts heterogeneous
+ // types and does not force a conversion to the container's value type before
+ // invoking the == operator.
+ container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::list<T, Allocator>& container, Predicate pred) {
+ container.remove_if(pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::map<Key, T, Compare, Allocator>& container, Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multimap<Key, T, Compare, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::set<Key, Compare, Allocator>& container, Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multiset<Key, Compare, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_map<Key, T, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(
+ std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_set<Key, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+ class Hash,
+ class KeyEqual,
+ class Allocator,
+ class Predicate>
+void EraseIf(std::unordered_multiset<Key, Hash, KeyEqual, Allocator>& container,
+ Predicate pred) {
+ internal::IterateAndEraseIf(container, pred);
+}
+
} // namespace base
#endif // BASE_STL_UTIL_H_
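Typical usage of the new helpers (a sketch with illustrative values, assuming the usual <vector>/<map>/<string> includes):

    std::vector<int> v = {1, 2, 3, 2};
    base::Erase(v, 2);  // v == {1, 3}

    std::map<std::string, int> m = {{"a", 1}, {"b", 2}};
    base::EraseIf(m, [](const std::pair<const std::string, int>& entry) {
      return entry.second > 1;
    });  // m == {{"a", 1}}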
diff --git a/chromium/base/stl_util_unittest.cc b/chromium/base/stl_util_unittest.cc
index 42004eb869b..48d0f660b58 100644
--- a/chromium/base/stl_util_unittest.cc
+++ b/chromium/base/stl_util_unittest.cc
@@ -4,8 +4,20 @@
#include "base/stl_util.h"
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <iterator>
+#include <list>
+#include <map>
#include <set>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -28,6 +40,55 @@ class ComparableValue {
int value_;
};
+template <typename Container>
+void RunEraseTest() {
+ const std::pair<Container, Container> test_data[] = {
+ {Container(), Container()}, {{1, 2, 3}, {1, 3}}, {{1, 2, 3, 2}, {1, 3}}};
+
+ for (auto test_case : test_data) {
+ base::Erase(test_case.first, 2);
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+}
+
+// This test is written for containers of std::pair<int, int> to support maps.
+template <typename Container>
+void RunEraseIfTest() {
+ struct {
+ Container input;
+ Container erase_even;
+ Container erase_odd;
+ } test_data[] = {
+ {Container(), Container(), Container()},
+ {{{1, 1}, {2, 2}, {3, 3}}, {{1, 1}, {3, 3}}, {{2, 2}}},
+ {{{1, 1}, {2, 2}, {3, 3}, {4, 4}}, {{1, 1}, {3, 3}}, {{2, 2}, {4, 4}}},
+ };
+
+ for (auto test_case : test_data) {
+ base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
+ return !(elem.first & 1);
+ });
+ EXPECT_EQ(test_case.erase_even, test_case.input);
+ }
+
+ for (auto test_case : test_data) {
+ base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
+ return elem.first & 1;
+ });
+ EXPECT_EQ(test_case.erase_odd, test_case.input);
+ }
+}
+
+struct CustomIntHash {
+ size_t operator()(int elem) const { return std::hash<int>()(elem) + 1; }
+};
+
+struct HashByFirst {
+ size_t operator()(const std::pair<int, int>& elem) const {
+ return std::hash<int>()(elem.first);
+ }
+};
+
} // namespace
namespace base {
@@ -263,5 +324,100 @@ TEST(StringAsArrayTest, WriteCopy) {
EXPECT_EQ("abc", s2);
}
+TEST(Erase, String) {
+ const std::pair<std::string, std::string> test_data[] = {
+ {"", ""}, {"abc", "bc"}, {"abca", "bc"},
+ };
+
+ for (auto test_case : test_data) {
+ Erase(test_case.first, 'a');
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+
+ for (auto test_case : test_data) {
+ EraseIf(test_case.first, [](char elem) { return elem < 'b'; });
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+}
+
+TEST(Erase, String16) {
+ std::pair<base::string16, base::string16> test_data[] = {
+ {base::string16(), base::string16()},
+ {UTF8ToUTF16("abc"), UTF8ToUTF16("bc")},
+ {UTF8ToUTF16("abca"), UTF8ToUTF16("bc")},
+ };
+
+ const base::string16 letters = UTF8ToUTF16("ab");
+ for (auto test_case : test_data) {
+ Erase(test_case.first, letters[0]);
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+
+ for (auto test_case : test_data) {
+ EraseIf(test_case.first, [&](short elem) { return elem < letters[1]; });
+ EXPECT_EQ(test_case.second, test_case.first);
+ }
+}
+
+TEST(Erase, Deque) {
+ RunEraseTest<std::deque<int>>();
+ RunEraseIfTest<std::deque<std::pair<int, int>>>();
+}
+
+TEST(Erase, Vector) {
+ RunEraseTest<std::vector<int>>();
+ RunEraseIfTest<std::vector<std::pair<int, int>>>();
+}
+
+TEST(Erase, ForwardList) {
+ RunEraseTest<std::forward_list<int>>();
+ RunEraseIfTest<std::forward_list<std::pair<int, int>>>();
+}
+
+TEST(Erase, List) {
+ RunEraseTest<std::list<int>>();
+ RunEraseIfTest<std::list<std::pair<int, int>>>();
+}
+
+TEST(Erase, Map) {
+ RunEraseIfTest<std::map<int, int>>();
+ RunEraseIfTest<std::map<int, int, std::greater<int>>>();
+}
+
+TEST(Erase, Multimap) {
+ RunEraseIfTest<std::multimap<int, int>>();
+ RunEraseIfTest<std::multimap<int, int, std::greater<int>>>();
+}
+
+TEST(Erase, Set) {
+ RunEraseIfTest<std::set<std::pair<int, int>>>();
+ RunEraseIfTest<
+ std::set<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
+}
+
+TEST(Erase, Multiset) {
+ RunEraseIfTest<std::multiset<std::pair<int, int>>>();
+ RunEraseIfTest<
+ std::multiset<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
+}
+
+TEST(Erase, UnorderedMap) {
+ RunEraseIfTest<std::unordered_map<int, int>>();
+ RunEraseIfTest<std::unordered_map<int, int, CustomIntHash>>();
+}
+
+TEST(Erase, UnorderedMultimap) {
+ RunEraseIfTest<std::unordered_multimap<int, int>>();
+ RunEraseIfTest<std::unordered_multimap<int, int, CustomIntHash>>();
+}
+
+TEST(Erase, UnorderedSet) {
+ RunEraseIfTest<std::unordered_set<std::pair<int, int>, HashByFirst>>();
+}
+
+TEST(Erase, UnorderedMultiset) {
+ RunEraseIfTest<std::unordered_multiset<std::pair<int, int>, HashByFirst>>();
+}
+
} // namespace
} // namespace base
diff --git a/chromium/base/strings/string_piece.h b/chromium/base/strings/string_piece.h
index eaec14de5da..5333640fee3 100644
--- a/chromium/base/strings/string_piece.h
+++ b/chromium/base/strings/string_piece.h
@@ -245,6 +245,9 @@ template <typename STRING_TYPE> class BasicStringPiece {
return r;
}
+ // This is the style of conversion preferred by std::string_view in C++17.
+ explicit operator STRING_TYPE() const { return as_string(); }
+
STRING_TYPE as_string() const {
// std::string doesn't like to take a NULL pointer even with a 0 size.
return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
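Because the conversion operator is explicit, direct-initialization and casts compile while copy-initialization does not, matching std::string_view; a sketch:

    base::StringPiece piece("hello");
    std::string a(piece);                             // OK: direct-init.
    std::string b = static_cast<std::string>(piece);  // OK: explicit cast.
    // std::string c = piece;  // Error: copy-init needs implicit conversion.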
diff --git a/chromium/base/strings/string_piece_unittest.cc b/chromium/base/strings/string_piece_unittest.cc
index f05aa152b50..7dfd71116bc 100644
--- a/chromium/base/strings/string_piece_unittest.cc
+++ b/chromium/base/strings/string_piece_unittest.cc
@@ -295,6 +295,8 @@ TYPED_TEST(CommonStringPieceTest, CheckFind) {
ASSERT_EQ(b.rfind(c, 0U), Piece::npos);
ASSERT_EQ(a.rfind(d), static_cast<size_t>(a.as_string().rfind(TypeParam())));
ASSERT_EQ(a.rfind(e), a.as_string().rfind(TypeParam()));
+ ASSERT_EQ(a.rfind(d), static_cast<size_t>(TypeParam(a).rfind(TypeParam())));
+ ASSERT_EQ(a.rfind(e), TypeParam(a).rfind(TypeParam()));
ASSERT_EQ(a.rfind(d, 12), 12U);
ASSERT_EQ(a.rfind(e, 17), 17U);
ASSERT_EQ(a.rfind(g), Piece::npos);
@@ -518,6 +520,12 @@ TYPED_TEST(CommonStringPieceTest, CheckCustom) {
ASSERT_TRUE(c == s3);
TypeParam s4(e.as_string());
ASSERT_TRUE(s4.empty());
+
+ // operator STRING_TYPE()
+  TypeParam s5(TypeParam(a).c_str(), 7);  // Note: has an embedded NULL.
+ ASSERT_TRUE(c == s5);
+ TypeParam s6(e);
+ ASSERT_TRUE(s6.empty());
}
TEST(StringPieceTest, CheckCustom) {
@@ -591,7 +599,11 @@ TYPED_TEST(CommonStringPieceTest, CheckNULL) {
ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
ASSERT_EQ(s.size(), 0U);
- TypeParam str = s.as_string();
+ TypeParam str(s);
+ ASSERT_EQ(str.length(), 0U);
+ ASSERT_EQ(str, TypeParam());
+
+ str = s.as_string();
ASSERT_EQ(str.length(), 0U);
ASSERT_EQ(str, TypeParam());
}
diff --git a/chromium/base/strings/string_util_unittest.cc b/chromium/base/strings/string_util_unittest.cc
index 6c054f8327e..6ac307ec2b4 100644
--- a/chromium/base/strings/string_util_unittest.cc
+++ b/chromium/base/strings/string_util_unittest.cc
@@ -676,6 +676,10 @@ TEST(StringUtilTest, JoinString) {
std::vector<std::string> parts;
EXPECT_EQ(std::string(), JoinString(parts, separator));
+ parts.push_back(std::string());
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+ parts.clear();
+
parts.push_back("a");
EXPECT_EQ("a", JoinString(parts, separator));
@@ -694,6 +698,10 @@ TEST(StringUtilTest, JoinString16) {
std::vector<string16> parts;
EXPECT_EQ(string16(), JoinString(parts, separator));
+ parts.push_back(string16());
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+ parts.clear();
+
parts.push_back(ASCIIToUTF16("a"));
EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
@@ -709,8 +717,13 @@ TEST(StringUtilTest, JoinString16) {
TEST(StringUtilTest, JoinStringPiece) {
std::string separator(", ");
- std::vector<base::StringPiece> parts;
- EXPECT_EQ(base::StringPiece(), JoinString(parts, separator));
+ std::vector<StringPiece> parts;
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ parts.push_back(StringPiece());
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+ parts.clear();
parts.push_back("a");
EXPECT_EQ("a", JoinString(parts, separator));
@@ -719,7 +732,7 @@ TEST(StringUtilTest, JoinStringPiece) {
parts.push_back("c");
EXPECT_EQ("a, b, c", JoinString(parts, separator));
- parts.push_back(base::StringPiece());
+ parts.push_back(StringPiece());
EXPECT_EQ("a, b, c, ", JoinString(parts, separator));
parts.push_back(" ");
EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
@@ -727,8 +740,13 @@ TEST(StringUtilTest, JoinStringPiece) {
TEST(StringUtilTest, JoinStringPiece16) {
string16 separator = ASCIIToUTF16(", ");
- std::vector<base::StringPiece16> parts;
- EXPECT_EQ(base::StringPiece16(), JoinString(parts, separator));
+ std::vector<StringPiece16> parts;
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ parts.push_back(StringPiece16());
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+ parts.clear();
const string16 kA = ASCIIToUTF16("a");
parts.push_back(kA);
@@ -740,7 +758,7 @@ TEST(StringUtilTest, JoinStringPiece16) {
parts.push_back(kC);
EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString(parts, separator));
- parts.push_back(base::StringPiece16());
+ parts.push_back(StringPiece16());
EXPECT_EQ(ASCIIToUTF16("a, b, c, "), JoinString(parts, separator));
const string16 kSpace = ASCIIToUTF16(" ");
parts.push_back(kSpace);
@@ -749,13 +767,16 @@ TEST(StringUtilTest, JoinStringPiece16) {
TEST(StringUtilTest, JoinStringInitializerList) {
std::string separator(", ");
- EXPECT_EQ(base::StringPiece(), JoinString({}, separator));
+ EXPECT_EQ(std::string(), JoinString({}, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ EXPECT_EQ(std::string(), JoinString({StringPiece()}, separator));
// With const char*s.
EXPECT_EQ("a", JoinString({"a"}, separator));
EXPECT_EQ("a, b, c", JoinString({"a", "b", "c"}, separator));
- EXPECT_EQ("a, b, c, ", JoinString({"a", "b", "c", ""}, separator));
- EXPECT_EQ("a|b|c|| ", JoinString({"a", "b", "c", "", " "}, "|"));
+ EXPECT_EQ("a, b, c, ", JoinString({"a", "b", "c", StringPiece()}, separator));
+ EXPECT_EQ("a|b|c|| ", JoinString({"a", "b", "c", StringPiece(), " "}, "|"));
// With std::strings.
const std::string kA = "a";
@@ -770,7 +791,10 @@ TEST(StringUtilTest, JoinStringInitializerList) {
TEST(StringUtilTest, JoinStringInitializerList16) {
string16 separator = ASCIIToUTF16(", ");
- EXPECT_EQ(base::StringPiece16(), JoinString({}, separator));
+ EXPECT_EQ(string16(), JoinString({}, separator));
+
+ // Test empty first part (https://crbug.com/698073).
+ EXPECT_EQ(string16(), JoinString({StringPiece16()}, separator));
// With string16s.
const string16 kA = ASCIIToUTF16("a");
@@ -780,12 +804,12 @@ TEST(StringUtilTest, JoinStringInitializerList16) {
const string16 kC = ASCIIToUTF16("c");
EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString({kA, kB, kC}, separator));
- const string16 kEmpty = ASCIIToUTF16("");
EXPECT_EQ(ASCIIToUTF16("a, b, c, "),
- JoinString({kA, kB, kC, kEmpty}, separator));
+ JoinString({kA, kB, kC, StringPiece16()}, separator));
const string16 kSpace = ASCIIToUTF16(" ");
- EXPECT_EQ(ASCIIToUTF16("a|b|c|| "),
- JoinString({kA, kB, kC, kEmpty, kSpace}, ASCIIToUTF16("|")));
+ EXPECT_EQ(
+ ASCIIToUTF16("a|b|c|| "),
+ JoinString({kA, kB, kC, StringPiece16(), kSpace}, ASCIIToUTF16("|")));
// With StringPiece16s.
const StringPiece16 kPieceA = kA;
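The repeated "empty first part" cases above guard the regression tracked at https://crbug.com/698073: a join that special-cases its first element can get the separator bookkeeping wrong when that element is empty. An illustrative join (not Chromium's implementation) showing the invariant the tests pin down:

#include <cstddef>
#include <string>
#include <vector>

// An empty first part contributes no characters but still counts as a part
// for separator placement.
std::string Join(const std::vector<std::string>& parts,
                 const std::string& separator) {
  std::string result;
  for (std::size_t i = 0; i < parts.size(); ++i) {
    if (i != 0)
      result += separator;
    result += parts[i];
  }
  return result;
}
// Join({}, ", ") == ""; Join({""}, ", ") == ""; Join({"", "a"}, ", ") == ", a".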
diff --git a/chromium/base/sync_socket.h b/chromium/base/sync_socket.h
index fcf4155047e..53fbeb613fa 100644
--- a/chromium/base/sync_socket.h
+++ b/chromium/base/sync_socket.h
@@ -93,6 +93,9 @@ class BASE_EXPORT SyncSocket {
// processes.
Handle handle() const { return handle_; }
+ // Extracts and takes ownership of the contained handle.
+ Handle Release();
+
protected:
Handle handle_;
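Release() complements handle(): the caller takes ownership and the socket is left holding kInvalidHandle, so its destructor will not close a handle that now lives elsewhere. A usage sketch, assuming the existing CreatePair() factory:

base::SyncSocket socket_a;
base::SyncSocket socket_b;
if (base::SyncSocket::CreatePair(&socket_a, &socket_b)) {
  base::SyncSocket::Handle raw = socket_a.Release();
  // |socket_a| no longer owns |raw|; the new owner must close it.
}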
diff --git a/chromium/base/sync_socket_nacl.cc b/chromium/base/sync_socket_nacl.cc
index 4a02082edd6..19a20becfa6 100644
--- a/chromium/base/sync_socket_nacl.cc
+++ b/chromium/base/sync_socket_nacl.cc
@@ -75,6 +75,12 @@ size_t SyncSocket::Peek() {
return 0;
}
+SyncSocket::Handle SyncSocket::Release() {
+ Handle r = handle_;
+ handle_ = kInvalidHandle;
+ return r;
+}
+
CancelableSyncSocket::CancelableSyncSocket() {
}
diff --git a/chromium/base/sync_socket_posix.cc b/chromium/base/sync_socket_posix.cc
index 5d9e25e5ead..da995f4b648 100644
--- a/chromium/base/sync_socket_posix.cc
+++ b/chromium/base/sync_socket_posix.cc
@@ -207,6 +207,12 @@ size_t SyncSocket::Peek() {
return number_chars;
}
+SyncSocket::Handle SyncSocket::Release() {
+ Handle r = handle_;
+ handle_ = kInvalidHandle;
+ return r;
+}
+
CancelableSyncSocket::CancelableSyncSocket() {}
CancelableSyncSocket::CancelableSyncSocket(Handle handle)
: SyncSocket(handle) {
diff --git a/chromium/base/sync_socket_win.cc b/chromium/base/sync_socket_win.cc
index c101f77ad9d..797f12f72dc 100644
--- a/chromium/base/sync_socket_win.cc
+++ b/chromium/base/sync_socket_win.cc
@@ -293,6 +293,12 @@ size_t SyncSocket::Peek() {
return available;
}
+SyncSocket::Handle SyncSocket::Release() {
+ Handle r = handle_;
+ handle_ = kInvalidHandle;
+ return r;
+}
+
CancelableSyncSocket::CancelableSyncSocket()
: shutdown_event_(base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED),
diff --git a/chromium/base/synchronization/atomic_flag_unittest.cc b/chromium/base/synchronization/atomic_flag_unittest.cc
index a3aa3341a0d..76e5d968f78 100644
--- a/chromium/base/synchronization/atomic_flag_unittest.cc
+++ b/chromium/base/synchronization/atomic_flag_unittest.cc
@@ -68,10 +68,9 @@ TEST(AtomicFlagTest, ReadFromDifferentThread) {
Thread thread("AtomicFlagTest.ReadFromDifferentThread");
ASSERT_TRUE(thread.Start());
- thread.task_runner()->PostTask(
- FROM_HERE,
- Bind(&BusyWaitUntilFlagIsSet, &tested_flag, &expected_after_flag,
- &reset_flag));
+ thread.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&BusyWaitUntilFlagIsSet, &tested_flag,
+ &expected_after_flag, &reset_flag));
// To verify that IsSet() fetches the flag's value from memory every time it
// is called (not just the first time that it is called on a thread), sleep
@@ -100,10 +99,9 @@ TEST(AtomicFlagTest, ReadFromDifferentThread) {
// |thread| is guaranteed to be synchronized past the
// |UnsafeResetForTesting()| call when the task runs per the implicit
// synchronization in the post task mechanism.
- thread.task_runner()->PostTask(
- FROM_HERE,
- Bind(&BusyWaitUntilFlagIsSet, &tested_flag, &expected_after_flag,
- nullptr));
+ thread.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&BusyWaitUntilFlagIsSet, &tested_flag,
+ &expected_after_flag, nullptr));
PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
@@ -125,7 +123,7 @@ TEST(AtomicFlagTest, SetOnDifferentSequenceDeathTest) {
AtomicFlag flag;
flag.Set();
- t.task_runner()->PostTask(FROM_HERE, Bind(&ExpectSetFlagDeath, &flag));
+ t.task_runner()->PostTask(FROM_HERE, BindOnce(&ExpectSetFlagDeath, &flag));
}
} // namespace base
diff --git a/chromium/base/synchronization/condition_variable_unittest.cc b/chromium/base/synchronization/condition_variable_unittest.cc
index d60b2b8af58..ebdbe5776b9 100644
--- a/chromium/base/synchronization/condition_variable_unittest.cc
+++ b/chromium/base/synchronization/condition_variable_unittest.cc
@@ -225,7 +225,7 @@ TEST_F(ConditionVariableTest, DISABLED_TimeoutAcrossSetTimeOfDay) {
Thread thread("Helper");
thread.Start();
- thread.task_runner()->PostTask(FROM_HERE, base::Bind(&BackInTime, &lock));
+ thread.task_runner()->PostTask(FROM_HERE, base::BindOnce(&BackInTime, &lock));
TimeTicks start = TimeTicks::Now();
const TimeDelta kWaitTime = TimeDelta::FromMilliseconds(300);
diff --git a/chromium/base/synchronization/waitable_event.h b/chromium/base/synchronization/waitable_event.h
index 761965f03a0..e8caffeec32 100644
--- a/chromium/base/synchronization/waitable_event.h
+++ b/chromium/base/synchronization/waitable_event.h
@@ -112,6 +112,9 @@ class BASE_EXPORT WaitableEvent {
// You MUST NOT delete any of the WaitableEvent objects while this wait is
// happening, however WaitMany's return "happens after" the |Signal| call
// that caused it has completed, like |Wait|.
+ //
+ // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
+ // index among them is returned.
static size_t WaitMany(WaitableEvent** waitables, size_t count);
// For asynchronous waiting, see WaitableEventWatcher
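Under the new contract, a WaitMany() that is unblocked by several already-signaled events deterministically reports the lowest input index and, for auto-reset events, consumes only that one signal. A sketch:

base::WaitableEvent ev0(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                        base::WaitableEvent::InitialState::NOT_SIGNALED);
base::WaitableEvent ev1(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                        base::WaitableEvent::InitialState::NOT_SIGNALED);
base::WaitableEvent ev2(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                        base::WaitableEvent::InitialState::NOT_SIGNALED);
base::WaitableEvent* events[] = {&ev0, &ev1, &ev2};
ev2.Signal();
ev1.Signal();
// Indices 1 and 2 are signaled; the lowest one wins and is auto-reset.
size_t index = base::WaitableEvent::WaitMany(events, 3);
// index == 1; a second WaitMany() would return 2.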
diff --git a/chromium/base/synchronization/waitable_event_posix.cc b/chromium/base/synchronization/waitable_event_posix.cc
index 5dfff468ad6..846fa06700e 100644
--- a/chromium/base/synchronization/waitable_event_posix.cc
+++ b/chromium/base/synchronization/waitable_event_posix.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include <algorithm>
+#include <limits>
#include <vector>
#include "base/debug/activity_tracker.h"
@@ -266,12 +267,10 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
SyncWaiter sw;
const size_t r = EnqueueMany(&waitables[0], count, &sw);
- if (r) {
+ if (r < count) {
// One of the events is already signaled. The SyncWaiter has not been
- // enqueued anywhere. EnqueueMany returns the count of remaining waitables
- // when the signaled one was seen, so the index of the signaled event is
- // @count - @r.
- return waitables[count - r].second;
+ // enqueued anywhere.
+ return waitables[r].second;
}
// At this point, we hold the locks on all the WaitableEvents and we have
@@ -319,38 +318,50 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
}
// -----------------------------------------------------------------------------
-// If return value == 0:
+// If return value == count:
// The locks of the WaitableEvents have been taken in order and the Waiter has
// been enqueued in the wait-list of each. None of the WaitableEvents are
// currently signaled
// else:
// None of the WaitableEvent locks are held. The Waiter has not been enqueued
-// in any of them and the return value is the index of the first WaitableEvent
-// which was signaled, from the end of the array.
+// in any of them and the return value is the index of the WaitableEvent which
+// was signaled with the lowest input index from the original WaitMany call.
// -----------------------------------------------------------------------------
// static
-size_t WaitableEvent::EnqueueMany
- (std::pair<WaitableEvent*, size_t>* waitables,
- size_t count, Waiter* waiter) {
- if (!count)
- return 0;
-
- waitables[0].first->kernel_->lock_.Acquire();
- if (waitables[0].first->kernel_->signaled_) {
- if (!waitables[0].first->kernel_->manual_reset_)
- waitables[0].first->kernel_->signaled_ = false;
- waitables[0].first->kernel_->lock_.Release();
- return count;
+size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
+ size_t count,
+ Waiter* waiter) {
+ size_t winner = count;
+ size_t winner_index = count;
+ for (size_t i = 0; i < count; ++i) {
+ auto& kernel = waitables[i].first->kernel_;
+ kernel->lock_.Acquire();
+ if (kernel->signaled_ && waitables[i].second < winner) {
+ winner = waitables[i].second;
+ winner_index = i;
}
+ }
- const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
- if (r) {
- waitables[0].first->kernel_->lock_.Release();
- } else {
- waitables[0].first->Enqueue(waiter);
+ // No events signaled. All locks acquired. Enqueue the Waiter on all of them
+ // and return.
+ if (winner == count) {
+ for (size_t i = 0; i < count; ++i)
+ waitables[i].first->Enqueue(waiter);
+ return count;
+ }
+
+ // Unlock in reverse order and possibly clear the chosen winner's signal
+ // before returning its index.
+ for (auto* w = waitables + count - 1; w >= waitables; --w) {
+ auto& kernel = w->first->kernel_;
+ if (w->second == winner) {
+ if (!kernel->manual_reset_)
+ kernel->signaled_ = false;
}
+ kernel->lock_.Release();
+ }
- return r;
+ return winner_index;
}
// -----------------------------------------------------------------------------
diff --git a/chromium/base/synchronization/waitable_event_unittest.cc b/chromium/base/synchronization/waitable_event_unittest.cc
index c0e280aa974..3aa1af1619b 100644
--- a/chromium/base/synchronization/waitable_event_unittest.cc
+++ b/chromium/base/synchronization/waitable_event_unittest.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <algorithm>
+
#include "base/compiler_specific.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -78,6 +80,42 @@ TEST(WaitableEventTest, WaitManyShortcut) {
delete ev[i];
}
+TEST(WaitableEventTest, WaitManyLeftToRight) {
+ WaitableEvent* ev[5];
+ for (size_t i = 0; i < 5; ++i) {
+ ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ }
+
+ // Test for consistent left-to-right return behavior across all permutations
+ // of the input array. This is to verify that only the indices -- and not
+ // the WaitableEvents' addresses -- are relevant in determining who wins when
+ // multiple events are signaled.
+
+ std::sort(ev, ev + 5);
+ do {
+ ev[0]->Signal();
+ ev[1]->Signal();
+ EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+
+ ev[2]->Signal();
+ EXPECT_EQ(1u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+
+ ev[3]->Signal();
+ ev[4]->Signal();
+ ev[0]->Signal();
+ EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(3u, WaitableEvent::WaitMany(ev, 5));
+ ev[2]->Signal();
+ EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+ EXPECT_EQ(4u, WaitableEvent::WaitMany(ev, 5));
+ } while (std::next_permutation(ev, ev + 5));
+
+ for (size_t i = 0; i < 5; ++i)
+ delete ev[i];
+}
+
class WaitableEventSignaler : public PlatformThread::Delegate {
public:
WaitableEventSignaler(TimeDelta delay, WaitableEvent* event)
diff --git a/chromium/base/sys_info.h b/chromium/base/sys_info.h
index e35feff735e..18bdaf0096a 100644
--- a/chromium/base/sys_info.h
+++ b/chromium/base/sys_info.h
@@ -13,11 +13,18 @@
#include "base/base_export.h"
#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
+namespace debug {
+FORWARD_DECLARE_TEST(SystemMetricsTest, ParseMeminfo);
+}
+
+struct SystemMemoryInfoKB;
+
class BASE_EXPORT SysInfo {
public:
// Return the number of logical processors/cores on the current machine.
@@ -28,6 +35,9 @@ class BASE_EXPORT SysInfo {
// Return the number of bytes of current available physical memory on the
// machine.
+ // (The amount of memory that can be allocated without any significant
+ // impact on the system; allocating it may cause the OS to free inactive
+ // and/or speculative file-backed memory.)
static int64_t AmountOfAvailablePhysicalMemory();
// Return the number of bytes of virtual memory of this process. A return
@@ -70,8 +80,6 @@ class BASE_EXPORT SysInfo {
static std::string OperatingSystemVersion();
// Retrieves detailed numeric values for the OS version.
- // TODO(port): Implement a Linux version of this method and enable the
- // corresponding unit test.
// DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
// for OS version-specific feature checks and workarounds. If you must use
// an OS version check instead of a feature check, use the base::mac::IsOS*
@@ -147,6 +155,15 @@ class BASE_EXPORT SysInfo {
// Low-end device refers to devices having less than 512M memory in the
// current implementation.
static bool IsLowEndDevice();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
+ FRIEND_TEST_ALL_PREFIXES(debug::SystemMetricsTest, ParseMeminfo);
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ static int64_t AmountOfAvailablePhysicalMemory(
+ const SystemMemoryInfoKB& meminfo);
+#endif
};
} // namespace base
diff --git a/chromium/base/sys_info_android.cc b/chromium/base/sys_info_android.cc
index 8b5a9dab6ef..b21bd2d56df 100644
--- a/chromium/base/sys_info_android.cc
+++ b/chromium/base/sys_info_android.cc
@@ -66,11 +66,16 @@ const int kDefaultAndroidMajorVersion = 7;
const int kDefaultAndroidMinorVersion = 0;
const int kDefaultAndroidBugfixVersion = 99;
-// Parse out the OS version numbers from the system properties.
-void ParseOSVersionNumbers(const char* os_version_str,
- int32_t* major_version,
- int32_t* minor_version,
- int32_t* bugfix_version) {
+// Get and parse out the OS version numbers from the system properties.
+// Note: if parsing fails, the default version is returned as a fallback.
+void GetOsVersionStringAndNumbers(std::string* version_string,
+ int32_t* major_version,
+ int32_t* minor_version,
+ int32_t* bugfix_version) {
+ // Read the version number string out from the properties.
+ char os_version_str[PROP_VALUE_MAX];
+ __system_property_get("ro.build.version.release", os_version_str);
+
if (os_version_str[0]) {
// Try to parse out the version numbers from the string.
int num_read = sscanf(os_version_str, "%d.%d.%d", major_version,
@@ -78,8 +83,11 @@ void ParseOSVersionNumbers(const char* os_version_str,
if (num_read > 0) {
// If we don't have a full set of version numbers, make the extras 0.
- if (num_read < 2) *minor_version = 0;
- if (num_read < 3) *bugfix_version = 0;
+ if (num_read < 2)
+ *minor_version = 0;
+ if (num_read < 3)
+ *bugfix_version = 0;
+ *version_string = std::string(os_version_str);
return;
}
}
@@ -88,6 +96,8 @@ void ParseOSVersionNumbers(const char* os_version_str,
*major_version = kDefaultAndroidMajorVersion;
*minor_version = kDefaultAndroidMinorVersion;
*bugfix_version = kDefaultAndroidBugfixVersion;
+ *version_string = ::base::StringPrintf("%d.%d.%d", *major_version,
+ *minor_version, *bugfix_version);
}
// Parses a system property (specified with unit 'k','m' or 'g').
@@ -171,21 +181,18 @@ std::string SysInfo::OperatingSystemName() {
}
std::string SysInfo::OperatingSystemVersion() {
+ std::string version_string;
int32_t major, minor, bugfix;
- OperatingSystemVersionNumbers(&major, &minor, &bugfix);
- return StringPrintf("%d.%d.%d", major, minor, bugfix);
+ GetOsVersionStringAndNumbers(&version_string, &major, &minor, &bugfix);
+ return version_string;
}
void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
int32_t* minor_version,
int32_t* bugfix_version) {
- // Read the version number string out from the properties.
- char os_version_str[PROP_VALUE_MAX];
- __system_property_get("ro.build.version.release", os_version_str);
-
- // Parse out the numbers.
- ParseOSVersionNumbers(os_version_str, major_version, minor_version,
- bugfix_version);
+ std::string version_string;
+ GetOsVersionStringAndNumbers(&version_string, major_version, minor_version,
+ bugfix_version);
}
std::string SysInfo::GetAndroidBuildCodename() {
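GetOsVersionStringAndNumbers() keeps the returned string and numbers consistent: a release property of "7.1" yields major 7, minor 1, bugfix 0 with version string "7.1", while an unparseable property falls back to the defaults and a synthesized "7.0.99". A sketch of the underlying sscanf behavior (fields that were not read are zeroed by the caller):

int32_t major = 0, minor = 0, bugfix = 0;
int num_read = sscanf("7.1", "%d.%d.%d", &major, &minor, &bugfix);
// num_read == 2: major == 7, minor == 1; bugfix stays 0.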
diff --git a/chromium/base/sys_info_ios.mm b/chromium/base/sys_info_ios.mm
index 9a95298e692..9a329b9f092 100644
--- a/chromium/base/sys_info_ios.mm
+++ b/chromium/base/sys_info_ios.mm
@@ -15,6 +15,7 @@
#include "base/mac/scoped_mach_port.h"
#include "base/mac/scoped_nsautorelease_pool.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
#include "base/strings/sys_string_conversions.h"
namespace base {
@@ -83,19 +84,12 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- base::mac::ScopedMachSendRight host(mach_host_self());
- vm_statistics_data_t vm_info;
- mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
- if (host_statistics(host.get(),
- HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
- NOTREACHED();
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
return 0;
- }
-
- return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
- PAGE_SIZE;
+ // We should also count inactive file-backed memory, but unfortunately
+ // iOS does not expose that information.
+ return static_cast<int64_t>(info.free + info.speculative) * 1024;
}
// static
diff --git a/chromium/base/sys_info_linux.cc b/chromium/base/sys_info_linux.cc
index 298d245ecf5..0cd05b363a2 100644
--- a/chromium/base/sys_info_linux.cc
+++ b/chromium/base/sys_info_linux.cc
@@ -13,6 +13,7 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
+#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_info_internal.h"
#include "build/build_config.h"
@@ -42,13 +43,29 @@ base::LazyInstance<
namespace base {
// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+ return g_lazy_physical_memory.Get().value();
+}
+
+// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- return AmountOfMemory(_SC_AVPHYS_PAGES);
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
+ return 0;
+ return AmountOfAvailablePhysicalMemory(info);
}
// static
-int64_t SysInfo::AmountOfPhysicalMemory() {
- return g_lazy_physical_memory.Get().value();
+int64_t SysInfo::AmountOfAvailablePhysicalMemory(
+ const SystemMemoryInfoKB& info) {
+ // See details here:
+ // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+ // The fallback logic (when there is no MemAvailable) would be more precise
+ // if we had information about zone watermarks (/proc/zoneinfo).
+ int64_t res_kb = info.available != 0
+ ? info.available - info.active_file
+ : info.free + info.reclaimable + info.inactive_file;
+ return res_kb * 1024;
}
// static
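The Linux estimate follows the kernel's MemAvailable accounting linked above; on older kernels without MemAvailable, the fallback approximates it. A sketch of the arithmetic with a hypothetical helper (inputs in kB, as in SystemMemoryInfoKB):

// Mirrors the formula above; returns bytes.
int64_t AvailableBytes(int64_t available_kb, int64_t active_file_kb,
                       int64_t free_kb, int64_t reclaimable_kb,
                       int64_t inactive_file_kb) {
  const int64_t kb = available_kb != 0
                         ? available_kb - active_file_kb
                         : free_kb + reclaimable_kb + inactive_file_kb;
  return kb * 1024;
}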
diff --git a/chromium/base/sys_info_mac.mm b/chromium/base/sys_info_mac.mm
index aab1103d4c4..1141bd55776 100644
--- a/chromium/base/sys_info_mac.mm
+++ b/chromium/base/sys_info_mac.mm
@@ -19,6 +19,7 @@
#include "base/mac/scoped_mach_port.h"
#import "base/mac/sdk_forward_declarations.h"
#include "base/macros.h"
+#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
namespace base {
@@ -83,20 +84,12 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- base::mac::ScopedMachSendRight host(mach_host_self());
- vm_statistics_data_t vm_info;
- mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
-
- if (host_statistics(host.get(),
- HOST_VM_INFO,
- reinterpret_cast<host_info_t>(&vm_info),
- &count) != KERN_SUCCESS) {
- NOTREACHED();
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
return 0;
- }
-
- return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
- PAGE_SIZE;
+ // We should also count inactive file-backed memory, but unfortunately
+ // Mac OS does not expose that information.
+ return static_cast<int64_t>(info.free + info.speculative) * 1024;
}
// static
diff --git a/chromium/base/sys_info_openbsd.cc b/chromium/base/sys_info_openbsd.cc
index 531c11733b6..9c987841836 100644
--- a/chromium/base/sys_info_openbsd.cc
+++ b/chromium/base/sys_info_openbsd.cc
@@ -48,6 +48,8 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
+ // We should also count inactive file-backed memory, but unfortunately
+ // OpenBSD does not expose that information.
return AmountOfMemory(_SC_AVPHYS_PAGES);
}
diff --git a/chromium/base/sys_info_posix.cc b/chromium/base/sys_info_posix.cc
index f480055cc4b..c4c07d07da2 100644
--- a/chromium/base/sys_info_posix.cc
+++ b/chromium/base/sys_info_posix.cc
@@ -183,6 +183,30 @@ std::string SysInfo::OperatingSystemVersion() {
}
#endif
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+ int32_t* minor_version,
+ int32_t* bugfix_version) {
+ struct utsname info;
+ if (uname(&info) < 0) {
+ NOTREACHED();
+ *major_version = 0;
+ *minor_version = 0;
+ *bugfix_version = 0;
+ return;
+ }
+ int num_read = sscanf(info.release, "%d.%d.%d", major_version, minor_version,
+ bugfix_version);
+ if (num_read < 1)
+ *major_version = 0;
+ if (num_read < 2)
+ *minor_version = 0;
+ if (num_read < 3)
+ *bugfix_version = 0;
+}
+#endif
+
// static
std::string SysInfo::OperatingSystemArchitecture() {
struct utsname info;
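The new generic POSIX path derives version numbers from uname(2)'s release string, degrading gracefully in the same way as the Android parser. A sketch:

struct utsname info;
if (uname(&info) == 0) {
  int32_t major = 0, minor = 0, bugfix = 0;
  // For info.release == "4.15.0-20-generic", sscanf reads 4, 15 and 0,
  // stopping at the '-'.
  sscanf(info.release, "%d.%d.%d", &major, &minor, &bugfix);
}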
diff --git a/chromium/base/sys_info_unittest.cc b/chromium/base/sys_info_unittest.cc
index c3b85077071..94b5a849716 100644
--- a/chromium/base/sys_info_unittest.cc
+++ b/chromium/base/sys_info_unittest.cc
@@ -6,6 +6,7 @@
#include "base/environment.h"
#include "base/files/file_util.h"
+#include "base/process/process_metrics.h"
#include "base/sys_info.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -13,46 +14,71 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
-typedef PlatformTest SysInfoTest;
-using base::FilePath;
+namespace base {
+
+using SysInfoTest = PlatformTest;
TEST_F(SysInfoTest, NumProcs) {
// We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
+ EXPECT_GE(SysInfo::NumberOfProcessors(), 1);
}
TEST_F(SysInfoTest, AmountOfMem) {
// We aren't actually testing that it's correct, just that it's sane.
- EXPECT_GT(base::SysInfo::AmountOfPhysicalMemory(), 0);
- EXPECT_GT(base::SysInfo::AmountOfPhysicalMemoryMB(), 0);
+ EXPECT_GT(SysInfo::AmountOfPhysicalMemory(), 0);
+ EXPECT_GT(SysInfo::AmountOfPhysicalMemoryMB(), 0);
// The maximal amount of virtual memory can be zero, which means unlimited.
- EXPECT_GE(base::SysInfo::AmountOfVirtualMemory(), 0);
+ EXPECT_GE(SysInfo::AmountOfVirtualMemory(), 0);
}
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST_F(SysInfoTest, AmountOfAvailablePhysicalMemory) {
+ // Note: |info| fields are in kilobytes.
+ SystemMemoryInfoKB info;
+ ASSERT_TRUE(GetSystemMemoryInfo(&info));
+ EXPECT_GT(info.free, 0);
+
+ if (info.available != 0) {
+ // The kernel reports MemAvailable.
+ EXPECT_LT(info.available, info.total);
+ const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+ EXPECT_LT(amount / 1024, info.available);
+ // Simulate as if there is no MemAvailable.
+ info.available = 0;
+ }
+
+ // There is no MemAvailable. Check the fallback logic.
+ const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+ EXPECT_LT(amount / 1024, info.total);
+}
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
- ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
- << tmp_path.value();
+ ASSERT_TRUE(GetTempDir(&tmp_path));
+ EXPECT_GE(SysInfo::AmountOfFreeDiskSpace(tmp_path), 0) << tmp_path.value();
}
TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
// We aren't actually testing that it's correct, just that it's sane.
FilePath tmp_path;
- ASSERT_TRUE(base::GetTempDir(&tmp_path));
- EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
- << tmp_path.value();
+ ASSERT_TRUE(GetTempDir(&tmp_path));
+ EXPECT_GT(SysInfo::AmountOfTotalDiskSpace(tmp_path), 0) << tmp_path.value();
}
-#if defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
int32_t os_major_version = -1;
int32_t os_minor_version = -1;
int32_t os_bugfix_version = -1;
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_GT(os_major_version, -1);
EXPECT_GT(os_minor_version, -1);
EXPECT_GT(os_bugfix_version, -1);
@@ -60,18 +86,18 @@ TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
#endif
TEST_F(SysInfoTest, Uptime) {
- base::TimeDelta up_time_1 = base::SysInfo::Uptime();
+ TimeDelta up_time_1 = SysInfo::Uptime();
// UpTime() is implemented internally using TimeTicks::Now(), which documents
// system resolution as being 1-15ms. Sleep a little longer than that.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
- base::TimeDelta up_time_2 = base::SysInfo::Uptime();
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+ TimeDelta up_time_2 = SysInfo::Uptime();
EXPECT_GT(up_time_1.InMicroseconds(), 0);
EXPECT_GT(up_time_2.InMicroseconds(), up_time_1.InMicroseconds());
}
#if defined(OS_MACOSX) && !defined(OS_IOS)
TEST_F(SysInfoTest, HardwareModelName) {
- std::string hardware_model = base::SysInfo::HardwareModelName();
+ std::string hardware_model = SysInfo::HardwareModelName();
EXPECT_FALSE(hardware_model.empty());
}
#endif
@@ -85,10 +111,10 @@ TEST_F(SysInfoTest, GoogleChromeOSVersionNumbers) {
const char kLsbRelease[] =
"FOO=1234123.34.5\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(1, os_major_version);
EXPECT_EQ(2, os_minor_version);
EXPECT_EQ(3, os_bugfix_version);
@@ -101,10 +127,10 @@ TEST_F(SysInfoTest, GoogleChromeOSVersionNumbersFirst) {
const char kLsbRelease[] =
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
"FOO=1234123.34.5\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(1, os_major_version);
EXPECT_EQ(2, os_minor_version);
EXPECT_EQ(3, os_bugfix_version);
@@ -115,10 +141,10 @@ TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
int32_t os_minor_version = -1;
int32_t os_bugfix_version = -1;
const char kLsbRelease[] = "FOO=1234123.34.5\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
- base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
- &os_minor_version,
- &os_bugfix_version);
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+ SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
EXPECT_EQ(0, os_major_version);
EXPECT_EQ(0, os_minor_version);
EXPECT_EQ(0, os_bugfix_version);
@@ -127,43 +153,45 @@ TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
// Use a fake time that can be safely displayed as a string.
- const base::Time lsb_release_time(base::Time::FromDoubleT(12345.6));
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
- base::Time parsed_lsb_release_time = base::SysInfo::GetLsbReleaseTime();
+ const Time lsb_release_time(Time::FromDoubleT(12345.6));
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
+ Time parsed_lsb_release_time = SysInfo::GetLsbReleaseTime();
EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
parsed_lsb_release_time.ToDoubleT());
}
TEST_F(SysInfoTest, IsRunningOnChromeOS) {
- base::SysInfo::SetChromeOSVersionInfoForTest("", base::Time());
- EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest("", Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease1[] =
"CHROMEOS_RELEASE_NAME=Non Chrome OS\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
- EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease2[] =
"CHROMEOS_RELEASE_NAME=Chrome OS\n"
"CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
- EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
const char kLsbRelease3[] =
"CHROMEOS_RELEASE_NAME=Chromium OS\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, base::Time());
- EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
}
TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
- EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+ EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
- base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
- EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
}
#endif // OS_CHROMEOS
+
+} // namespace base
diff --git a/chromium/base/sys_info_win.cc b/chromium/base/sys_info_win.cc
index cb184bae258..d1c485c129e 100644
--- a/chromium/base/sys_info_win.cc
+++ b/chromium/base/sys_info_win.cc
@@ -12,6 +12,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
#include "base/win/windows_version.h"
@@ -68,7 +69,10 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
- return AmountOfMemory(&MEMORYSTATUSEX::ullAvailPhys);
+ SystemMemoryInfoKB info;
+ if (!GetSystemMemoryInfo(&info))
+ return 0;
+ return static_cast<int64_t>(info.avail_phys) * 1024;
}
// static
diff --git a/chromium/base/task/cancelable_task_tracker.cc b/chromium/base/task/cancelable_task_tracker.cc
index 2a68a57bc6a..e68b95917aa 100644
--- a/chromium/base/task/cancelable_task_tracker.cc
+++ b/chromium/base/task/cancelable_task_tracker.cc
@@ -90,9 +90,9 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
Closure untrack_closure =
Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id);
bool success = task_runner->PostTaskAndReply(
- from_here, Bind(&RunIfNotCanceled, flag, std::move(task)),
- Bind(&RunIfNotCanceledThenUntrack, base::Owned(flag), std::move(reply),
- std::move(untrack_closure)));
+ from_here, BindOnce(&RunIfNotCanceled, flag, std::move(task)),
+ BindOnce(&RunIfNotCanceledThenUntrack, base::Owned(flag),
+ std::move(reply), std::move(untrack_closure)));
if (!success)
return kBadTaskId;
diff --git a/chromium/base/task/cancelable_task_tracker_unittest.cc b/chromium/base/task/cancelable_task_tracker_unittest.cc
index fd480f36875..a16f5af95fa 100644
--- a/chromium/base/task/cancelable_task_tracker_unittest.cc
+++ b/chromium/base/task/cancelable_task_tracker_unittest.cc
@@ -194,14 +194,14 @@ TEST_F(CancelableTaskTrackerTest, NewTrackedTaskIdDifferentThread) {
Thread other_thread("other thread");
ASSERT_TRUE(other_thread.Start());
other_thread.task_runner()->PostTask(
- FROM_HERE, Bind(&ExpectIsCanceled, is_canceled, false));
+ FROM_HERE, BindOnce(&ExpectIsCanceled, is_canceled, false));
other_thread.Stop();
task_tracker_.TryCancel(task_id);
ASSERT_TRUE(other_thread.Start());
other_thread.task_runner()->PostTask(
- FROM_HERE, Bind(&ExpectIsCanceled, is_canceled, true));
+ FROM_HERE, BindOnce(&ExpectIsCanceled, is_canceled, true));
other_thread.Stop();
}
@@ -364,8 +364,9 @@ TEST_F(CancelableTaskTrackerDeathTest, PostFromDifferentThread) {
ASSERT_TRUE(bad_thread.Start());
bad_thread.task_runner()->PostTask(
- FROM_HERE, Bind(&MaybeRunDeadlyTaskTrackerMemberFunction,
- Unretained(&task_tracker_), Bind(&PostDoNothingTask)));
+ FROM_HERE,
+ BindOnce(&MaybeRunDeadlyTaskTrackerMemberFunction,
+ Unretained(&task_tracker_), Bind(&PostDoNothingTask)));
}
void TryCancel(CancelableTaskTracker::TaskId task_id,
@@ -385,8 +386,9 @@ TEST_F(CancelableTaskTrackerDeathTest, CancelOnDifferentThread) {
EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
bad_thread.task_runner()->PostTask(
- FROM_HERE, Bind(&MaybeRunDeadlyTaskTrackerMemberFunction,
- Unretained(&task_tracker_), Bind(&TryCancel, task_id)));
+ FROM_HERE,
+ BindOnce(&MaybeRunDeadlyTaskTrackerMemberFunction,
+ Unretained(&task_tracker_), Bind(&TryCancel, task_id)));
test_task_runner->RunUntilIdle();
}
@@ -403,9 +405,9 @@ TEST_F(CancelableTaskTrackerDeathTest, CancelAllOnDifferentThread) {
EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
bad_thread.task_runner()->PostTask(
- FROM_HERE,
- Bind(&MaybeRunDeadlyTaskTrackerMemberFunction, Unretained(&task_tracker_),
- Bind(&CancelableTaskTracker::TryCancelAll)));
+ FROM_HERE, BindOnce(&MaybeRunDeadlyTaskTrackerMemberFunction,
+ Unretained(&task_tracker_),
+ Bind(&CancelableTaskTracker::TryCancelAll)));
test_task_runner->RunUntilIdle();
}
diff --git a/chromium/base/task_runner.cc b/chromium/base/task_runner.cc
index 35c0a23274a..c3e0574a1bf 100644
--- a/chromium/base/task_runner.cc
+++ b/chromium/base/task_runner.cc
@@ -23,7 +23,7 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override;
+ OnceClosure task) override;
// Non-owning.
TaskRunner* destination_;
@@ -36,20 +36,20 @@ PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
bool PostTaskAndReplyTaskRunner::PostTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return destination_->PostTask(from_here, task);
+ OnceClosure task) {
+ return destination_->PostTask(from_here, std::move(task));
}
} // namespace
bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
- const Closure& task) {
- return PostDelayedTask(from_here, task, base::TimeDelta());
+ OnceClosure task) {
+ return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
}
bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
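With PostTask() taking OnceClosure by value, move-only callbacks built with BindOnce() flow through directly, and existing copyable Closures keep working via the implicit Closure-to-OnceClosure conversion. A sketch, given some scoped_refptr<base::TaskRunner> task_runner (Work and LogSomething are hypothetical):

void Work(std::unique_ptr<int> value);
void LogSomething();

task_runner->PostTask(FROM_HERE,
                      base::BindOnce(&Work, base::MakeUnique<int>(42)));
base::Closure repeating = base::Bind(&LogSomething);
task_runner->PostTask(FROM_HERE, repeating);  // converts to OnceClosure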
diff --git a/chromium/base/task_runner.h b/chromium/base/task_runner.h
index be3039d3723..0421d564e6d 100644
--- a/chromium/base/task_runner.h
+++ b/chromium/base/task_runner.h
@@ -61,8 +61,7 @@ class BASE_EXPORT TaskRunner
// will not be run.
//
// Equivalent to PostDelayedTask(from_here, task, 0).
- bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ bool PostTask(const tracked_objects::Location& from_here, OnceClosure task);
// Like PostTask, but tries to run the posted task only after
// |delay_ms| has passed.
@@ -70,7 +69,7 @@ class BASE_EXPORT TaskRunner
// It is valid for an implementation to ignore |delay_ms|; that is,
// to have PostDelayedTask behave the same as PostTask.
virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay) = 0;
// Returns true if the current thread is a thread on which a task
@@ -123,8 +122,8 @@ class BASE_EXPORT TaskRunner
// and the reply will cancel itself safely because it is bound to a
// WeakPtr<>.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
protected:
friend struct TaskRunnerTraits;
diff --git a/chromium/base/task_runner_util.h b/chromium/base/task_runner_util.h
index 7fda07624da..9196bf17a16 100644
--- a/chromium/base/task_runner_util.h
+++ b/chromium/base/task_runner_util.h
@@ -39,10 +39,11 @@ bool PostTaskAndReplyWithResult(TaskRunner* task_runner,
DCHECK(reply);
TaskReturnType* result = new TaskReturnType();
return task_runner->PostTaskAndReply(
- from_here, base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>,
- std::move(task), result),
- base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
- std::move(reply), base::Owned(result)));
+ from_here,
+ base::BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>,
+ std::move(task), result),
+ base::BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+ std::move(reply), base::Owned(result)));
}
} // namespace base
diff --git a/chromium/base/task_scheduler/delayed_task_manager.cc b/chromium/base/task_scheduler/delayed_task_manager.cc
index 1921364bd1e..0bf7565a339 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager.cc
@@ -27,13 +27,18 @@ void DelayedTaskManager::AddDelayedTask(
const PostTaskNowCallback& post_task_now_callback) {
DCHECK(task);
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task->task);
+
const TimeDelta delay = task->delay;
DCHECK(!delay.is_zero());
// TODO(fdoray): Use |task->delayed_run_time| on the service thread
// MessageLoop rather than recomputing it from |delay|.
service_thread_task_runner_->PostDelayedTask(
- FROM_HERE, Bind(post_task_now_callback, Passed(std::move(task))), delay);
+ FROM_HERE, BindOnce(post_task_now_callback, Passed(std::move(task))),
+ delay);
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/post_task.cc b/chromium/base/task_scheduler/post_task.cc
index a1a3bc2da38..6f9adae27f4 100644
--- a/chromium/base/task_scheduler/post_task.cc
+++ b/chromium/base/task_scheduler/post_task.cc
@@ -21,8 +21,8 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override {
- PostTaskWithTraits(from_here, traits_, task);
+ OnceClosure task) override {
+ PostTaskWithTraits(from_here, traits_, std::move(task));
return true;
}
@@ -32,43 +32,43 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
} // namespace
-void PostTask(const tracked_objects::Location& from_here, const Closure& task) {
- PostDelayedTask(from_here, task, TimeDelta());
+void PostTask(const tracked_objects::Location& from_here, OnceClosure task) {
+ PostDelayedTask(from_here, std::move(task), TimeDelta());
}
void PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
- PostDelayedTaskWithTraits(from_here, TaskTraits(), task, delay);
+ PostDelayedTaskWithTraits(from_here, TaskTraits(), std::move(task), delay);
}
void PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
PostTaskWithTraitsAndReply(from_here, TaskTraits(), std::move(task),
std::move(reply));
}
void PostTaskWithTraits(const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task) {
- PostDelayedTaskWithTraits(from_here, traits, task, TimeDelta());
+ OnceClosure task) {
+ PostDelayedTaskWithTraits(from_here, traits, std::move(task), TimeDelta());
}
void PostDelayedTaskWithTraits(const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
DCHECK(TaskScheduler::GetInstance())
<< "Ref. Prerequisite section of post_task.h";
- TaskScheduler::GetInstance()->PostDelayedTaskWithTraits(from_here, traits,
- task, delay);
+ TaskScheduler::GetInstance()->PostDelayedTaskWithTraits(
+ from_here, traits, std::move(task), std::move(delay));
}
void PostTaskWithTraitsAndReply(const tracked_objects::Location& from_here,
const TaskTraits& traits,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
PostTaskAndReplyTaskRunner(traits).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
@@ -95,4 +95,13 @@ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
traits);
}
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ DCHECK(TaskScheduler::GetInstance())
+ << "Ref. Prerequisite section of post_task.h";
+ return TaskScheduler::GetInstance()->CreateCOMSTATaskRunnerWithTraits(traits);
+}
+#endif // defined(OS_WIN)
+
} // namespace base
diff --git a/chromium/base/task_scheduler/post_task.h b/chromium/base/task_scheduler/post_task.h
index 3174f06d63b..69eda894ff8 100644
--- a/chromium/base/task_scheduler/post_task.h
+++ b/chromium/base/task_scheduler/post_task.h
@@ -18,6 +18,7 @@
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
#include "base/time/time.h"
+#include "build/build_config.h"
namespace base {
@@ -70,7 +71,7 @@ namespace base {
// Posts |task| to the TaskScheduler. Calling this is equivalent to calling
// PostTaskWithTraits with plain TaskTraits.
BASE_EXPORT void PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Posts |task| to the TaskScheduler. |task| will not run before |delay|
// expires. Calling this is equivalent to calling PostDelayedTaskWithTraits with
@@ -79,7 +80,7 @@ BASE_EXPORT void PostTask(const tracked_objects::Location& from_here,
// Use PostDelayedTaskWithTraits to specify a BACKGROUND priority if the task
// doesn't have to run as soon as |delay| expires.
BASE_EXPORT void PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
// Posts |task| to the TaskScheduler and posts |reply| on the caller's execution
@@ -88,8 +89,8 @@ BASE_EXPORT void PostDelayedTask(const tracked_objects::Location& from_here,
// PostTaskWithTraitsAndReply with plain TaskTraits. Can only be called when
// SequencedTaskRunnerHandle::IsSet().
BASE_EXPORT void PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
// Posts |task| to the TaskScheduler and posts |reply| with the return value of
// |task| as argument on the caller's execution context (i.e. same sequence or
@@ -98,16 +99,30 @@ BASE_EXPORT void PostTaskAndReply(const tracked_objects::Location& from_here,
// TaskTraits. Can only be called when SequencedTaskRunnerHandle::IsSet().
template <typename TaskReturnType, typename ReplyArgType>
void PostTaskAndReplyWithResult(const tracked_objects::Location& from_here,
- Callback<TaskReturnType(void)> task,
- Callback<void(ReplyArgType)> reply) {
+ OnceCallback<TaskReturnType()> task,
+ OnceCallback<void(ReplyArgType)> reply) {
PostTaskWithTraitsAndReplyWithResult(from_here, TaskTraits(), std::move(task),
std::move(reply));
}
+// Callback version of PostTaskAndReplyWithResult above.
+// Though RepeatingCallback is convertible to OnceCallback, this overload is
+// needed because template argument deduction and the implicit conversion
+// cannot both apply during overload resolution.
+// TODO(tzik): Update all callers of the Callback version to use OnceCallback.
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskAndReplyWithResult(const tracked_objects::Location& from_here,
+ Callback<TaskReturnType()> task,
+ Callback<void(ReplyArgType)> reply) {
+ PostTaskAndReplyWithResult(
+ from_here, OnceCallback<TaskReturnType()>(std::move(task)),
+ OnceCallback<void(ReplyArgType)>(std::move(reply)));
+}
+
// Posts |task| with specific |traits| to the TaskScheduler.
BASE_EXPORT void PostTaskWithTraits(const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task);
+ OnceClosure task);
// Posts |task| with specific |traits| to the TaskScheduler. |task| will not run
// before |delay| expires.
@@ -117,7 +132,7 @@ BASE_EXPORT void PostTaskWithTraits(const tracked_objects::Location& from_here,
BASE_EXPORT void PostDelayedTaskWithTraits(
const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
// Posts |task| with specific |traits| to the TaskScheduler and posts |reply| on
@@ -127,8 +142,8 @@ BASE_EXPORT void PostDelayedTaskWithTraits(
BASE_EXPORT void PostTaskWithTraitsAndReply(
const tracked_objects::Location& from_here,
const TaskTraits& traits,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
// Posts |task| with specific |traits| to the TaskScheduler and posts |reply|
// with the return value of |task| as argument on the caller's execution context
@@ -138,14 +153,31 @@ template <typename TaskReturnType, typename ReplyArgType>
void PostTaskWithTraitsAndReplyWithResult(
const tracked_objects::Location& from_here,
const TaskTraits& traits,
- Callback<TaskReturnType()> task,
- Callback<void(ReplyArgType)> reply) {
+ OnceCallback<TaskReturnType()> task,
+ OnceCallback<void(ReplyArgType)> reply) {
TaskReturnType* result = new TaskReturnType();
return PostTaskWithTraitsAndReply(
- from_here, traits, Bind(&internal::ReturnAsParamAdapter<TaskReturnType>,
- std::move(task), result),
- Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
- std::move(reply), Owned(result)));
+ from_here, traits,
+ BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>, std::move(task),
+ result),
+ BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+ std::move(reply), Owned(result)));
+}
+
+// Callback version of PostTaskWithTraitsAndReplyWithResult above.
+// Though RepeatingCallback is convertible to OnceCallback, this overload is
+// needed because template argument deduction and the implicit conversion
+// cannot both apply during overload resolution.
+// TODO(tzik): Update all callers of the Callback version to use OnceCallback.
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskWithTraitsAndReplyWithResult(
+ const tracked_objects::Location& from_here,
+ const TaskTraits& traits,
+ Callback<TaskReturnType()> task,
+ Callback<void(ReplyArgType)> reply) {
+ PostTaskWithTraitsAndReplyWithResult(
+ from_here, traits, OnceCallback<TaskReturnType()>(std::move(task)),
+ OnceCallback<void(ReplyArgType)>(std::move(reply)));
}
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
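The Callback overloads above exist because overload resolution will not deduce TaskReturnType and ReplyArgType through the Callback-to-OnceCallback conversion; without them, passing the result of Bind() would fail to compile. A sketch (ComputeValue and ConsumeValue are hypothetical):

int ComputeValue();
void ConsumeValue(int value);

// Matches the Callback overload, which forwards to the OnceCallback one.
base::PostTaskAndReplyWithResult(FROM_HERE,
                                 base::Bind(&ComputeValue),
                                 base::Bind(&ConsumeValue));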
@@ -171,6 +203,19 @@ CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits);
BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
CreateSingleThreadTaskRunnerWithTraits(const TaskTraits& traits);
+#if defined(OS_WIN)
+// Returns a SingleThreadTaskRunner whose PostTask invocations result in
+// scheduling tasks using |traits| in a COM Single-Threaded Apartment. Tasks run
+// in the same Single-Threaded Apartment in posting order for the returned
+// SingleThreadTaskRunner. There is not necessarily a one-to-one correspondence
+// between SingleThreadTaskRunners and Single-Threaded Apartments. The
+// implementation is free to share apartments or create new apartments as
+// necessary. In either case, care should be taken to make sure COM pointers are
+// not smuggled across apartments.
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
+CreateCOMSTATaskRunnerWithTraits(const TaskTraits& traits);
+#endif // defined(OS_WIN)
+
} // namespace base
#endif // BASE_TASK_SCHEDULER_POST_TASK_H_
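On Windows, the new factory returns a runner whose tasks execute inside a COM Single-Threaded Apartment. A usage sketch (UseComApi is hypothetical):

scoped_refptr<base::SingleThreadTaskRunner> com_runner =
    base::CreateCOMSTATaskRunnerWithTraits(base::TaskTraits());
com_runner->PostTask(FROM_HERE, base::BindOnce(&UseComApi));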
diff --git a/chromium/base/task_scheduler/priority_queue.cc b/chromium/base/task_scheduler/priority_queue.cc
index fe307fe9b19..fe03381219b 100644
--- a/chromium/base/task_scheduler/priority_queue.cc
+++ b/chromium/base/task_scheduler/priority_queue.cc
@@ -18,9 +18,9 @@ namespace internal {
// call.
class PriorityQueue::SequenceAndSortKey {
public:
- SequenceAndSortKey(scoped_refptr<Sequence>&& sequence,
+ SequenceAndSortKey(scoped_refptr<Sequence> sequence,
const SequenceSortKey& sort_key)
- : sequence_(sequence), sort_key_(sort_key) {
+ : sequence_(std::move(sequence)), sort_key_(sort_key) {
DCHECK(sequence_);
}
@@ -95,11 +95,6 @@ bool PriorityQueue::Transaction::IsEmpty() const {
PriorityQueue::PriorityQueue() = default;
-PriorityQueue::PriorityQueue(const PriorityQueue* predecessor_priority_queue)
- : container_lock_(&predecessor_priority_queue->container_lock_) {
- DCHECK(predecessor_priority_queue);
-}
-
PriorityQueue::~PriorityQueue() = default;
std::unique_ptr<PriorityQueue::Transaction> PriorityQueue::BeginTransaction() {
diff --git a/chromium/base/task_scheduler/priority_queue.h b/chromium/base/task_scheduler/priority_queue.h
index 887579377af..2844ef3fa54 100644
--- a/chromium/base/task_scheduler/priority_queue.h
+++ b/chromium/base/task_scheduler/priority_queue.h
@@ -70,11 +70,6 @@ class BASE_EXPORT PriorityQueue {
PriorityQueue();
- // |predecessor_priority_queue| is a PriorityQueue for which a thread is
- // allowed to have an active Transaction when it creates a Transaction for
- // this PriorityQueue.
- PriorityQueue(const PriorityQueue* predecessor_priority_queue);
-
~PriorityQueue();
// Begins a Transaction. This method cannot be called on a thread which has an
diff --git a/chromium/base/task_scheduler/priority_queue_unittest.cc b/chromium/base/task_scheduler/priority_queue_unittest.cc
index 3762856061f..afaeafb644e 100644
--- a/chromium/base/task_scheduler/priority_queue_unittest.cc
+++ b/chromium/base/task_scheduler/priority_queue_unittest.cc
@@ -6,6 +6,8 @@
#include <memory>
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
@@ -59,26 +61,26 @@ TEST(TaskSchedulerPriorityQueueTest, PushPopPeek) {
// Create test sequences.
scoped_refptr<Sequence> sequence_a(new Sequence);
sequence_a->PushTask(MakeUnique<Task>(
- FROM_HERE, Closure(),
+ FROM_HERE, Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_VISIBLE), TimeDelta()));
SequenceSortKey sort_key_a = sequence_a->GetSortKey();
scoped_refptr<Sequence> sequence_b(new Sequence);
sequence_b->PushTask(MakeUnique<Task>(
- FROM_HERE, Closure(),
+ FROM_HERE, Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING), TimeDelta()));
SequenceSortKey sort_key_b = sequence_b->GetSortKey();
scoped_refptr<Sequence> sequence_c(new Sequence);
sequence_c->PushTask(MakeUnique<Task>(
- FROM_HERE, Closure(),
+ FROM_HERE, Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING), TimeDelta()));
SequenceSortKey sort_key_c = sequence_c->GetSortKey();
scoped_refptr<Sequence> sequence_d(new Sequence);
sequence_d->PushTask(MakeUnique<Task>(
- FROM_HERE, Closure(), TaskTraits().WithPriority(TaskPriority::BACKGROUND),
- TimeDelta()));
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithPriority(TaskPriority::BACKGROUND), TimeDelta()));
SequenceSortKey sort_key_d = sequence_d->GetSortKey();
// Create a PriorityQueue and a Transaction.
@@ -141,19 +143,6 @@ TEST(TaskSchedulerPriorityQueueTest, IllegalTwoTransactionsSameThread) {
});
}
-// Check that there is no crash when Transactions are created on the same thread
-// for 2 PriorityQueues which have a predecessor relationship.
-TEST(TaskSchedulerPriorityQueueTest, LegalTwoTransactionsSameThread) {
- PriorityQueue pq_a;
- PriorityQueue pq_b(&pq_a);
-
- // This shouldn't crash.
- std::unique_ptr<PriorityQueue::Transaction> transaction_a =
- pq_a.BeginTransaction();
- std::unique_ptr<PriorityQueue::Transaction> transaction_b =
- pq_b.BeginTransaction();
-}
-
// Check that it is possible to begin multiple Transactions for the same
// PriorityQueue on different threads. The call to BeginTransaction() on the
// second thread should block until the Transaction has ended on the first
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
new file mode 100644
index 00000000000..d4c26d6a404
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
@@ -0,0 +1,453 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/ptr_util.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+
+#include "base/win/scoped_com_initializer.h"
+#endif // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// Allows checking PlatformThread::CurrentRef() against a stored
+// PlatformThreadRef atomically, without using locks.
+class AtomicThreadRefChecker {
+ public:
+ AtomicThreadRefChecker() = default;
+ ~AtomicThreadRefChecker() = default;
+
+ void Set() {
+ thread_ref_ = PlatformThread::CurrentRef();
+ is_set_.Set();
+ }
+
+ bool IsCurrentThreadSameAsSetThread() {
+ return is_set_.IsSet() && thread_ref_ == PlatformThread::CurrentRef();
+ }
+
+ private:
+ AtomicFlag is_set_;
+ PlatformThreadRef thread_ref_;
+
+ DISALLOW_COPY_AND_ASSIGN(AtomicThreadRefChecker);
+};
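
AtomicThreadRefChecker publishes the owning thread's ref once and then answers "am I on that thread?" without taking a lock; the AtomicFlag supplies the release/acquire ordering that keeps readers from observing a half-published ref. A minimal standalone sketch of the same idea, using std::atomic and std::thread::id in place of the base/ primitives (ThreadRefChecker and everything below is illustrative, not part of this patch):

    #include <atomic>
    #include <cassert>
    #include <thread>

    // Publish the owning thread's id once; afterwards any thread can ask
    // whether it is that thread, lock-free.
    class ThreadRefChecker {
     public:
      void Set() {
        thread_id_ = std::this_thread::get_id();
        is_set_.store(true, std::memory_order_release);  // Publish after the id.
      }

      bool IsCurrentThreadSameAsSetThread() const {
        // Acquire pairs with the release above, so |thread_id_| is visible.
        return is_set_.load(std::memory_order_acquire) &&
               thread_id_ == std::this_thread::get_id();
      }

     private:
      std::atomic<bool> is_set_{false};
      std::thread::id thread_id_;
    };

    int main() {
      ThreadRefChecker checker;
      std::thread worker([&checker] {
        checker.Set();
        assert(checker.IsCurrentThreadSameAsSetThread());  // On the set thread.
      });
      worker.join();
      assert(!checker.IsCurrentThreadSameAsSetThread());   // Different thread.
    }
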
+
+class SchedulerWorkerDelegate : public SchedulerWorker::Delegate {
+ public:
+ SchedulerWorkerDelegate(const std::string& thread_name)
+ : thread_name_(thread_name) {}
+
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(SchedulerWorker* worker) override {
+ thread_ref_checker_.Set();
+ PlatformThread::SetName(thread_name_);
+ }
+
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ bool has_work = has_work_;
+ has_work_ = false;
+ return has_work ? sequence_ : nullptr;
+ }
+
+ void DidRunTask() override {}
+
+ void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ // We've shut down, so no-op this work request. Any sequence cleanup will
+ // occur in the caller's context.
+ if (!sequence_)
+ return;
+
+ DCHECK_EQ(sequence, sequence_);
+ has_work_ = true;
+ }
+
+ TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
+
+ bool CanDetach(SchedulerWorker* worker) override { return false; }
+
+ void OnDetach() override { NOTREACHED(); }
+
+ bool RunsTasksOnCurrentThread() {
+ // We check the thread ref instead of the sequence for the benefit of COM
+ // callbacks which may execute without a sequence context.
+ return thread_ref_checker_.IsCurrentThreadSameAsSetThread();
+ }
+
+ void OnMainExit() override {
+ // Move |sequence_| to |local_sequence| so that if we have the last
+ // reference to the sequence we don't destroy it (and its tasks) within
+ // |sequence_lock_|.
+ scoped_refptr<Sequence> local_sequence;
+ {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ // To reclaim skipped tasks on shutdown, we null out the sequence to allow
+ // the tasks to destroy themselves.
+ local_sequence = std::move(sequence_);
+ }
+ }
+
+ // SchedulerWorkerDelegate:
+
+ // Consumers should release their sequence reference as soon as possible to
+ // ensure timely cleanup during shutdown.
+ scoped_refptr<Sequence> sequence() {
+ AutoSchedulerLock auto_lock(sequence_lock_);
+ return sequence_;
+ }
+
+ private:
+ const std::string thread_name_;
+
+ // Synchronizes access to |sequence_| and |has_work_|.
+ SchedulerLock sequence_lock_;
+ scoped_refptr<Sequence> sequence_ = new Sequence;
+ bool has_work_ = false;
+
+ AtomicThreadRefChecker thread_ref_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegate);
+};
+
+#if defined(OS_WIN)
+
+class SchedulerWorkerCOMDelegate : public SchedulerWorkerDelegate {
+ public:
+ SchedulerWorkerCOMDelegate(const std::string& thread_name,
+ TaskTracker* task_tracker)
+ : SchedulerWorkerDelegate(thread_name), task_tracker_(task_tracker) {}
+
+ ~SchedulerWorkerCOMDelegate() override { DCHECK(!scoped_com_initializer_); }
+
+ // SchedulerWorker::Delegate:
+ void OnMainEntry(SchedulerWorker* worker) override {
+ SchedulerWorkerDelegate::OnMainEntry(worker);
+
+ scoped_com_initializer_ = MakeUnique<win::ScopedCOMInitializer>();
+ }
+
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+ // The scheme below covers the following scenarios:
+ // * Only SchedulerWorkerDelegate::GetWork() has work:
+ // Always return the sequence from GetWork().
+ // * Only the Windows Message Queue has work:
+ // Always return the sequence from GetWorkFromWindowsMessageQueue().
+ // * Both SchedulerWorkerDelegate::GetWork() and the Windows Message Queue
+ // have work:
+ // Process sequences from each source round-robin style.
+ scoped_refptr<Sequence> sequence;
+ if (get_work_first_) {
+ sequence = SchedulerWorkerDelegate::GetWork(worker);
+ if (sequence)
+ get_work_first_ = false;
+ }
+
+ if (!sequence) {
+ sequence = GetWorkFromWindowsMessageQueue();
+ if (sequence)
+ get_work_first_ = true;
+ }
+
+ if (!sequence && !get_work_first_) {
+ // This case is important when the Windows Message Queue was checked first
+ // and had no work. We don't want to return null immediately, as that could
+ // put the thread to sleep while work is still pending in
+ // SchedulerWorkerDelegate::GetWork().
+ sequence = SchedulerWorkerDelegate::GetWork(worker);
+ }
+ return sequence;
+ }
+
+ void OnMainExit() override { scoped_com_initializer_.reset(); }
+
+ void WaitForWork(WaitableEvent* wake_up_event) override {
+ DCHECK(wake_up_event);
+ const TimeDelta sleep_time = GetSleepTimeout();
+ const DWORD milliseconds_wait =
+ sleep_time.is_max() ? INFINITE : sleep_time.InMilliseconds();
+ HANDLE wake_up_event_handle = wake_up_event->handle();
+ DWORD result = MsgWaitForMultipleObjectsEx(
+ 1, &wake_up_event_handle, milliseconds_wait, QS_ALLINPUT, 0);
+ if (result == WAIT_OBJECT_0) {
+ // Reset the event since we woke up due to it.
+ wake_up_event->Reset();
+ }
+ }
+
+ private:
+ scoped_refptr<Sequence> GetWorkFromWindowsMessageQueue() {
+ MSG msg;
+ if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
+ auto pump_message_task =
+ MakeUnique<Task>(FROM_HERE,
+ Bind(
+ [](MSG msg) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ },
+ std::move(msg)),
+ TaskTraits().MayBlock(), TimeDelta());
+ if (task_tracker_->WillPostTask(pump_message_task.get())) {
+ bool was_empty =
+ message_pump_sequence_->PushTask(std::move(pump_message_task));
+ DCHECK(was_empty) << "GetWorkFromWindowsMessageQueue() does not expect "
+ "queueing of pump tasks.";
+ return message_pump_sequence_;
+ }
+ }
+ return nullptr;
+ }
+
+ bool get_work_first_ = true;
+ const scoped_refptr<Sequence> message_pump_sequence_ = new Sequence;
+ TaskTracker* const task_tracker_;
+ std::unique_ptr<win::ScopedCOMInitializer> scoped_com_initializer_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerCOMDelegate);
+};
+
+#endif // defined(OS_WIN)
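
The round-robin selection in SchedulerWorkerCOMDelegate::GetWork() above reduces to a small source-alternation routine: prefer whichever source did not supply work last time, and re-check the first source before reporting "no work" so the thread never sleeps on a non-empty queue. A standalone sketch with two plain queues standing in for the delegate's sequence and the Windows message queue (all names are illustrative):

    #include <optional>
    #include <queue>
    #include <string>

    // |primary_| stands in for SchedulerWorkerDelegate::GetWork(), and
    // |secondary_| for GetWorkFromWindowsMessageQueue().
    class TwoSourceScheduler {
     public:
      std::optional<std::string> GetWork() {
        std::optional<std::string> work;
        if (primary_first_) {
          work = Pop(primary_);
          if (work)
            primary_first_ = false;  // Give the other source the next turn.
        }
        if (!work) {
          work = Pop(secondary_);
          if (work)
            primary_first_ = true;
        }
        // If the secondary source was checked first and both were empty,
        // re-check the primary so the caller does not sleep on pending work.
        if (!work && !primary_first_)
          work = Pop(primary_);
        return work;
      }

      std::queue<std::string> primary_;
      std::queue<std::string> secondary_;

     private:
      static std::optional<std::string> Pop(std::queue<std::string>& queue) {
        if (queue.empty())
          return std::nullopt;
        std::string front = std::move(queue.front());
        queue.pop();
        return front;
      }

      bool primary_first_ = true;
    };

    int main() {
      TwoSourceScheduler scheduler;
      scheduler.primary_.push("sequence");
      scheduler.secondary_.push("pump-message task");
      auto first = scheduler.GetWork();   // "sequence" (primary went first).
      auto second = scheduler.GetWork();  // "pump-message task" (its turn next).
    }
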
+
+} // namespace
+
+class SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner
+ : public SingleThreadTaskRunner {
+ public:
+ // Constructs a SchedulerSingleThreadTaskRunner that indirectly controls the
+ // lifetime of a dedicated |worker| for |traits|.
+ SchedulerSingleThreadTaskRunner(
+ SchedulerSingleThreadTaskRunnerManager* const outer,
+ const TaskTraits& traits,
+ SchedulerWorker* worker)
+ : outer_(outer), traits_(traits), worker_(worker) {
+ DCHECK(outer_);
+ DCHECK(worker_);
+ }
+
+ // SingleThreadTaskRunner:
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ OnceClosure closure,
+ TimeDelta delay) override {
+ auto task = MakeUnique<Task>(from_here, std::move(closure), traits_, delay);
+ task->single_thread_task_runner_ref = this;
+
+ if (!outer_->task_tracker_->WillPostTask(task.get()))
+ return false;
+
+ if (task->delayed_run_time.is_null()) {
+ PostTaskNow(std::move(task));
+ } else {
+ outer_->delayed_task_manager_->AddDelayedTask(
+ std::move(task), Bind(&SchedulerSingleThreadTaskRunner::PostTaskNow,
+ Unretained(this)));
+ }
+ return true;
+ }
+
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ OnceClosure closure,
+ TimeDelta delay) override {
+ // Tasks are never nested within the task scheduler.
+ return PostDelayedTask(from_here, std::move(closure), delay);
+ }
+
+ bool RunsTasksOnCurrentThread() const override {
+ return GetDelegate()->RunsTasksOnCurrentThread();
+ }
+
+ private:
+ ~SchedulerSingleThreadTaskRunner() override {
+ outer_->UnregisterSchedulerWorker(worker_);
+ }
+
+ void PostTaskNow(std::unique_ptr<Task> task) {
+ scoped_refptr<Sequence> sequence = GetDelegate()->sequence();
+ // If |sequence| is null, then the thread is effectively gone (either
+ // shutdown or joined).
+ if (!sequence)
+ return;
+
+ const bool sequence_was_empty = sequence->PushTask(std::move(task));
+ if (sequence_was_empty) {
+ GetDelegate()->ReEnqueueSequence(std::move(sequence));
+ worker_->WakeUp();
+ }
+ }
+
+ SchedulerWorkerDelegate* GetDelegate() const {
+ return static_cast<SchedulerWorkerDelegate*>(worker_->delegate());
+ }
+
+ SchedulerSingleThreadTaskRunnerManager* const outer_;
+ const TaskTraits traits_;
+ SchedulerWorker* const worker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunner);
+};
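
PostTaskNow() above relies on Sequence::PushTask() reporting the empty-to-non-empty transition: only the caller that makes the sequence non-empty re-enqueues it and wakes the worker, since in every other case the sequence is already queued or currently being run. A thread-safe sketch of that invariant (MiniSequence is illustrative, not base::internal::Sequence):

    #include <cassert>
    #include <functional>
    #include <mutex>
    #include <queue>

    // Returns true only for the push that makes the sequence non-empty;
    // only that caller is responsible for re-enqueueing the sequence and
    // waking a worker.
    class MiniSequence {
     public:
      bool PushTask(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(lock_);
        const bool was_empty = tasks_.empty();
        tasks_.push(std::move(task));
        return was_empty;
      }

     private:
      std::mutex lock_;
      std::queue<std::function<void()>> tasks_;
    };

    int main() {
      MiniSequence sequence;
      assert(sequence.PushTask([] {}));   // Empty -> non-empty: wake a worker.
      assert(!sequence.PushTask([] {}));  // Already queued: no wake-up needed.
    }
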
+
+SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunnerManager(
+ const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector,
+ const TaskScheduler::WorkerPoolIndexForTraitsCallback&
+ worker_pool_index_for_traits_callback,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager)
+ : worker_pool_params_vector_(worker_pool_params_vector),
+ worker_pool_index_for_traits_callback_(
+ worker_pool_index_for_traits_callback),
+ task_tracker_(task_tracker),
+ delayed_task_manager_(delayed_task_manager) {
+ DCHECK_GT(worker_pool_params_vector_.size(), 0U);
+ DCHECK(worker_pool_index_for_traits_callback_);
+ DCHECK(task_tracker_);
+ DCHECK(delayed_task_manager_);
+}
+
+SchedulerSingleThreadTaskRunnerManager::
+ ~SchedulerSingleThreadTaskRunnerManager() {
+#if DCHECK_IS_ON()
+ size_t workers_unregistered_during_join =
+ subtle::NoBarrier_Load(&workers_unregistered_during_join_);
+ DCHECK_EQ(workers_unregistered_during_join, workers_.size())
+ << "There cannot be outstanding SingleThreadTaskRunners upon destruction "
+ "of SchedulerSingleThreadTaskRunnerManager or the Task Scheduler";
+#endif
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return CreateSingleThreadTaskRunnerWithDelegate<SchedulerWorkerDelegate>(
+ traits);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return CreateSingleThreadTaskRunnerWithDelegate<SchedulerWorkerCOMDelegate>(
+ traits);
+}
+#endif // defined(OS_WIN)
+
+void SchedulerSingleThreadTaskRunnerManager::JoinForTesting() {
+ decltype(workers_) local_workers;
+ {
+ AutoSchedulerLock auto_lock(workers_lock_);
+ local_workers = std::move(workers_);
+ }
+
+ for (const auto& worker : local_workers)
+ worker->JoinForTesting();
+
+ {
+ AutoSchedulerLock auto_lock(workers_lock_);
+ DCHECK(workers_.empty())
+ << "New worker(s) unexpectedly registered during join.";
+ workers_ = std::move(local_workers);
+ }
+}
+
+template <typename DelegateType>
+scoped_refptr<SingleThreadTaskRunner> SchedulerSingleThreadTaskRunnerManager::
+ CreateSingleThreadTaskRunnerWithDelegate(const TaskTraits& traits) {
+ size_t index = worker_pool_index_for_traits_callback_.Run(traits);
+ DCHECK_LT(index, worker_pool_params_vector_.size());
+ return new SchedulerSingleThreadTaskRunner(
+ this, traits,
+ CreateAndRegisterSchedulerWorker<DelegateType>(
+ worker_pool_params_vector_[index]));
+}
+
+template <>
+std::unique_ptr<SchedulerWorkerDelegate>
+SchedulerSingleThreadTaskRunnerManager::CreateSchedulerWorkerDelegate<
+ SchedulerWorkerDelegate>(const SchedulerWorkerPoolParams& params, int id) {
+ return MakeUnique<SchedulerWorkerDelegate>(StringPrintf(
+ "TaskSchedulerSingleThreadWorker%d%s", id, params.name().c_str()));
+}
+
+#if defined(OS_WIN)
+template <>
+std::unique_ptr<SchedulerWorkerDelegate>
+SchedulerSingleThreadTaskRunnerManager::CreateSchedulerWorkerDelegate<
+ SchedulerWorkerCOMDelegate>(const SchedulerWorkerPoolParams& params,
+ int id) {
+ return MakeUnique<SchedulerWorkerCOMDelegate>(
+ StringPrintf("TaskSchedulerSingleThreadWorker%d%sCOMSTA", id,
+ params.name().c_str()),
+ task_tracker_);
+}
+#endif // defined(OS_WIN)
+
+template <typename DelegateType>
+SchedulerWorker*
+SchedulerSingleThreadTaskRunnerManager::CreateAndRegisterSchedulerWorker(
+ const SchedulerWorkerPoolParams& params) {
+ AutoSchedulerLock auto_lock(workers_lock_);
+ int id = next_worker_id_++;
+
+ workers_.emplace_back(SchedulerWorker::Create(
+ params.priority_hint(),
+ CreateSchedulerWorkerDelegate<DelegateType>(params, id), task_tracker_,
+ SchedulerWorker::InitialState::DETACHED));
+ return workers_.back().get();
+}
+
+void SchedulerSingleThreadTaskRunnerManager::UnregisterSchedulerWorker(
+ SchedulerWorker* worker) {
+ // Cleanup uses a SchedulerLock, so call Cleanup() after releasing
+ // |workers_lock_|.
+ scoped_refptr<SchedulerWorker> worker_to_destroy;
+ {
+ AutoSchedulerLock auto_lock(workers_lock_);
+
+ // We might be joining, so record that a worker was unregistered for
+ // verification at destruction.
+ if (workers_.empty()) {
+#if DCHECK_IS_ON()
+ subtle::NoBarrier_AtomicIncrement(&workers_unregistered_during_join_, 1);
+#endif
+ return;
+ }
+
+ auto worker_iter =
+ std::find_if(workers_.begin(), workers_.end(),
+ [worker](const scoped_refptr<SchedulerWorker>& candidate) {
+ return candidate.get() == worker;
+ });
+ DCHECK(worker_iter != workers_.end());
+ worker_to_destroy = std::move(*worker_iter);
+ workers_.erase(worker_iter);
+ }
+ worker_to_destroy->Cleanup();
+}
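
UnregisterSchedulerWorker() and OnMainExit() above share a pattern: move the object out of the locked container, drop the lock, and only then run teardown that may itself take a SchedulerLock (or destroy tasks). A generic sketch of the pattern with standard primitives (all names are illustrative):

    #include <algorithm>
    #include <memory>
    #include <mutex>
    #include <vector>

    std::mutex workers_lock;                    // Stands in for |workers_lock_|.
    std::vector<std::shared_ptr<int>> workers;  // Stands in for |workers_|.

    void Unregister(int* worker) {
      std::shared_ptr<int> worker_to_destroy;
      {
        std::lock_guard<std::mutex> lock(workers_lock);
        auto it = std::find_if(workers.begin(), workers.end(),
                               [worker](const std::shared_ptr<int>& candidate) {
                                 return candidate.get() == worker;
                               });
        if (it == workers.end())
          return;
        worker_to_destroy = std::move(*it);
        workers.erase(it);
      }
      // The last reference (and any lock-taking cleanup) is released here,
      // outside |workers_lock|.
      worker_to_destroy.reset();
    }
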
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
new file mode 100644
index 00000000000..24d1a9a010b
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class TaskTraits;
+class SingleThreadTaskRunner;
+
+namespace internal {
+
+class DelayedTaskManager;
+class SchedulerWorker;
+class TaskTracker;
+
+namespace {
+
+class SchedulerWorkerDelegate;
+
+} // namespace
+
+class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
+ public:
+ SchedulerSingleThreadTaskRunnerManager(
+ const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector,
+ const TaskScheduler::WorkerPoolIndexForTraitsCallback&
+ worker_pool_index_for_traits_callback,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+ ~SchedulerSingleThreadTaskRunnerManager();
+
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits);
+
+#if defined(OS_WIN)
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits);
+#endif // defined(OS_WIN)
+
+ void JoinForTesting();
+
+ private:
+ class SchedulerSingleThreadTaskRunner;
+
+ template <typename DelegateType>
+ scoped_refptr<SingleThreadTaskRunner>
+ CreateSingleThreadTaskRunnerWithDelegate(const TaskTraits& traits);
+
+ template <typename DelegateType>
+ std::unique_ptr<SchedulerWorkerDelegate> CreateSchedulerWorkerDelegate(
+ const SchedulerWorkerPoolParams& params,
+ int id);
+
+ template <typename DelegateType>
+ SchedulerWorker* CreateAndRegisterSchedulerWorker(
+ const SchedulerWorkerPoolParams& params);
+
+ void UnregisterSchedulerWorker(SchedulerWorker* worker);
+
+ const std::vector<SchedulerWorkerPoolParams> worker_pool_params_vector_;
+ const TaskScheduler::WorkerPoolIndexForTraitsCallback
+ worker_pool_index_for_traits_callback_;
+ TaskTracker* const task_tracker_;
+ DelayedTaskManager* const delayed_task_manager_;
+
+ // Synchronizes access to |workers_| and |next_worker_id_|.
+ SchedulerLock workers_lock_;
+ std::vector<scoped_refptr<SchedulerWorker>> workers_;
+ int next_worker_id_ = 0;
+
+#if DCHECK_IS_ON()
+ subtle::Atomic32 workers_unregistered_during_join_ = 0;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunnerManager);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
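
Putting the interface together, a usage sketch of this manager, assuming the TaskTracker, service thread, and DelayedTaskManager plumbing from the unit test below (GetParamsVector() and GetThreadPoolIndexForTraits() are the helpers defined there):

    // Sketch only; assumes the task_scheduler includes and the helper
    // functions from the unit test below.
    TaskTracker task_tracker;
    Thread service_thread("TaskSchedulerServiceThread");
    service_thread.Start();
    DelayedTaskManager delayed_task_manager(service_thread.task_runner());

    SchedulerSingleThreadTaskRunnerManager manager(
        GetParamsVector(), Bind(&GetThreadPoolIndexForTraits), &task_tracker,
        &delayed_task_manager);

    // Each runner indirectly owns a dedicated worker; its tasks run on that
    // worker in posting order.
    scoped_refptr<SingleThreadTaskRunner> runner =
        manager.CreateSingleThreadTaskRunnerWithTraits(
            TaskTraits().WithPriority(TaskPriority::USER_VISIBLE));
    runner->PostTask(FROM_HERE, BindOnce([] { /* on the dedicated thread */ }));

    runner = nullptr;  // Release the runner so its worker can be unregistered.
    task_tracker.Shutdown();
    manager.JoinForTesting();
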
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
new file mode 100644
index 00000000000..8244bb58dbc
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
@@ -0,0 +1,470 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <objbase.h>
+
+#include "base/win/current_module.h"
+#endif // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+namespace {
+
+enum WorkerPoolType : size_t {
+ BACKGROUND_WORKER_POOL = 0,
+ FOREGROUND_WORKER_POOL,
+};
+
+static size_t GetThreadPoolIndexForTraits(const TaskTraits& traits) {
+ return traits.priority() == TaskPriority::BACKGROUND ? BACKGROUND_WORKER_POOL
+ : FOREGROUND_WORKER_POOL;
+}
+
+std::vector<SchedulerWorkerPoolParams> GetParamsVector() {
+ using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
+
+ std::vector<SchedulerWorkerPoolParams> params_vector;
+
+ DCHECK_EQ(BACKGROUND_WORKER_POOL, params_vector.size());
+ params_vector.emplace_back("Background", ThreadPriority::BACKGROUND,
+ StandbyThreadPolicy::LAZY, 1U, TimeDelta::Max());
+
+ DCHECK_EQ(FOREGROUND_WORKER_POOL, params_vector.size());
+ params_vector.emplace_back("Foreground", ThreadPriority::NORMAL,
+ StandbyThreadPolicy::LAZY, 1U, TimeDelta::Max());
+
+ return params_vector;
+}
+
+class TaskSchedulerSingleThreadTaskRunnerManagerTest : public testing::Test {
+ public:
+ TaskSchedulerSingleThreadTaskRunnerManagerTest()
+ : service_thread_("TaskSchedulerServiceThread") {}
+
+ void SetUp() override {
+ service_thread_.Start();
+
+ delayed_task_manager_ =
+ MakeUnique<DelayedTaskManager>(service_thread_.task_runner());
+ single_thread_task_runner_manager_ =
+ MakeUnique<SchedulerSingleThreadTaskRunnerManager>(
+ GetParamsVector(), Bind(&GetThreadPoolIndexForTraits),
+ &task_tracker_, delayed_task_manager_.get());
+ }
+
+ void TearDown() override {
+ TearDownSingleThreadTaskRunnerManager();
+ delayed_task_manager_.reset();
+ service_thread_.Stop();
+ }
+
+ protected:
+ virtual void TearDownSingleThreadTaskRunnerManager() {
+ single_thread_task_runner_manager_->JoinForTesting();
+ single_thread_task_runner_manager_.reset();
+ }
+
+ std::unique_ptr<SchedulerSingleThreadTaskRunnerManager>
+ single_thread_task_runner_manager_;
+ TaskTracker task_tracker_;
+
+ private:
+ Thread service_thread_;
+ std::unique_ptr<DelayedTaskManager> delayed_task_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerTest);
+};
+
+void CaptureThreadRef(PlatformThreadRef* thread_ref) {
+ ASSERT_TRUE(thread_ref);
+ *thread_ref = PlatformThread::CurrentRef();
+}
+
+void CaptureThreadPriority(ThreadPriority* thread_priority) {
+ ASSERT_TRUE(thread_priority);
+ *thread_priority = PlatformThread::GetCurrentThreadPriority();
+}
+
+void ShouldNotRun() {
+ ADD_FAILURE() << "Ran a task that shouldn't run.";
+}
+
+} // namespace
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, DifferentThreadsUsed) {
+ scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
+
+ PlatformThreadRef thread_ref_1;
+ task_runner_1->PostTask(FROM_HERE,
+ BindOnce(&CaptureThreadRef, &thread_ref_1));
+ PlatformThreadRef thread_ref_2;
+ task_runner_2->PostTask(FROM_HERE,
+ BindOnce(&CaptureThreadRef, &thread_ref_2));
+
+ task_tracker_.Shutdown();
+
+ ASSERT_FALSE(thread_ref_1.is_null());
+ ASSERT_FALSE(thread_ref_2.is_null());
+ EXPECT_NE(thread_ref_1, thread_ref_2);
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, PrioritySetCorrectly) {
+ // Why are events used here instead of the task tracker? Shutting down can
+ // cause thread priorities to be raised, so events are used to determine
+ // when each task has run.
+ scoped_refptr<SingleThreadTaskRunner> task_runner_background =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithPriority(TaskPriority::BACKGROUND));
+ scoped_refptr<SingleThreadTaskRunner> task_runner_user_visible =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithPriority(TaskPriority::USER_VISIBLE));
+ scoped_refptr<SingleThreadTaskRunner> task_runner_user_blocking =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits()
+ .WithPriority(TaskPriority::USER_BLOCKING)
+ .WithShutdownBehavior(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+
+ ThreadPriority thread_priority_background;
+ task_runner_background->PostTask(
+ FROM_HERE, BindOnce(&CaptureThreadPriority, &thread_priority_background));
+ WaitableEvent waitable_event_background(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner_background->PostTask(
+ FROM_HERE,
+ BindOnce(&WaitableEvent::Signal, Unretained(&waitable_event_background)));
+
+ ThreadPriority thread_priority_user_visible;
+ task_runner_user_visible->PostTask(
+ FROM_HERE,
+ BindOnce(&CaptureThreadPriority, &thread_priority_user_visible));
+ WaitableEvent waitable_event_user_visible(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner_user_visible->PostTask(
+ FROM_HERE, BindOnce(&WaitableEvent::Signal,
+ Unretained(&waitable_event_user_visible)));
+
+ ThreadPriority thread_priority_user_blocking;
+ task_runner_user_blocking->PostTask(
+ FROM_HERE,
+ BindOnce(&CaptureThreadPriority, &thread_priority_user_blocking));
+ WaitableEvent waitable_event_user_blocking(
+ WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner_user_blocking->PostTask(
+ FROM_HERE, BindOnce(&WaitableEvent::Signal,
+ Unretained(&waitable_event_user_blocking)));
+
+ waitable_event_background.Wait();
+ waitable_event_user_visible.Wait();
+ waitable_event_user_blocking.Wait();
+
+ if (Lock::HandlesMultipleThreadPriorities())
+ EXPECT_EQ(ThreadPriority::BACKGROUND, thread_priority_background);
+ else
+ EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_background);
+ EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_user_visible);
+ EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_user_blocking);
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, PostTaskAfterShutdown) {
+ auto task_runner = single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+ task_tracker_.Shutdown();
+ EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
+}
+
+// Verify that a Task runs shortly after its delay expires.
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, PostDelayedTask) {
+ TimeTicks start_time = TimeTicks::Now();
+
+ // Post a task with a short delay.
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ auto task_runner = single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+ EXPECT_TRUE(task_runner->PostDelayedTask(
+ FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)),
+ TestTimeouts::tiny_timeout()));
+
+ // Wait until the task runs.
+ task_ran.Wait();
+
+ // Expect the task to run after its delay expires, but not more than 250 ms
+ // after that.
+ const TimeDelta actual_delay = TimeTicks::Now() - start_time;
+ EXPECT_GE(actual_delay, TestTimeouts::tiny_timeout());
+ EXPECT_LT(actual_delay,
+ TimeDelta::FromMilliseconds(250) + TestTimeouts::tiny_timeout());
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
+ RunsTasksOnCurrentThread) {
+ scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
+
+ EXPECT_FALSE(task_runner_1->RunsTasksOnCurrentThread());
+ EXPECT_FALSE(task_runner_2->RunsTasksOnCurrentThread());
+
+ task_runner_1->PostTask(
+ FROM_HERE, BindOnce(
+ [](scoped_refptr<SingleThreadTaskRunner> task_runner_1,
+ scoped_refptr<SingleThreadTaskRunner> task_runner_2) {
+ EXPECT_TRUE(task_runner_1->RunsTasksOnCurrentThread());
+ EXPECT_FALSE(task_runner_2->RunsTasksOnCurrentThread());
+ },
+ task_runner_1, task_runner_2));
+
+ task_runner_2->PostTask(
+ FROM_HERE, BindOnce(
+ [](scoped_refptr<SingleThreadTaskRunner> task_runner_1,
+ scoped_refptr<SingleThreadTaskRunner> task_runner_2) {
+ EXPECT_FALSE(task_runner_1->RunsTasksOnCurrentThread());
+ EXPECT_TRUE(task_runner_2->RunsTasksOnCurrentThread());
+ },
+ task_runner_1, task_runner_2));
+
+ task_tracker_.Shutdown();
+}
+
+namespace {
+
+class CallJoinFromDifferentThread : public SimpleThread {
+ public:
+ CallJoinFromDifferentThread(
+ SchedulerSingleThreadTaskRunnerManager* manager_to_join)
+ : SimpleThread("SchedulerSingleThreadTaskRunnerManagerJoinThread"),
+ manager_to_join_(manager_to_join),
+ run_started_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ ~CallJoinFromDifferentThread() override = default;
+
+ void Run() override {
+ run_started_event_.Signal();
+ manager_to_join_->JoinForTesting();
+ }
+
+ void WaitForRunToStart() { run_started_event_.Wait(); }
+
+ private:
+ SchedulerSingleThreadTaskRunnerManager* const manager_to_join_;
+ WaitableEvent run_started_event_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallJoinFromDifferentThread);
+};
+
+class TaskSchedulerSingleThreadTaskRunnerManagerJoinTest
+ : public TaskSchedulerSingleThreadTaskRunnerManagerTest {
+ public:
+ TaskSchedulerSingleThreadTaskRunnerManagerJoinTest() = default;
+ ~TaskSchedulerSingleThreadTaskRunnerManagerJoinTest() override = default;
+
+ protected:
+ void TearDownSingleThreadTaskRunnerManager() override {
+ // The tests themselves are responsible for calling JoinForTesting().
+ single_thread_task_runner_manager_.reset();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest);
+};
+
+} // namespace
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest, ConcurrentJoin) {
+ // Exercises the codepath where the workers are unavailable for unregistration
+ // because of a Join call.
+ WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_blocking(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ {
+ auto task_runner = single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithBaseSyncPrimitives());
+ EXPECT_TRUE(task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(&WaitableEvent::Signal, Unretained(&task_running))));
+ EXPECT_TRUE(task_runner->PostTask(
+ FROM_HERE, BindOnce(&WaitableEvent::Wait, Unretained(&task_blocking))));
+ }
+
+ task_running.Wait();
+ CallJoinFromDifferentThread join_from_different_thread(
+ single_thread_task_runner_manager_.get());
+ join_from_different_thread.Start();
+ join_from_different_thread.WaitForRunToStart();
+ task_blocking.Signal();
+ join_from_different_thread.Join();
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest,
+ ConcurrentJoinExtraSkippedTask) {
+ // Verifies that tasks are properly cleaned up at Join, allowing
+ // SingleThreadTaskRunners to unregister themselves.
+ WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_blocking(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ {
+ auto task_runner = single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithBaseSyncPrimitives());
+ EXPECT_TRUE(task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(&WaitableEvent::Signal, Unretained(&task_running))));
+ EXPECT_TRUE(task_runner->PostTask(
+ FROM_HERE, BindOnce(&WaitableEvent::Wait, Unretained(&task_blocking))));
+ EXPECT_TRUE(task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing)));
+ }
+
+ task_running.Wait();
+ CallJoinFromDifferentThread join_from_different_thread(
+ single_thread_task_runner_manager_.get());
+ join_from_different_thread.Start();
+ join_from_different_thread.WaitForRunToStart();
+ task_blocking.Signal();
+ join_from_different_thread.Join();
+}
+
+#if defined(OS_WIN)
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, COMSTAInitialized) {
+ scoped_refptr<SingleThreadTaskRunner> com_task_runner =
+ single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
+
+ com_task_runner->PostTask(
+ FROM_HERE, Bind([]() {
+ HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if (SUCCEEDED(hr)) {
+ ADD_FAILURE() << "COM STA was not initialized on this thread";
+ CoUninitialize();
+ }
+ }));
+
+ task_tracker_.Shutdown();
+}
+
+namespace {
+
+const wchar_t* const kTestWindowClassName =
+ L"TaskSchedulerSingleThreadTaskRunnerManagerTestWinMessageWindow";
+
+class TaskSchedulerSingleThreadTaskRunnerManagerTestWin
+ : public TaskSchedulerSingleThreadTaskRunnerManagerTest {
+ public:
+ TaskSchedulerSingleThreadTaskRunnerManagerTestWin() = default;
+
+ void SetUp() override {
+ TaskSchedulerSingleThreadTaskRunnerManagerTest::SetUp();
+ register_class_succeeded_ = RegisterTestWindowClass();
+ ASSERT_TRUE(register_class_succeeded_);
+ }
+
+ void TearDown() override {
+ if (register_class_succeeded_)
+ ::UnregisterClass(kTestWindowClassName, CURRENT_MODULE());
+
+ TaskSchedulerSingleThreadTaskRunnerManagerTest::TearDown();
+ }
+
+ HWND CreateTestWindow() {
+ return CreateWindow(kTestWindowClassName, kTestWindowClassName, 0, 0, 0, 0,
+ 0, HWND_MESSAGE, nullptr, CURRENT_MODULE(), nullptr);
+ }
+
+ private:
+ bool RegisterTestWindowClass() {
+ WNDCLASSEX window_class = {};
+ window_class.cbSize = sizeof(window_class);
+ window_class.lpfnWndProc = &::DefWindowProc;
+ window_class.hInstance = CURRENT_MODULE();
+ window_class.lpszClassName = kTestWindowClassName;
+ return !!::RegisterClassEx(&window_class);
+ }
+
+ bool register_class_succeeded_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerTestWin);
+};
+
+} // namespace
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTestWin, PumpsMessages) {
+ scoped_refptr<SingleThreadTaskRunner> com_task_runner =
+ single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ HWND hwnd = nullptr;
+ // HWNDs process messages on the thread that created them, so we have to
+ // create them within the context of the task runner to properly simulate a
+ // COM callback.
+ com_task_runner->PostTask(
+ FROM_HERE,
+ Bind([](TaskSchedulerSingleThreadTaskRunnerManagerTestWin* test_harness,
+ HWND* hwnd) { *hwnd = test_harness->CreateTestWindow(); },
+ Unretained(this), &hwnd));
+
+ task_tracker_.Flush();
+
+ ASSERT_NE(hwnd, nullptr);
+ // If the message pump isn't running, we will hang here. This simulates how
+ // COM would receive a callback with its own message HWND.
+ SendMessage(hwnd, WM_USER, 0, 0);
+
+ com_task_runner->PostTask(
+ FROM_HERE, Bind([](HWND hwnd) { ::DestroyWindow(hwnd); }, hwnd));
+
+ task_tracker_.Shutdown();
+}
+
+#endif // defined(OS_WIN)
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker.cc b/chromium/base/task_scheduler/scheduler_worker.cc
index 3e77436b984..4084c527a73 100644
--- a/chromium/base/task_scheduler/scheduler_worker.cc
+++ b/chromium/base/task_scheduler/scheduler_worker.cc
@@ -41,7 +41,7 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
outer_->delegate_->OnMainEntry(outer_.get());
// A SchedulerWorker starts out waiting for work.
- WaitForWork();
+ outer_->delegate_->WaitForWork(&wake_up_event_);
#if defined(OS_WIN)
std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
@@ -67,13 +67,12 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
if (outer_->delegate_->CanDetach(outer_.get())) {
detached_thread = outer_->DetachThreadObject(DetachNotify::DELEGATE);
if (detached_thread) {
- outer_ = nullptr;
DCHECK_EQ(detached_thread.get(), this);
PlatformThread::Detach(thread_handle_);
break;
}
}
- WaitForWork();
+ outer_->delegate_->WaitForWork(&wake_up_event_);
continue;
}
@@ -119,6 +118,8 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
// nullptr. JoinForTesting() cleans up if we get nullptr.
if (!detached_thread)
detached_thread = outer_->DetachThreadObject(DetachNotify::SILENT);
+
+ outer_->delegate_->OnMainExit();
}
void Join() { PlatformThread::Join(thread_handle_); }
@@ -142,19 +143,6 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
current_thread_priority_);
}
- void WaitForWork() {
- DCHECK(outer_);
- const TimeDelta sleep_time = outer_->delegate_->GetSleepTimeout();
- if (sleep_time.is_max()) {
- // Calling TimedWait with TimeDelta::Max is not recommended per
- // http://crbug.com/465948.
- wake_up_event_.Wait();
- } else {
- wake_up_event_.TimedWait(sleep_time);
- }
- wake_up_event_.Reset();
- }
-
// Returns the priority for which the thread should be set based on the
// priority hint, current shutdown state, and platform capabilities.
ThreadPriority GetDesiredThreadPriority() {
@@ -200,6 +188,19 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
+void SchedulerWorker::Delegate::WaitForWork(WaitableEvent* wake_up_event) {
+ DCHECK(wake_up_event);
+ const TimeDelta sleep_time = GetSleepTimeout();
+ if (sleep_time.is_max()) {
+ // Calling TimedWait with TimeDelta::Max is not recommended per
+ // http://crbug.com/465948.
+ wake_up_event->Wait();
+ } else {
+ wake_up_event->TimedWait(sleep_time);
+ }
+ wake_up_event->Reset();
+}
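
The default WaitForWork() distinguishes an infinite sleep from a timed one because TimedWait(TimeDelta::Max()) is discouraged per http://crbug.com/465948, and it manually resets the event afterwards. A rough sketch of the same shape with standard primitives (WakeUpEvent is illustrative, not base::WaitableEvent):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Manually reset event: Signal() leaves it signaled until WaitForWork()
    // consumes the wake-up, mirroring WaitableEvent's MANUAL reset policy.
    struct WakeUpEvent {
      std::mutex lock;
      std::condition_variable cv;
      bool signaled = false;

      void Signal() {
        std::lock_guard<std::mutex> guard(lock);
        signaled = true;
        cv.notify_one();
      }

      // |infinite| stands in for sleep_time.is_max().
      void WaitForWork(std::chrono::milliseconds sleep_time, bool infinite) {
        std::unique_lock<std::mutex> guard(lock);
        if (infinite)
          cv.wait(guard, [this] { return signaled; });  // Unbounded wait.
        else
          cv.wait_for(guard, sleep_time, [this] { return signaled; });
        signaled = false;  // Equivalent of wake_up_event->Reset().
      }
    };
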
+
scoped_refptr<SchedulerWorker> SchedulerWorker::Create(
ThreadPriority priority_hint,
std::unique_ptr<Delegate> delegate,
@@ -330,8 +331,12 @@ void SchedulerWorker::CreateThreadAssertSynchronized() {
}
bool SchedulerWorker::ShouldExit() {
- return task_tracker_->IsShutdownComplete() ||
- join_called_for_testing_.IsSet() || should_exit_.IsSet();
+ // The ordering of the checks is important below. This SchedulerWorker may be
+ // released and outlive |task_tracker_| in unit tests. However, when the
+ // SchedulerWorker is released, |should_exit_| will be set, so check that
+ // first.
+ return should_exit_.IsSet() || join_called_for_testing_.IsSet() ||
+ task_tracker_->IsShutdownComplete();
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker.h b/chromium/base/task_scheduler/scheduler_worker.h
index 0f9bc7f345e..0fc595d4e48 100644
--- a/chromium/base/task_scheduler/scheduler_worker.h
+++ b/chromium/base/task_scheduler/scheduler_worker.h
@@ -42,7 +42,7 @@ class BASE_EXPORT SchedulerWorker
public:
// Delegate interface for SchedulerWorker. The methods are always called from
// the thread managed by the SchedulerWorker instance.
- class Delegate {
+ class BASE_EXPORT Delegate {
public:
virtual ~Delegate() = default;
@@ -68,6 +68,11 @@ class BASE_EXPORT SchedulerWorker
// worker's WakeUp() method is called.
virtual TimeDelta GetSleepTimeout() = 0;
+ // Called by a thread to wait for work. Override this method if the thread
+ // in question needs special handling to go to sleep. |wake_up_event| is a
+ // manually resettable event and is signaled on SchedulerWorker::WakeUp().
+ virtual void WaitForWork(WaitableEvent* wake_up_event);
+
// Called by a thread if it is allowed to detach if the last call to
// GetWork() returned nullptr.
//
@@ -85,6 +90,9 @@ class BASE_EXPORT SchedulerWorker
// acquire a SchedulerLock because it is called within the scope of another
// SchedulerLock.
virtual void OnDetach() = 0;
+
+ // Called by a thread right before the main function exits.
+ virtual void OnMainExit() {}
};
enum class InitialState { ALIVE, DETACHED };
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool.h b/chromium/base/task_scheduler/scheduler_worker_pool.h
index c742ac3c454..f7a89084c59 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool.h
@@ -10,7 +10,6 @@
#include "base/base_export.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
-#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
@@ -19,7 +18,6 @@
namespace base {
namespace internal {
-class SchedulerWorker;
class SequenceSortKey;
// Interface for a worker pool.
@@ -39,12 +37,6 @@ class BASE_EXPORT SchedulerWorkerPool {
virtual scoped_refptr<SequencedTaskRunner>
CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits) = 0;
- // Returns a SingleThreadTaskRunner whose PostTask invocations result in
- // scheduling tasks in this SchedulerWorkerPool using |traits|. Tasks run on a
- // single thread in posting order.
- virtual scoped_refptr<SingleThreadTaskRunner>
- CreateSingleThreadTaskRunnerWithTraits(const TaskTraits& traits) = 0;
-
// Inserts |sequence| with |sequence_sort_key| into a queue of Sequences that
// can be processed by any worker owned by this SchedulerWorkerPool. Must only
// be used to put |sequence| back into a queue after running a Task from it.
@@ -54,24 +46,16 @@ class BASE_EXPORT SchedulerWorkerPool {
const SequenceSortKey& sequence_sort_key) = 0;
// Posts |task| to be executed by this SchedulerWorkerPool as part of
- // |sequence|. If |worker| is non-null, |task| will be scheduled to run on it
- // specifically (note: |worker| must be owned by this SchedulerWorkerPool);
- // otherwise, |task| will be added to the pending shared work. |task| won't be
- // executed before its delayed run time, if any. Returns true if |task| is
- // posted.
+ // |sequence|. |task| won't be executed before its delayed run time, if any.
+ // Returns true if |task| is posted.
virtual bool PostTaskWithSequence(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) = 0;
+ scoped_refptr<Sequence> sequence) = 0;
// Posts |task| to be executed by this SchedulerWorkerPool as part of
- // |sequence|. If |worker| is non-null, |task| will be scheduled to run on it
- // specifically (note: |worker| must be owned by this SchedulerWorkerPool);
- // otherwise, |task| will be added to the pending shared work. This must only
- // be called after |task| has gone through PostTaskWithSequence() and after
- // |task|'s delayed run time.
+ // |sequence|. This must only be called after |task| has gone through
+ // PostTaskWithSequence() and after |task|'s delayed run time.
virtual void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) = 0;
+ scoped_refptr<Sequence> sequence) = 0;
};
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index 22aa2c65db0..0515154e8ec 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -17,7 +17,6 @@
#include "base/metrics/histogram.h"
#include "base/sequence_token.h"
#include "base/sequenced_task_runner.h"
-#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
#include "base/task_runner.h"
#include "base/task_scheduler/delayed_task_manager.h"
@@ -59,12 +58,12 @@ class SchedulerParallelTaskRunner : public TaskRunner {
// TaskRunner:
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& closure,
+ OnceClosure closure,
TimeDelta delay) override {
// Post the task as part of a one-off single-task Sequence.
return worker_pool_->PostTaskWithSequence(
- MakeUnique<Task>(from_here, closure, traits_, delay),
- make_scoped_refptr(new Sequence), nullptr);
+ MakeUnique<Task>(from_here, std::move(closure), traits_, delay),
+ make_scoped_refptr(new Sequence));
}
bool RunsTasksOnCurrentThread() const override {
@@ -94,21 +93,21 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
// SequencedTaskRunner:
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& closure,
+ OnceClosure closure,
TimeDelta delay) override {
- std::unique_ptr<Task> task(new Task(from_here, closure, traits_, delay));
+ std::unique_ptr<Task> task(
+ new Task(from_here, std::move(closure), traits_, delay));
task->sequenced_task_runner_ref = this;
// Post the task as part of |sequence_|.
- return worker_pool_->PostTaskWithSequence(std::move(task), sequence_,
- nullptr);
+ return worker_pool_->PostTaskWithSequence(std::move(task), sequence_);
}
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& closure,
+ OnceClosure closure,
base::TimeDelta delay) override {
// Tasks are never nested within the task scheduler.
- return PostDelayedTask(from_here, closure, delay);
+ return PostDelayedTask(from_here, std::move(closure), delay);
}
bool RunsTasksOnCurrentThread() const override {
@@ -141,77 +140,19 @@ bool ContainsWorker(const std::vector<scoped_refptr<SchedulerWorker>>& workers,
} // namespace
-// A task runner that runs tasks with the SINGLE_THREADED ExecutionMode.
-class SchedulerWorkerPoolImpl::SchedulerSingleThreadTaskRunner :
- public SingleThreadTaskRunner {
- public:
- // Constructs a SchedulerSingleThreadTaskRunner which can be used to post
- // tasks so long as |worker_pool| and |worker| are alive.
- // TODO(robliao): Find a concrete way to manage the memory of |worker_pool|
- // and |worker|.
- SchedulerSingleThreadTaskRunner(const TaskTraits& traits,
- SchedulerWorkerPool* worker_pool,
- SchedulerWorker* worker);
-
- // SingleThreadTaskRunner:
- bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& closure,
- TimeDelta delay) override {
- std::unique_ptr<Task> task(new Task(from_here, closure, traits_, delay));
- task->single_thread_task_runner_ref = this;
-
- // Post the task to be executed by |worker_| as part of |sequence_|.
- return worker_pool_->PostTaskWithSequence(std::move(task), sequence_,
- worker_);
- }
-
- bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& closure,
- base::TimeDelta delay) override {
- // Tasks are never nested within the task scheduler.
- return PostDelayedTask(from_here, closure, delay);
- }
-
- bool RunsTasksOnCurrentThread() const override {
- // Even though this is a SingleThreadTaskRunner, test the actual sequence
- // instead of the assigned worker so that another task randomly assigned
- // to the same worker doesn't return true by happenstance.
- return sequence_->token() == SequenceToken::GetForCurrentThread();
- }
-
- private:
- ~SchedulerSingleThreadTaskRunner() override;
-
- // Sequence for all Tasks posted through this TaskRunner.
- const scoped_refptr<Sequence> sequence_ = new Sequence;
-
- const TaskTraits traits_;
- SchedulerWorkerPool* const worker_pool_;
- SchedulerWorker* const worker_;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunner);
-};
-
class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
: public SchedulerWorker::Delegate {
public:
// |outer| owns the worker for which this delegate is constructed.
// |re_enqueue_sequence_callback| is invoked when ReEnqueueSequence() is
- // called with a non-single-threaded Sequence. |shared_priority_queue| is a
- // PriorityQueue whose transactions may overlap with the worker's
- // single-threaded PriorityQueue's transactions. |index| will be appended to
- // the pool name to label the underlying worker threads.
+ // called. |index| will be appended to the pool name to label the underlying
+ // worker threads.
SchedulerWorkerDelegateImpl(
SchedulerWorkerPoolImpl* outer,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
- const PriorityQueue* shared_priority_queue,
int index);
~SchedulerWorkerDelegateImpl() override;
- PriorityQueue* single_threaded_priority_queue() {
- return &single_threaded_priority_queue_;
- }
-
// SchedulerWorker::Delegate:
void OnMainEntry(SchedulerWorker* worker) override;
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override;
@@ -221,28 +162,10 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
bool CanDetach(SchedulerWorker* worker) override;
void OnDetach() override;
- void RegisterSingleThreadTaskRunner() {
- // No barrier as barriers only affect sequential consistency which is
- // irrelevant in a single variable use case (they don't force an immediate
- // flush anymore than atomics do by default).
- subtle::NoBarrier_AtomicIncrement(&num_single_threaded_runners_, 1);
- }
-
- void UnregisterSingleThreadTaskRunner() {
- subtle::NoBarrier_AtomicIncrement(&num_single_threaded_runners_, -1);
- }
-
private:
SchedulerWorkerPoolImpl* outer_;
const ReEnqueueSequenceCallback re_enqueue_sequence_callback_;
- // Single-threaded PriorityQueue for the worker.
- PriorityQueue single_threaded_priority_queue_;
-
- // True if the last Sequence returned by GetWork() was extracted from
- // |single_threaded_priority_queue_|.
- bool last_sequence_is_single_threaded_ = false;
-
// Time of the last detach.
TimeTicks last_detach_time_;
@@ -264,32 +187,125 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// TaskScheduler.NumTasksBeforeDetach histogram was recorded.
size_t num_tasks_since_last_detach_ = 0;
- subtle::Atomic32 num_single_threaded_runners_ = 0;
-
const int index_;
DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl);
};
+SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
+ const std::string& name,
+ ThreadPriority priority_hint,
+ ReEnqueueSequenceCallback re_enqueue_sequence_callback,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager)
+ : name_(name),
+ priority_hint_(priority_hint),
+ re_enqueue_sequence_callback_(std::move(re_enqueue_sequence_callback)),
+ idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
+ idle_workers_stack_cv_for_testing_(
+ idle_workers_stack_lock_.CreateConditionVariable()),
+ join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+#if DCHECK_IS_ON()
+ workers_created_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+#endif
+ // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
+ detach_duration_histogram_(Histogram::FactoryTimeGet(
+ kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
+ TimeDelta::FromMilliseconds(1),
+ TimeDelta::FromHours(1),
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
+ // than 1000 tasks before detaching, there is no need to know the exact
+ // number of tasks that ran.
+ num_tasks_before_detach_histogram_(Histogram::FactoryGet(
+ kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
+ 1,
+ 1000,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
+ // expected to run between zero and a few tens of tasks between waits.
+ // When it runs more than 100 tasks, there is no need to know the exact
+ // number of tasks that ran.
+ num_tasks_between_waits_histogram_(Histogram::FactoryGet(
+ kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
+ 1,
+ 100,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
+ task_tracker_(task_tracker),
+ delayed_task_manager_(delayed_task_manager) {
+ DCHECK(task_tracker_);
+ DCHECK(delayed_task_manager_);
+}
+
+void SchedulerWorkerPoolImpl::Start(const SchedulerWorkerPoolParams& params) {
+ suggested_reclaim_time_ = params.suggested_reclaim_time();
+
+ std::vector<SchedulerWorker*> workers_to_wake_up;
+
+ {
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
+
+#if DCHECK_IS_ON()
+ DCHECK(!workers_created_.IsSignaled());
+#endif
+
+ DCHECK(workers_.empty());
+ workers_.resize(params.max_threads());
+
+ // The number of workers created in the alive state is
+ // |num_wake_ups_before_start_|, plus one if the standby thread policy is
+ // ONE (so that the pool starts with one alive idle worker).
+ const int num_alive_workers =
+ num_wake_ups_before_start_ +
+ (params.standby_thread_policy() ==
+ SchedulerWorkerPoolParams::StandbyThreadPolicy::ONE
+ ? 1
+ : 0);
+
+ // Create workers in reverse order of index so that the worker with the
+ // highest index is at the bottom of the idle stack.
+ for (int index = params.max_threads() - 1; index >= 0; --index) {
+ const SchedulerWorker::InitialState initial_state =
+ index < num_alive_workers ? SchedulerWorker::InitialState::ALIVE
+ : SchedulerWorker::InitialState::DETACHED;
+ scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create(
+ params.priority_hint(),
+ MakeUnique<SchedulerWorkerDelegateImpl>(
+ this, re_enqueue_sequence_callback_, index),
+ task_tracker_, initial_state, params.backward_compatibility());
+ if (!worker)
+ break;
+
+ if (index < num_wake_ups_before_start_)
+ workers_to_wake_up.push_back(worker.get());
+ else
+ idle_workers_stack_.Push(worker.get());
+
+ workers_[index] = std::move(worker);
+ }
+
+#if DCHECK_IS_ON()
+ workers_created_.Signal();
+#endif
+
+ CHECK(!workers_.empty());
+ }
+
+ for (SchedulerWorker* worker : workers_to_wake_up)
+ worker->WakeUp();
+}
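
Start() creates workers from the highest index down so that, once all the pushes are done, worker 0 ends up on top of the idle stack and is handed out first. In miniature, ignoring the pre-start wake-ups (illustrative only):

    #include <cassert>
    #include <stack>
    #include <vector>

    int main() {
      const int max_threads = 4;
      std::stack<int> idle_workers;           // Stands in for |idle_workers_stack_|.
      std::vector<int> workers(max_threads);  // Stands in for |workers_|.
      for (int index = max_threads - 1; index >= 0; --index) {
        workers[index] = index;
        idle_workers.push(index);
      }
      // The lowest-indexed worker ends up on top and is woken first.
      assert(idle_workers.top() == 0);
    }
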
+
SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() {
// SchedulerWorkerPool should never be deleted in production unless its
// initialization failed.
DCHECK(join_for_testing_returned_.IsSignaled() || workers_.empty());
}
-// static
-std::unique_ptr<SchedulerWorkerPoolImpl> SchedulerWorkerPoolImpl::Create(
- const SchedulerWorkerPoolParams& params,
- const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
- TaskTracker* task_tracker,
- DelayedTaskManager* delayed_task_manager) {
- auto worker_pool = WrapUnique(
- new SchedulerWorkerPoolImpl(params, task_tracker, delayed_task_manager));
- if (worker_pool->Initialize(params, re_enqueue_sequence_callback))
- return worker_pool;
- return nullptr;
-}
-
scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits(
const TaskTraits& traits) {
return make_scoped_refptr(new SchedulerParallelTaskRunner(traits, this));
@@ -301,21 +317,6 @@ SchedulerWorkerPoolImpl::CreateSequencedTaskRunnerWithTraits(
return make_scoped_refptr(new SchedulerSequencedTaskRunner(traits, this));
}
-scoped_refptr<SingleThreadTaskRunner>
-SchedulerWorkerPoolImpl::CreateSingleThreadTaskRunnerWithTraits(
- const TaskTraits& traits) {
- // TODO(fdoray): Find a way to take load into account when assigning a
- // SchedulerWorker to a SingleThreadTaskRunner.
- size_t worker_index;
- {
- AutoSchedulerLock auto_lock(next_worker_index_lock_);
- worker_index = next_worker_index_;
- next_worker_index_ = (next_worker_index_ + 1) % workers_.size();
- }
- return make_scoped_refptr(new SchedulerSingleThreadTaskRunner(
- traits, this, workers_[worker_index].get()));
-}
-
void SchedulerWorkerPoolImpl::ReEnqueueSequence(
scoped_refptr<Sequence> sequence,
const SequenceSortKey& sequence_sort_key) {
@@ -336,27 +337,28 @@ void SchedulerWorkerPoolImpl::ReEnqueueSequence(
bool SchedulerWorkerPoolImpl::PostTaskWithSequence(
std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) {
+ scoped_refptr<Sequence> sequence) {
DCHECK(task);
DCHECK(sequence);
- DCHECK(!worker || ContainsWorker(workers_, worker));
if (!task_tracker_->WillPostTask(task.get()))
return false;
if (task->delayed_run_time.is_null()) {
- PostTaskWithSequenceNow(std::move(task), std::move(sequence), worker);
+ PostTaskWithSequenceNow(std::move(task), std::move(sequence));
} else {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task->task);
delayed_task_manager_->AddDelayedTask(
std::move(task),
Bind(
- [](scoped_refptr<Sequence> sequence, SchedulerWorker* worker,
+ [](scoped_refptr<Sequence> sequence,
SchedulerWorkerPool* worker_pool, std::unique_ptr<Task> task) {
worker_pool->PostTaskWithSequenceNow(std::move(task),
- std::move(sequence), worker);
+ std::move(sequence));
},
- std::move(sequence), Unretained(worker), Unretained(this)));
+ std::move(sequence), Unretained(this)));
}
return true;
@@ -364,42 +366,27 @@ bool SchedulerWorkerPoolImpl::PostTaskWithSequence(
void SchedulerWorkerPoolImpl::PostTaskWithSequenceNow(
std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) {
+ scoped_refptr<Sequence> sequence) {
DCHECK(task);
DCHECK(sequence);
- DCHECK(!worker || ContainsWorker(workers_, worker));
// Confirm that |task| is ready to run (its delayed run time is either null or
// in the past).
DCHECK_LE(task->delayed_run_time, TimeTicks::Now());
- // Because |worker| belongs to this worker pool, we know that the type
- // of its delegate is SchedulerWorkerDelegateImpl.
- PriorityQueue* const priority_queue =
- worker
- ? static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate())
- ->single_threaded_priority_queue()
- : &shared_priority_queue_;
- DCHECK(priority_queue);
-
const bool sequence_was_empty = sequence->PushTask(std::move(task));
if (sequence_was_empty) {
- // Insert |sequence| in |priority_queue| if it was empty before |task| was
- // inserted into it. Otherwise, one of these must be true:
- // - |sequence| is already in a PriorityQueue (not necessarily
- // |shared_priority_queue_|), or,
+ // Insert |sequence| in |shared_priority_queue_| if it was empty before
+ // |task| was inserted into it. Otherwise, one of these must be true:
+ // - |sequence| is already in a PriorityQueue, or,
// - A worker is running a Task from |sequence|. It will insert |sequence|
// in a PriorityQueue once it's done running the Task.
const auto sequence_sort_key = sequence->GetSortKey();
- priority_queue->BeginTransaction()->Push(std::move(sequence),
- sequence_sort_key);
+ shared_priority_queue_.BeginTransaction()->Push(std::move(sequence),
+ sequence_sort_key);
// Wake up a worker to process |sequence|.
- if (worker)
- WakeUpWorker(worker);
- else
- WakeUpOneWorker();
+ WakeUpOneWorker();
}
}
@@ -410,16 +397,25 @@ void SchedulerWorkerPoolImpl::GetHistograms(
}
int SchedulerWorkerPoolImpl::GetMaxConcurrentTasksDeprecated() const {
+#if DCHECK_IS_ON()
+ DCHECK(workers_created_.IsSignaled());
+#endif
return workers_.size();
}
void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
+#if DCHECK_IS_ON()
+ DCHECK(workers_created_.IsSignaled());
+#endif
AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
while (idle_workers_stack_.Size() < workers_.size())
idle_workers_stack_cv_for_testing_->Wait();
}
void SchedulerWorkerPoolImpl::JoinForTesting() {
+#if DCHECK_IS_ON()
+ DCHECK(workers_created_.IsSignaled());
+#endif
DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max())
<< "Workers can detach during join.";
for (const auto& worker : workers_)
@@ -442,34 +438,13 @@ size_t SchedulerWorkerPoolImpl::NumberOfAliveWorkersForTesting() {
return num_alive_workers;
}
-SchedulerWorkerPoolImpl::SchedulerSingleThreadTaskRunner::
- SchedulerSingleThreadTaskRunner(const TaskTraits& traits,
- SchedulerWorkerPool* worker_pool,
- SchedulerWorker* worker)
- : traits_(traits),
- worker_pool_(worker_pool),
- worker_(worker) {
- DCHECK(worker_pool_);
- DCHECK(worker_);
- static_cast<SchedulerWorkerDelegateImpl*>(worker_->delegate())->
- RegisterSingleThreadTaskRunner();
-}
-
-SchedulerWorkerPoolImpl::SchedulerSingleThreadTaskRunner::
- ~SchedulerSingleThreadTaskRunner() {
- static_cast<SchedulerWorkerDelegateImpl*>(worker_->delegate())->
- UnregisterSingleThreadTaskRunner();
-}
-
SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
SchedulerWorkerDelegateImpl(
SchedulerWorkerPoolImpl* outer,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
- const PriorityQueue* shared_priority_queue,
int index)
: outer_(outer),
re_enqueue_sequence_callback_(re_enqueue_sequence_callback),
- single_threaded_priority_queue_(shared_priority_queue),
index_(index) {}
SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
@@ -527,13 +502,8 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
{
std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
outer_->shared_priority_queue_.BeginTransaction());
- std::unique_ptr<PriorityQueue::Transaction> single_threaded_transaction(
- single_threaded_priority_queue_.BeginTransaction());
-
- if (shared_transaction->IsEmpty() &&
- single_threaded_transaction->IsEmpty()) {
- single_threaded_transaction.reset();
+ if (shared_transaction->IsEmpty()) {
// |shared_transaction| is kept alive while |worker| is added to
// |idle_workers_stack_| to avoid this race:
// 1. This thread creates a Transaction, finds |shared_priority_queue_|
@@ -554,23 +524,7 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
return nullptr;
}
- // True if both PriorityQueues have Sequences and the Sequence at the top of
- // the shared PriorityQueue is more important.
- const bool shared_sequence_is_more_important =
- !shared_transaction->IsEmpty() &&
- !single_threaded_transaction->IsEmpty() &&
- shared_transaction->PeekSortKey() >
- single_threaded_transaction->PeekSortKey();
-
- if (single_threaded_transaction->IsEmpty() ||
- shared_sequence_is_more_important) {
- sequence = shared_transaction->PopSequence();
- last_sequence_is_single_threaded_ = false;
- } else {
- DCHECK(!single_threaded_transaction->IsEmpty());
- sequence = single_threaded_transaction->PopSequence();
- last_sequence_is_single_threaded_ = true;
- }
+ sequence = shared_transaction->PopSequence();
}
DCHECK(sequence);
@@ -589,17 +543,9 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask() {
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
ReEnqueueSequence(scoped_refptr<Sequence> sequence) {
- if (last_sequence_is_single_threaded_) {
- // A single-threaded Sequence is always re-enqueued in the single-threaded
- // PriorityQueue from which it was extracted.
- const SequenceSortKey sequence_sort_key = sequence->GetSortKey();
- single_threaded_priority_queue_.BeginTransaction()->Push(
- std::move(sequence), sequence_sort_key);
- } else {
- // |re_enqueue_sequence_callback_| will determine in which PriorityQueue
- // |sequence| must be enqueued.
- re_enqueue_sequence_callback_.Run(std::move(sequence));
- }
+ // |re_enqueue_sequence_callback_| will determine in which PriorityQueue
+ // |sequence| must be enqueued.
+ re_enqueue_sequence_callback_.Run(std::move(sequence));
}
TimeDelta SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
@@ -609,15 +555,10 @@ TimeDelta SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::CanDetach(
SchedulerWorker* worker) {
- // It's not an issue if |num_single_threaded_runners_| is incremented after
- // this because the newly created SingleThreadTaskRunner (from which no task
- // has run yet) will simply run all its tasks on the next physical thread
- // created by the worker.
const bool can_detach =
!idle_start_time_.is_null() &&
(TimeTicks::Now() - idle_start_time_) > outer_->suggested_reclaim_time_ &&
worker != outer_->PeekAtIdleWorkersStack() &&
- !subtle::NoBarrier_Load(&num_single_threaded_runners_) &&
outer_->CanWorkerDetachForTesting();
return can_detach;
}
@@ -630,106 +571,25 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnDetach() {
last_detach_time_ = TimeTicks::Now();
}
-SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
- const SchedulerWorkerPoolParams& params,
- TaskTracker* task_tracker,
- DelayedTaskManager* delayed_task_manager)
- : name_(params.name()),
- suggested_reclaim_time_(params.suggested_reclaim_time()),
- idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
- idle_workers_stack_cv_for_testing_(
- idle_workers_stack_lock_.CreateConditionVariable()),
- join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
-#if DCHECK_IS_ON()
- workers_created_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED),
-#endif
- // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
- detach_duration_histogram_(Histogram::FactoryTimeGet(
- kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
- TimeDelta::FromMilliseconds(1),
- TimeDelta::FromHours(1),
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
- // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
- // than 1000 tasks before detaching, there is no need to know the exact
- // number of tasks that ran.
- num_tasks_before_detach_histogram_(Histogram::FactoryGet(
- kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
- 1,
- 1000,
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
- // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
- // expected to run between zero and a few tens of tasks between waits.
- // When it runs more than 100 tasks, there is no need to know the exact
- // number of tasks that ran.
- num_tasks_between_waits_histogram_(Histogram::FactoryGet(
- kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
- 1,
- 100,
- 50,
- HistogramBase::kUmaTargetedHistogramFlag)),
- task_tracker_(task_tracker),
- delayed_task_manager_(delayed_task_manager) {
- DCHECK(task_tracker_);
- DCHECK(delayed_task_manager_);
-}
-
-bool SchedulerWorkerPoolImpl::Initialize(
- const SchedulerWorkerPoolParams& params,
- const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) {
- AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
-
- DCHECK(workers_.empty());
- workers_.resize(params.max_threads());
-
- // Create workers and push them to the idle stack in reverse order of index.
- // This ensures that they are woken up in order of index and that the ALIVE
- // worker is on top of the stack.
- for (int index = params.max_threads() - 1; index >= 0; --index) {
- const bool is_standby_lazy =
- params.standby_thread_policy() ==
- SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY;
- const SchedulerWorker::InitialState initial_state =
- (index == 0 && !is_standby_lazy)
- ? SchedulerWorker::InitialState::ALIVE
- : SchedulerWorker::InitialState::DETACHED;
- scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create(
- params.priority_hint(),
- MakeUnique<SchedulerWorkerDelegateImpl>(
- this, re_enqueue_sequence_callback, &shared_priority_queue_, index),
- task_tracker_, initial_state, params.backward_compatibility());
- if (!worker)
- break;
- idle_workers_stack_.Push(worker.get());
- workers_[index] = std::move(worker);
- }
+void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
+ SchedulerWorker* worker = nullptr;
+ {
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
#if DCHECK_IS_ON()
- workers_created_.Signal();
+ DCHECK_EQ(workers_.empty(), !workers_created_.IsSignaled());
#endif
- return !workers_.empty();
-}
-
-void SchedulerWorkerPoolImpl::WakeUpWorker(SchedulerWorker* worker) {
- DCHECK(worker);
- RemoveFromIdleWorkersStack(worker);
- worker->WakeUp();
- // TODO(robliao): Honor StandbyThreadPolicy::ONE here and consider adding
- // hysteresis to the CanDetach check. See https://crbug.com/666041.
-}
-
-void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
- SchedulerWorker* worker;
- {
- AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
- worker = idle_workers_stack_.Pop();
+ if (workers_.empty())
+ ++num_wake_ups_before_start_;
+ else
+ worker = idle_workers_stack_.Pop();
}
+
if (worker)
worker->WakeUp();
+ // TODO(robliao): Honor StandbyThreadPolicy::ONE here and consider adding
+ // hysteresis to the CanDetach check. See https://crbug.com/666041.
}
void SchedulerWorkerPoolImpl::AddToIdleWorkersStack(
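
Note: WakeUpOneWorker() now counts wake-ups that arrive before Start() has created any workers. A plausible replay loop that Start() could run once |workers_| is populated (a sketch only; Start()'s body is outside this diff):

    // Hypothetical replay inside Start(), under |idle_workers_stack_lock_|:
    const int num_wake_ups = num_wake_ups_before_start_;
    for (int i = 0; i < num_wake_ups; ++i) {
      if (SchedulerWorker* worker = idle_workers_stack_.Pop())
        worker->WakeUp();
    }
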
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
index d4b8440ce86..f9b04a1d2fd 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -45,38 +45,44 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// from it.
using ReEnqueueSequenceCallback = Callback<void(scoped_refptr<Sequence>)>;
+ // Constructs a pool without workers. Tasks can be posted to the pool, but
+ // they won't run until workers are created. To create workers and start
+ // running tasks, call Start().
+ //
+ // |name| is used to label the pool's threads ("TaskScheduler" + |name| +
+ // index) and histograms ("TaskScheduler." + histogram name + "." + |name| +
+ // extra suffixes). |priority_hint| is the preferred thread priority; the
+ // actual thread priority depends on shutdown state and platform capabilities.
+ // |re_enqueue_sequence_callback| is invoked when a Sequence isn't empty after
+ // a worker pops a Task from it. |task_tracker| keeps track of tasks.
+ // |delayed_task_manager| handles tasks posted with a delay.
+ SchedulerWorkerPoolImpl(
+ const std::string& name,
+ ThreadPriority priority_hint,
+ ReEnqueueSequenceCallback re_enqueue_sequence_callback,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager);
+
+ // Creates workers following the |params| specification, allowing existing and
+ // future tasks to run. Can only be called once. CHECKs on failure.
+ void Start(const SchedulerWorkerPoolParams& params);
+
// Destroying a SchedulerWorkerPoolImpl returned by Create() is not allowed in
// production; it is always leaked. In tests, it can only be destroyed after
// JoinForTesting() has returned.
~SchedulerWorkerPoolImpl() override;
- // Creates a SchedulerWorkerPoolImpl following the |worker_pool_params|
- // specification. |re_enqueue_sequence_callback| will be invoked after a
- // worker of this worker pool tries to run a Task. |task_tracker| is used to
- // handle shutdown behavior of Tasks. |delayed_task_manager| handles Tasks
- // posted with a delay. Returns nullptr on failure to create a worker pool
- // with at least one thread.
- static std::unique_ptr<SchedulerWorkerPoolImpl> Create(
- const SchedulerWorkerPoolParams& params,
- const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
- TaskTracker* task_tracker,
- DelayedTaskManager* delayed_task_manager);
-
// SchedulerWorkerPool:
scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
const TaskTraits& traits) override;
scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
const TaskTraits& traits) override;
- scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
- const TaskTraits& traits) override;
void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
const SequenceSortKey& sequence_sort_key) override;
bool PostTaskWithSequence(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) override;
+ scoped_refptr<Sequence> sequence) override;
void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) override;
+ scoped_refptr<Sequence> sequence) override;
const HistogramBase* num_tasks_before_detach_histogram() const {
return num_tasks_before_detach_histogram_;
@@ -111,20 +117,12 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
size_t NumberOfAliveWorkersForTesting();
private:
- class SchedulerSingleThreadTaskRunner;
class SchedulerWorkerDelegateImpl;
SchedulerWorkerPoolImpl(const SchedulerWorkerPoolParams& params,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager);
- bool Initialize(
- const SchedulerWorkerPoolParams& params,
- const ReEnqueueSequenceCallback& re_enqueue_sequence_callback);
-
- // Wakes up |worker|.
- void WakeUpWorker(SchedulerWorker* worker);
-
// Wakes up the last worker from this worker pool to go idle, if any.
void WakeUpOneWorker();
@@ -140,30 +138,27 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Returns true if worker thread detachment is permitted.
bool CanWorkerDetachForTesting();
- // The name of this worker pool, used to label its worker threads.
const std::string name_;
-
- // All worker owned by this worker pool. Only modified during initialization
- // of the worker pool.
- std::vector<scoped_refptr<SchedulerWorker>> workers_;
-
- // Synchronizes access to |next_worker_index_|.
- SchedulerLock next_worker_index_lock_;
-
- // Index of the worker that will be assigned to the next single-threaded
- // TaskRunner returned by this pool.
- size_t next_worker_index_ = 0;
+ const ThreadPriority priority_hint_;
+ const ReEnqueueSequenceCallback re_enqueue_sequence_callback_;
// PriorityQueue from which all threads of this worker pool get work.
PriorityQueue shared_priority_queue_;
- // Suggested reclaim time for workers.
- const TimeDelta suggested_reclaim_time_;
+ // All workers owned by this worker pool. Initialized by Start() within the
+ // scope of |idle_workers_stack_lock_|. Never modified afterwards (i.e. can be
+ // read without synchronization once |workers_created_.IsSignaled()|).
+ std::vector<scoped_refptr<SchedulerWorker>> workers_;
+
+ // Suggested reclaim time for workers. Initialized by Start(). Never modified
+ // afterwards (i.e. can be read without synchronization once
+ // |workers_created_.IsSignaled()|).
+ TimeDelta suggested_reclaim_time_;
- // Synchronizes access to |idle_workers_stack_| and
- // |idle_workers_stack_cv_for_testing_|. Has |shared_priority_queue_|'s
- // lock as its predecessor so that a worker can be pushed to
- // |idle_workers_stack_| within the scope of a Transaction (more
+ // Synchronizes access to |idle_workers_stack_|,
+ // |idle_workers_stack_cv_for_testing_| and |num_wake_ups_before_start_|. Has
+ // |shared_priority_queue_|'s lock as its predecessor so that a worker can be
+ // pushed to |idle_workers_stack_| within the scope of a Transaction (more
// details in GetWork()).
mutable SchedulerLock idle_workers_stack_lock_;
@@ -177,6 +172,9 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Signaled when all workers become idle.
std::unique_ptr<ConditionVariable> idle_workers_stack_cv_for_testing_;
+ // Number of wake ups that occurred before Start().
+ int num_wake_ups_before_start_ = 0;
+
// Signaled once JoinForTesting() has returned.
WaitableEvent join_for_testing_returned_;
@@ -186,7 +184,7 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
#if DCHECK_IS_ON()
// Signaled when all workers have been created.
- WaitableEvent workers_created_;
+ mutable WaitableEvent workers_created_;
#endif
// TaskScheduler.DetachDuration.[worker pool name] histogram. Intentionally
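
Note: the header above makes initialization two-phase: construct the pool (tasks may be posted immediately) and later call Start() to create workers. A minimal usage sketch mirroring the unit tests below; the tracker, manager and re-enqueue callback are assumed to exist:

    auto pool = MakeUnique<SchedulerWorkerPoolImpl>(
        "Demo", ThreadPriority::NORMAL,
        Bind(&MyReEnqueueSequenceCallback),  // hypothetical callback
        &task_tracker, &delayed_task_manager);
    // Tasks posted here are queued but do not run yet.
    pool->Start(SchedulerWorkerPoolParams(
        SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY,
        /* max_threads */ 4, TimeDelta::FromSeconds(30)));
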
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index 53b7e982ab0..ea5cd51b47f 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -67,7 +67,7 @@ class TaskSchedulerWorkerPoolImplTest
: service_thread_("TaskSchedulerServiceThread") {}
void SetUp() override {
- InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+ CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
}
void TearDown() override {
@@ -76,23 +76,33 @@ class TaskSchedulerWorkerPoolImplTest
worker_pool_->JoinForTesting();
}
- void InitializeWorkerPool(TimeDelta suggested_reclaim_time,
- size_t num_workers) {
+ void CreateWorkerPool() {
ASSERT_FALSE(worker_pool_);
ASSERT_FALSE(delayed_task_manager_);
service_thread_.Start();
delayed_task_manager_ =
base::MakeUnique<DelayedTaskManager>(service_thread_.task_runner());
- worker_pool_ = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams("TestWorkerPool", ThreadPriority::NORMAL,
- StandbyThreadPolicy::LAZY, num_workers,
- suggested_reclaim_time),
+ worker_pool_ = MakeUnique<SchedulerWorkerPoolImpl>(
+ "TestWorkerPool", ThreadPriority::NORMAL,
Bind(&TaskSchedulerWorkerPoolImplTest::ReEnqueueSequenceCallback,
Unretained(this)),
&task_tracker_, delayed_task_manager_.get());
ASSERT_TRUE(worker_pool_);
}
+ void StartWorkerPool(TimeDelta suggested_reclaim_time, size_t num_workers) {
+ ASSERT_TRUE(worker_pool_);
+ worker_pool_->Start(SchedulerWorkerPoolParams(
+ "TestWorkerPool", ThreadPriority::NORMAL, StandbyThreadPolicy::LAZY,
+ num_workers, suggested_reclaim_time));
+ }
+
+ void CreateAndStartWorkerPool(TimeDelta suggested_reclaim_time,
+ size_t num_workers) {
+ CreateWorkerPool();
+ StartWorkerPool(suggested_reclaim_time, num_workers);
+ }
+
std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_;
TaskTracker task_tracker_;
@@ -121,10 +131,11 @@ scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
return worker_pool->CreateTaskRunnerWithTraits(traits);
case test::ExecutionMode::SEQUENCED:
return worker_pool->CreateSequencedTaskRunnerWithTraits(traits);
- case test::ExecutionMode::SINGLE_THREADED:
- return worker_pool->CreateSingleThreadTaskRunnerWithTraits(traits);
+    default:
+      // Break out of the switch and report the failure below.
+      break;
}
- ADD_FAILURE() << "Unknown ExecutionMode";
+ ADD_FAILURE() << "Unexpected ExecutionMode";
return nullptr;
}
@@ -316,7 +327,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, PostTaskAfterShutdown) {
auto task_runner =
CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
task_tracker_.Shutdown();
- EXPECT_FALSE(task_runner->PostTask(FROM_HERE, Bind(&ShouldNotRun)));
+ EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
}
// Verify that a Task runs shortly after its delay expires.
@@ -327,9 +338,10 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, PostDelayedTask) {
WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
EXPECT_TRUE(CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam())
- ->PostDelayedTask(FROM_HERE, Bind(&WaitableEvent::Signal,
- Unretained(&task_ran)),
- TestTimeouts::tiny_timeout()));
+ ->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)),
+ TestTimeouts::tiny_timeout()));
// Wait until the task runs.
task_ran.Wait();
@@ -357,7 +369,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, SequencedRunsTasksOnCurrentThread) {
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner->PostTask(
FROM_HERE,
- Bind(
+ BindOnce(
[](scoped_refptr<TaskRunner> sequenced_task_runner,
WaitableEvent* task_ran) {
EXPECT_FALSE(sequenced_task_runner->RunsTasksOnCurrentThread());
@@ -373,127 +385,74 @@ INSTANTIATE_TEST_CASE_P(Parallel,
INSTANTIATE_TEST_CASE_P(Sequenced,
TaskSchedulerWorkerPoolImplTest,
::testing::Values(test::ExecutionMode::SEQUENCED));
-INSTANTIATE_TEST_CASE_P(
- SingleThreaded,
- TaskSchedulerWorkerPoolImplTest,
- ::testing::Values(test::ExecutionMode::SINGLE_THREADED));
namespace {
-// Same as TaskSchedulerWorkerPoolImplTest but its SchedulerWorkerPoolImpl
-// instance uses |max_threads == 1|.
-class TaskSchedulerWorkerPoolImplSingleWorkerTest
+class TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest
: public TaskSchedulerWorkerPoolImplTest {
public:
- TaskSchedulerWorkerPoolImplSingleWorkerTest() = default;
-
- protected:
void SetUp() override {
- InitializeWorkerPool(TimeDelta::Max(), 1);
+ CreateWorkerPool();
+ // Let the test start the worker pool.
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplSingleWorkerTest);
};
-} // namespace
-
-// Verify that the RunsTasksOnCurrentThread() method of a
-// SchedulerSingleThreadTaskRunner returns false when called from a task that
-// isn't part of its sequence even though it's running on that
-// SchedulerSingleThreadTaskRunner's assigned worker. Note: Tests that use
-// TestTaskFactory already verify that RunsTasksOnCurrentThread() returns true
-// when appropriate so this method complements it to get full coverage of that
-// method.
-TEST_P(TaskSchedulerWorkerPoolImplSingleWorkerTest,
- SingleThreadRunsTasksOnCurrentThread) {
- scoped_refptr<TaskRunner> task_runner(
- CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()));
- scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner(
- worker_pool_->CreateSingleThreadTaskRunnerWithTraits(TaskTraits()));
-
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- task_runner->PostTask(
- FROM_HERE,
- Bind(
- [](scoped_refptr<TaskRunner> single_thread_task_runner,
- WaitableEvent* task_ran) {
- EXPECT_FALSE(single_thread_task_runner->RunsTasksOnCurrentThread());
- task_ran->Signal();
- },
- single_thread_task_runner, Unretained(&task_ran)));
- task_ran.Wait();
+void TaskPostedBeforeStart(PlatformThreadRef* platform_thread_ref,
+ WaitableEvent* task_scheduled,
+ WaitableEvent* barrier) {
+ *platform_thread_ref = PlatformThread::CurrentRef();
+ task_scheduled->Signal();
+ barrier->Wait();
}
-INSTANTIATE_TEST_CASE_P(Parallel,
- TaskSchedulerWorkerPoolImplSingleWorkerTest,
- ::testing::Values(test::ExecutionMode::PARALLEL));
-INSTANTIATE_TEST_CASE_P(Sequenced,
- TaskSchedulerWorkerPoolImplSingleWorkerTest,
- ::testing::Values(test::ExecutionMode::SEQUENCED));
-INSTANTIATE_TEST_CASE_P(
- SingleThreaded,
- TaskSchedulerWorkerPoolImplSingleWorkerTest,
- ::testing::Values(test::ExecutionMode::SINGLE_THREADED));
-
-namespace {
-
-class TaskSchedulerWorkerPoolSingleThreadedTest
- : public TaskSchedulerWorkerPoolImplTest {
- public:
- void InitializeThreadChecker() {
- thread_checker_.reset(new ThreadCheckerImpl());
- }
-
- void CheckValidThread() {
- EXPECT_TRUE(thread_checker_->CalledOnValidThread());
- }
-
- protected:
- void SetUp() override {
- InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
- }
-
- TaskSchedulerWorkerPoolSingleThreadedTest() = default;
-
- private:
- std::unique_ptr<ThreadCheckerImpl> thread_checker_;
-
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolSingleThreadedTest);
-};
-
} // namespace
-// Verify that thread resources for a single thread remain.
-TEST_F(TaskSchedulerWorkerPoolSingleThreadedTest, SingleThreadTask) {
- auto single_thread_task_runner =
- worker_pool_->CreateSingleThreadTaskRunnerWithTraits(
- TaskTraits().WithShutdownBehavior(
- TaskShutdownBehavior::BLOCK_SHUTDOWN));
- single_thread_task_runner->PostTask(
- FROM_HERE,
- Bind(&TaskSchedulerWorkerPoolSingleThreadedTest::InitializeThreadChecker,
- Unretained(this)));
- WaitableEvent task_waiter(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- single_thread_task_runner->PostTask(
- FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&task_waiter)));
- task_waiter.Wait();
- worker_pool_->WaitForAllWorkersIdleForTesting();
-
- // Give the worker pool a chance to reclaim its threads.
- PlatformThread::Sleep(kReclaimTimeForDetachTests + kExtraTimeToWaitForDetach);
-
- worker_pool_->DisallowWorkerDetachmentForTesting();
-
- single_thread_task_runner->PostTask(
- FROM_HERE,
- Bind(&TaskSchedulerWorkerPoolSingleThreadedTest::CheckValidThread,
- Unretained(this)));
- single_thread_task_runner->PostTask(
- FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&task_waiter)));
- task_waiter.Wait();
+// Verify that 2 tasks posted before Start() to a SchedulerWorkerPoolImpl with
+// more than 2 workers are scheduled on different workers when Start() is
+// called.
+TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
+ PostTasksBeforeStart) {
+ PlatformThreadRef task_1_thread_ref;
+ PlatformThreadRef task_2_thread_ref;
+ WaitableEvent task_1_scheduled(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_2_scheduled(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ // This event is used to prevent a task from completing before the other task
+ // is scheduled. If that happened, both tasks could run on the same worker and
+ // this test couldn't verify that the correct number of workers were woken up.
+ WaitableEvent barrier(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ worker_pool_
+ ->CreateTaskRunnerWithTraits(TaskTraits().WithBaseSyncPrimitives())
+ ->PostTask(FROM_HERE,
+ Bind(&TaskPostedBeforeStart, Unretained(&task_1_thread_ref),
+ Unretained(&task_1_scheduled), Unretained(&barrier)));
+ worker_pool_
+ ->CreateTaskRunnerWithTraits(TaskTraits().WithBaseSyncPrimitives())
+ ->PostTask(FROM_HERE,
+ Bind(&TaskPostedBeforeStart, Unretained(&task_2_thread_ref),
+ Unretained(&task_2_scheduled), Unretained(&barrier)));
+
+ // Workers should not be created and tasks should not run before the pool is
+ // started.
+ EXPECT_EQ(0U, worker_pool_->NumberOfAliveWorkersForTesting());
+ EXPECT_FALSE(task_1_scheduled.IsSignaled());
+ EXPECT_FALSE(task_2_scheduled.IsSignaled());
+
+ StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+
+ // Tasks should be scheduled shortly after the pool is started.
+ task_1_scheduled.Wait();
+ task_2_scheduled.Wait();
+
+ // Tasks should be scheduled on different threads.
+ EXPECT_NE(task_1_thread_ref, task_2_thread_ref);
+
+ barrier.Signal();
+ task_tracker_.Flush();
}
namespace {
@@ -522,7 +481,8 @@ class TaskSchedulerWorkerPoolCheckTlsReuse
WaitableEvent::InitialState::NOT_SIGNALED) {}
void SetUp() override {
- InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
+ CreateAndStartWorkerPool(kReclaimTimeForDetachTests,
+ kNumWorkersInWorkerPool);
}
subtle::Atomic32 zero_tls_values_ = 0;
@@ -614,19 +574,19 @@ class TaskSchedulerWorkerPoolHistogramTest
TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+ CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
auto task_runner = worker_pool_->CreateSequencedTaskRunnerWithTraits(
TaskTraits().WithBaseSyncPrimitives());
// Post a task.
task_runner->PostTask(FROM_HERE,
- Bind(&WaitableEvent::Wait, Unretained(&event)));
+ BindOnce(&WaitableEvent::Wait, Unretained(&event)));
// Post 2 more tasks while the first task hasn't completed its execution. It
// is guaranteed that these tasks will run immediately after the first task,
// without allowing the worker to sleep.
- task_runner->PostTask(FROM_HERE, Bind(&DoNothing));
- task_runner->PostTask(FROM_HERE, Bind(&DoNothing));
+ task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));
+ task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));
// Allow tasks to run and wait until the SchedulerWorker is idle.
event.Signal();
@@ -635,7 +595,7 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
// Wake up the SchedulerWorker that just became idle by posting a task and
// wait until it becomes idle again. The SchedulerWorker should record the
// TaskScheduler.NumTasksBetweenWaits.* histogram on wake up.
- task_runner->PostTask(FROM_HERE, Bind(&DoNothing));
+ task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing));
worker_pool_->WaitForAllWorkersIdleForTesting();
// Verify that counts were recorded to the histogram as expected.
@@ -658,7 +618,7 @@ void SignalAndWaitEvent(WaitableEvent* signal_event,
TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
+ CreateAndStartWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
auto task_runner = worker_pool_->CreateTaskRunnerWithTraits(
TaskTraits().WithBaseSyncPrimitives());
@@ -668,10 +628,10 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
task_started_events.push_back(
MakeUnique<WaitableEvent>(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED));
- task_runner->PostTask(
- FROM_HERE,
- Bind(&SignalAndWaitEvent, Unretained(task_started_events.back().get()),
- Unretained(&tasks_can_exit_event)));
+ task_runner->PostTask(FROM_HERE,
+ BindOnce(&SignalAndWaitEvent,
+ Unretained(task_started_events.back().get()),
+ Unretained(&tasks_can_exit_event)));
}
for (const auto& task_started_event : task_started_events)
task_started_event->Wait();
@@ -690,10 +650,10 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
task_started_events.push_back(
MakeUnique<WaitableEvent>(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED));
- task_runner->PostTask(
- FROM_HERE,
- Bind(&SignalAndWaitEvent, Unretained(task_started_events.back().get()),
- Unretained(&tasks_can_exit_event)));
+ task_runner->PostTask(FROM_HERE,
+ BindOnce(&SignalAndWaitEvent,
+ Unretained(task_started_events.back().get()),
+ Unretained(&tasks_can_exit_event)));
}
for (const auto& task_started_event : task_started_events)
task_started_event->Wait();
@@ -719,9 +679,8 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
worker_pool_->DisallowWorkerDetachmentForTesting();
}
-// TODO(crbug.com/698046): disabled due to flakiness.
-TEST_F(TaskSchedulerWorkerPoolHistogramTest, DISABLED_NumTasksBeforeDetach) {
- InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
+TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeDetach) {
+ CreateAndStartWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
auto histogrammed_thread_task_runner =
worker_pool_->CreateSequencedTaskRunnerWithTraits(
@@ -732,18 +691,20 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, DISABLED_NumTasksBeforeDetach) {
// thread for each of its tasks.
PlatformThreadRef thread_ref;
histogrammed_thread_task_runner->PostTask(
- FROM_HERE, Bind(
+ FROM_HERE, BindOnce(
[](PlatformThreadRef* thread_ref) {
ASSERT_TRUE(thread_ref);
*thread_ref = PlatformThread::CurrentRef();
},
Unretained(&thread_ref)));
histogrammed_thread_task_runner->PostTask(
- FROM_HERE, Bind(
- [](PlatformThreadRef thread_ref) {
- EXPECT_EQ(thread_ref, PlatformThreadRef());
+ FROM_HERE, BindOnce(
+ [](PlatformThreadRef* thread_ref) {
+ ASSERT_FALSE(thread_ref->is_null());
+ EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef());
},
- thread_ref));
+ Unretained(&thread_ref)));
+
WaitableEvent detach_thread_running(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
@@ -752,14 +713,16 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, DISABLED_NumTasksBeforeDetach) {
WaitableEvent::InitialState::NOT_SIGNALED);
histogrammed_thread_task_runner->PostTask(
FROM_HERE,
- Bind(
- [](PlatformThreadRef thread_ref, WaitableEvent* detach_thread_running,
+ BindOnce(
+ [](PlatformThreadRef* thread_ref,
+ WaitableEvent* detach_thread_running,
WaitableEvent* detach_thread_continue) {
- EXPECT_EQ(thread_ref, PlatformThreadRef());
+ ASSERT_FALSE(thread_ref->is_null());
+ EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef());
detach_thread_running->Signal();
detach_thread_continue->Wait();
},
- thread_ref, Unretained(&detach_thread_running),
+ Unretained(&thread_ref), Unretained(&detach_thread_running),
Unretained(&detach_thread_continue)));
detach_thread_running.Wait();
@@ -781,10 +744,11 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, DISABLED_NumTasksBeforeDetach) {
worker_pool_->CreateSequencedTaskRunnerWithTraits(
TaskTraits().WithBaseSyncPrimitives());
task_runner_for_top_idle->PostTask(
- FROM_HERE, Bind(
+ FROM_HERE, BindOnce(
[](PlatformThreadRef thread_ref,
WaitableEvent* top_idle_thread_running,
WaitableEvent* top_idle_thread_continue) {
+ ASSERT_FALSE(thread_ref.is_null());
EXPECT_NE(thread_ref, PlatformThread::CurrentRef())
<< "Worker reused. Thread will not detach and the "
"histogram value will be wrong.";
@@ -831,12 +795,12 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitLazy) {
TaskTracker task_tracker;
DelayedTaskManager delayed_task_manager(
make_scoped_refptr(new TestSimpleTaskRunner));
- auto worker_pool = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams("LazyPolicyWorkerPool", ThreadPriority::NORMAL,
- StandbyThreadPolicy::LAZY, 8U,
- TimeDelta::Max()),
+ auto worker_pool = MakeUnique<SchedulerWorkerPoolImpl>(
+ "LazyPolicyWorkerPool", ThreadPriority::NORMAL,
Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
&delayed_task_manager);
+ worker_pool->Start(SchedulerWorkerPoolParams(StandbyThreadPolicy::LAZY, 8U,
+ TimeDelta::Max()));
ASSERT_TRUE(worker_pool);
EXPECT_EQ(0U, worker_pool->NumberOfAliveWorkersForTesting());
worker_pool->JoinForTesting();
@@ -846,11 +810,12 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
TaskTracker task_tracker;
DelayedTaskManager delayed_task_manager(
make_scoped_refptr(new TestSimpleTaskRunner));
- auto worker_pool = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams("LazyPolicyWorkerPool", ThreadPriority::NORMAL,
- StandbyThreadPolicy::ONE, 8U, TimeDelta::Max()),
+ auto worker_pool = MakeUnique<SchedulerWorkerPoolImpl>(
+ "OnePolicyWorkerPool", ThreadPriority::NORMAL,
Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
&delayed_task_manager);
+ worker_pool->Start(SchedulerWorkerPoolParams(StandbyThreadPolicy::ONE, 8U,
+ TimeDelta::Max()));
ASSERT_TRUE(worker_pool);
EXPECT_EQ(1U, worker_pool->NumberOfAliveWorkersForTesting());
worker_pool->JoinForTesting();
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
index 0747c2ed2cd..a77dc128b28 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
@@ -21,9 +21,21 @@ SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
backward_compatibility_(backward_compatibility) {}
SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
- SchedulerWorkerPoolParams&& other) = default;
+ StandbyThreadPolicy standby_thread_policy,
+ int max_threads,
+ TimeDelta suggested_reclaim_time,
+ SchedulerBackwardCompatibility backward_compatibility)
+ : SchedulerWorkerPoolParams(std::string(),
+ ThreadPriority::NORMAL,
+ standby_thread_policy,
+ max_threads,
+ suggested_reclaim_time,
+ backward_compatibility) {}
+
+SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
+ const SchedulerWorkerPoolParams& other) = default;
SchedulerWorkerPoolParams& SchedulerWorkerPoolParams::operator=(
- SchedulerWorkerPoolParams&& other) = default;
+ const SchedulerWorkerPoolParams& other) = default;
} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.h b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
index 5f90fd482d0..ca2aafc85ed 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
@@ -7,7 +7,6 @@
#include <string>
-#include "base/macros.h"
#include "base/task_scheduler/scheduler_worker_params.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -34,6 +33,8 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
// suggestion on when to reclaim idle threads. The pool is free to ignore this
// value for performance or correctness reasons. |backward_compatibility|
// indicates whether backward compatibility is enabled.
+ //
+ // TODO(fdoray): Remove this constructor. https://crbug.com/690706
SchedulerWorkerPoolParams(
const std::string& name,
ThreadPriority priority_hint,
@@ -42,29 +43,37 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
TimeDelta suggested_reclaim_time,
SchedulerBackwardCompatibility backward_compatibility =
SchedulerBackwardCompatibility::DISABLED);
- SchedulerWorkerPoolParams(SchedulerWorkerPoolParams&& other);
- SchedulerWorkerPoolParams& operator=(SchedulerWorkerPoolParams&& other);
+
+ // Same as above, with no explicit |name| and |priority_hint|.
+ SchedulerWorkerPoolParams(
+ StandbyThreadPolicy standby_thread_policy,
+ int max_threads,
+ TimeDelta suggested_reclaim_time,
+ SchedulerBackwardCompatibility backward_compatibility =
+ SchedulerBackwardCompatibility::DISABLED);
+
+ SchedulerWorkerPoolParams(const SchedulerWorkerPoolParams& other);
+ SchedulerWorkerPoolParams& operator=(const SchedulerWorkerPoolParams& other);
const std::string& name() const { return name_; }
ThreadPriority priority_hint() const { return priority_hint_; }
StandbyThreadPolicy standby_thread_policy() const {
return standby_thread_policy_;
}
- size_t max_threads() const { return max_threads_; }
+ int max_threads() const { return max_threads_; }
TimeDelta suggested_reclaim_time() const { return suggested_reclaim_time_; }
SchedulerBackwardCompatibility backward_compatibility() const {
return backward_compatibility_;
}
private:
+ // TODO(fdoray): Remove |name_| and |priority_hint_|. https://crbug.com/690706
std::string name_;
ThreadPriority priority_hint_;
StandbyThreadPolicy standby_thread_policy_;
- size_t max_threads_;
+ int max_threads_;
TimeDelta suggested_reclaim_time_;
SchedulerBackwardCompatibility backward_compatibility_;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPoolParams);
};
} // namespace base
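
Note: with the move-only restriction lifted, the params are plain copyable value objects and can be built inline or stored by value. A short sketch using the new name-less constructor:

    const SchedulerWorkerPoolParams params(
        SchedulerWorkerPoolParams::StandbyThreadPolicy::ONE,
        /* max_threads */ 8, TimeDelta::Max());
    SchedulerWorkerPoolParams copy = params;  // legal again: copy is defaulted
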
diff --git a/chromium/base/task_scheduler/scheduler_worker_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
index b8dea8e7454..747c80b27b8 100644
--- a/chromium/base/task_scheduler/scheduler_worker_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
@@ -172,10 +172,11 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
// Create a Sequence with TasksPerSequence() Tasks.
scoped_refptr<Sequence> sequence(new Sequence);
for (size_t i = 0; i < outer_->TasksPerSequence(); ++i) {
- std::unique_ptr<Task> task(new Task(
- FROM_HERE, Bind(&TaskSchedulerWorkerTest::RunTaskCallback,
- Unretained(outer_)),
- TaskTraits(), TimeDelta()));
+ std::unique_ptr<Task> task(
+ new Task(FROM_HERE,
+ BindOnce(&TaskSchedulerWorkerTest::RunTaskCallback,
+ Unretained(outer_)),
+ TaskTraits(), TimeDelta()));
EXPECT_TRUE(outer_->task_tracker_.WillPostTask(task.get()));
sequence->PushTask(std::move(task));
}
@@ -434,7 +435,7 @@ class ControllableDetachDelegate : public SchedulerWorkerDefaultDelegate {
scoped_refptr<Sequence> sequence(new Sequence);
std::unique_ptr<Task> task(new Task(
FROM_HERE,
- Bind(
+ BindOnce(
[](WaitableEvent* work_processed, WaitableEvent* work_running) {
work_processed->Signal();
work_running->Wait();
diff --git a/chromium/base/task_scheduler/sequence.cc b/chromium/base/task_scheduler/sequence.cc
index 601b5402d06..4db5478fe4a 100644
--- a/chromium/base/task_scheduler/sequence.cc
+++ b/chromium/base/task_scheduler/sequence.cc
@@ -15,6 +15,11 @@ namespace internal {
Sequence::Sequence() = default;
bool Sequence::PushTask(std::unique_ptr<Task> task) {
+ DCHECK(task);
+
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task->task);
DCHECK(task->sequenced_time.is_null());
task->sequenced_time = base::TimeTicks::Now();
diff --git a/chromium/base/task_scheduler/sequence_unittest.cc b/chromium/base/task_scheduler/sequence_unittest.cc
index c45d8a87d01..7093b1e94d7 100644
--- a/chromium/base/task_scheduler/sequence_unittest.cc
+++ b/chromium/base/task_scheduler/sequence_unittest.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/test/gtest_util.h"
@@ -24,27 +25,27 @@ class TaskSchedulerSequenceTest : public testing::Test {
TaskSchedulerSequenceTest()
: task_a_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::BACKGROUND),
TimeDelta())),
task_b_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
TimeDelta())),
task_c_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
TimeDelta())),
task_d_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
TimeDelta())),
task_e_owned_(
new Task(FROM_HERE,
- Closure(),
+ Bind(&DoNothing),
TaskTraits().WithPriority(TaskPriority::BACKGROUND),
TimeDelta())),
task_a_(task_a_owned_.get()),
diff --git a/chromium/base/task_scheduler/task.cc b/chromium/base/task_scheduler/task.cc
index bf3bbbd5c19..fc513e3e9fb 100644
--- a/chromium/base/task_scheduler/task.cc
+++ b/chromium/base/task_scheduler/task.cc
@@ -4,20 +4,22 @@
#include "base/task_scheduler/task.h"
+#include <utility>
+
#include "base/critical_closure.h"
namespace base {
namespace internal {
Task::Task(const tracked_objects::Location& posted_from,
- const Closure& task,
+ OnceClosure task,
const TaskTraits& traits,
TimeDelta delay)
: PendingTask(
posted_from,
traits.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN
- ? MakeCriticalClosure(task)
- : task,
+ ? MakeCriticalClosure(std::move(task))
+ : std::move(task),
delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
false), // Not nestable.
// Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before
diff --git a/chromium/base/task_scheduler/task.h b/chromium/base/task_scheduler/task.h
index c5b9bdb53bd..43095f2ae7c 100644
--- a/chromium/base/task_scheduler/task.h
+++ b/chromium/base/task_scheduler/task.h
@@ -6,7 +6,7 @@
#define BASE_TASK_SCHEDULER_TASK_H_
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -28,7 +28,7 @@ struct BASE_EXPORT Task : public PendingTask {
// behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
// automatically adjusted to SKIP_ON_SHUTDOWN.
Task(const tracked_objects::Location& posted_from,
- const Closure& task,
+ OnceClosure task,
const TaskTraits& traits,
TimeDelta delay);
~Task();
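
Note: Task now takes its closure by value as a OnceClosure, so callers transfer ownership instead of copying. A minimal construction sketch (the lambda body is illustrative):

    auto task = MakeUnique<Task>(
        FROM_HERE, BindOnce([] { /* one-shot work */ }),
        TaskTraits(), TimeDelta());
    // |task->task| holds the moved-in OnceClosure; it can run at most once.
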
diff --git a/chromium/base/task_scheduler/task_scheduler.cc b/chromium/base/task_scheduler/task_scheduler.cc
index 00ca4f15ce9..0e621f12226 100644
--- a/chromium/base/task_scheduler/task_scheduler.cc
+++ b/chromium/base/task_scheduler/task_scheduler.cc
@@ -4,8 +4,11 @@
#include "base/task_scheduler/task_scheduler.h"
+#include <algorithm>
+
#include "base/bind.h"
#include "base/logging.h"
+#include "base/sys_info.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_scheduler_impl.h"
#include "base/threading/platform_thread.h"
@@ -20,17 +23,48 @@ TaskScheduler* g_task_scheduler = nullptr;
} // namespace
+TaskScheduler::InitParams::InitParams(
+ const SchedulerWorkerPoolParams& background_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& background_blocking_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& foreground_blocking_worker_pool_params_in)
+ : background_worker_pool_params(background_worker_pool_params_in),
+ background_blocking_worker_pool_params(
+ background_blocking_worker_pool_params_in),
+ foreground_worker_pool_params(foreground_worker_pool_params_in),
+ foreground_blocking_worker_pool_params(
+ foreground_blocking_worker_pool_params_in) {}
+
+TaskScheduler::InitParams::~InitParams() = default;
+
+#if !defined(OS_NACL)
// static
-void TaskScheduler::CreateAndSetSimpleTaskScheduler(int max_threads) {
- std::vector<SchedulerWorkerPoolParams> worker_pool_params_vector;
- worker_pool_params_vector.emplace_back(
- "Simple", ThreadPriority::NORMAL,
- SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY, max_threads,
- TimeDelta::FromSeconds(30));
+void TaskScheduler::CreateAndSetSimpleTaskScheduler(const std::string& name) {
+ using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
+
+ // Values were chosen so that:
+ // * There are few background threads.
+ // * Background threads never outnumber foreground threads.
+ // * The system is utilized maximally by foreground threads.
+ const int num_cores = SysInfo::NumberOfProcessors();
+ constexpr int kBackgroundMaxThreads = 1;
+ constexpr int kBackgroundBlockingMaxThreads = 2;
+ const int kForegroundMaxThreads = std::max(1, num_cores);
+ const int kForegroundBlockingMaxThreads = std::max(2, num_cores);
+
+ constexpr TimeDelta kSuggestedReclaimTime = TimeDelta::FromSeconds(30);
+
CreateAndSetDefaultTaskScheduler(
- worker_pool_params_vector,
- Bind([](const TaskTraits&) -> size_t { return 0; }));
+ name, {{StandbyThreadPolicy::LAZY, kBackgroundMaxThreads,
+ kSuggestedReclaimTime},
+ {StandbyThreadPolicy::LAZY, kBackgroundBlockingMaxThreads,
+ kSuggestedReclaimTime},
+ {StandbyThreadPolicy::LAZY, kForegroundMaxThreads,
+ kSuggestedReclaimTime},
+ {StandbyThreadPolicy::LAZY, kForegroundBlockingMaxThreads,
+ kSuggestedReclaimTime}});
}
+#endif // !defined(OS_NACL)
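+// For example, on a quad-core machine these formulas yield 1 background,
+// 2 background-blocking, 4 foreground and 4 foreground-blocking threads, so
+// foreground threads always match or outnumber background ones.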
// static
void TaskScheduler::CreateAndSetDefaultTaskScheduler(
@@ -41,6 +75,12 @@ void TaskScheduler::CreateAndSetDefaultTaskScheduler(
worker_pool_params_vector, worker_pool_index_for_traits_callback));
}
+void TaskScheduler::CreateAndSetDefaultTaskScheduler(
+ const std::string& name,
+ const InitParams& init_params) {
+ SetInstance(internal::TaskSchedulerImpl::Create(name, init_params));
+}
+
// static
void TaskScheduler::SetInstance(std::unique_ptr<TaskScheduler> task_scheduler) {
delete g_task_scheduler;
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index da2cb79504e..d19841f3ba8 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -6,16 +6,19 @@
#define BASE_TASK_SCHEDULER_TASK_SCHEDULER_H_
#include <memory>
+#include <string>
#include <vector>
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_traits.h"
#include "base/time/time.h"
+#include "build/build_config.h"
namespace gin {
class V8Platform;
@@ -28,7 +31,6 @@ class Location;
namespace base {
class HistogramBase;
-class SchedulerWorkerPoolParams;
// Interface for a task scheduler and static methods to manage the instance used
// by the post_task.h API. Note: all base/task_scheduler users should go through
@@ -36,6 +38,22 @@ class SchedulerWorkerPoolParams;
// which manages the process' instance.
class BASE_EXPORT TaskScheduler {
public:
+ struct BASE_EXPORT InitParams {
+ InitParams(
+ const SchedulerWorkerPoolParams& background_worker_pool_params_in,
+ const SchedulerWorkerPoolParams&
+ background_blocking_worker_pool_params_in,
+ const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
+ const SchedulerWorkerPoolParams&
+ foreground_blocking_worker_pool_params_in);
+ ~InitParams();
+
+ const SchedulerWorkerPoolParams background_worker_pool_params;
+ const SchedulerWorkerPoolParams background_blocking_worker_pool_params;
+ const SchedulerWorkerPoolParams foreground_worker_pool_params;
+ const SchedulerWorkerPoolParams foreground_blocking_worker_pool_params;
+ };
+
// Returns the index of the worker pool in which a task with |traits| should
// run. This should be coded in a future-proof way: new traits should
// gracefully map to a default pool.
@@ -52,7 +70,7 @@ class BASE_EXPORT TaskScheduler {
virtual void PostDelayedTaskWithTraits(
const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) = 0;
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
@@ -71,6 +89,19 @@ class BASE_EXPORT TaskScheduler {
virtual scoped_refptr<SingleThreadTaskRunner>
CreateSingleThreadTaskRunnerWithTraits(const TaskTraits& traits) = 0;
+#if defined(OS_WIN)
+ // Returns a SingleThreadTaskRunner whose PostTask invocations result in
+ // scheduling tasks using |traits| in a COM Single-Threaded Apartment. Tasks
+ // run in the same Single-Threaded Apartment in posting order for the returned
+ // SingleThreadTaskRunner. There is not necessarily a one-to-one
+ // correspondence between SingleThreadTaskRunners and Single-Threaded
+ // Apartments. The implementation is free to share apartments or create new
+ // apartments as necessary. In either case, care should be taken to make sure
+ // COM pointers are not smuggled across apartments.
+ virtual scoped_refptr<SingleThreadTaskRunner>
+ CreateCOMSTATaskRunnerWithTraits(const TaskTraits& traits) = 0;
+#endif // defined(OS_WIN)
+
// Returns a vector of all histograms available in this task scheduler.
virtual std::vector<const HistogramBase*> GetHistograms() const = 0;
@@ -107,21 +138,37 @@ class BASE_EXPORT TaskScheduler {
// synchronization is required to use the post_task.h API after registering a
// new TaskScheduler.
- // Creates and sets a task scheduler with one worker pool that can have up to
- // |max_threads| threads. CHECKs on failure. For tests, prefer
- // base::test::ScopedTaskScheduler (ensures isolation).
- static void CreateAndSetSimpleTaskScheduler(int max_threads);
+#if !defined(OS_NACL)
+ // Creates and sets a task scheduler using default params. |name| is used to
+ // label threads and histograms. It should identify the component that calls
+ // this. CHECKs on failure. For tests, prefer base::test::ScopedTaskScheduler
+ // (ensures isolation).
+ static void CreateAndSetSimpleTaskScheduler(const std::string& name);
+#endif // !defined(OS_NACL)
// Creates and sets a task scheduler with custom worker pools. CHECKs on
// failure. |worker_pool_params_vector| describes the worker pools to create.
// |worker_pool_index_for_traits_callback| returns the index in |worker_pools|
// of the worker pool in which a task with given traits should run. For tests,
// prefer base::test::ScopedTaskScheduler (ensures isolation).
+ //
+ // Deprecated. Use the overload below instead. https://crbug.com/690706
static void CreateAndSetDefaultTaskScheduler(
const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector,
const WorkerPoolIndexForTraitsCallback&
worker_pool_index_for_traits_callback);
+ // Creates and sets a task scheduler using custom params. |name| is used to
+ // label threads and histograms. It should identify the component that creates
+ // the TaskScheduler. |init_params| is used to initialize the worker pools.
+ // CHECKs on failure. For tests, prefer base::test::ScopedTaskScheduler
+ // (ensures isolation).
+ //
+ // Note: The names and priority hints in |init_params| are ignored (ref. TODO
+ // to remove them).
+ static void CreateAndSetDefaultTaskScheduler(const std::string& name,
+ const InitParams& init_params);
+
// Registers |task_scheduler| to handle tasks posted through the post_task.h
// API for this process. For tests, prefer base::test::ScopedTaskScheduler
// (ensures isolation).
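
Note: a minimal sketch of the new initialization path (the process name and per-pool thread counts are illustrative; the braced initializers use the name-less SchedulerWorkerPoolParams constructor added in this CL):

    using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
    constexpr TimeDelta kReclaim = TimeDelta::FromSeconds(30);
    TaskScheduler::CreateAndSetDefaultTaskScheduler(
        "MyProcess",
        TaskScheduler::InitParams(
            {StandbyThreadPolicy::LAZY, 1, kReclaim},    // background
            {StandbyThreadPolicy::LAZY, 2, kReclaim},    // background blocking
            {StandbyThreadPolicy::LAZY, 4, kReclaim},    // foreground
            {StandbyThreadPolicy::LAZY, 4, kReclaim}));  // foreground blocking
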
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index 6157635514c..9d0320caf1a 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -10,6 +10,7 @@
#include "base/bind_helpers.h"
#include "base/memory/ptr_util.h"
#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/sequence_sort_key.h"
#include "base/task_scheduler/task.h"
@@ -23,6 +24,57 @@
namespace base {
namespace internal {
+namespace {
+
+enum EnvironmentType {
+ BACKGROUND = 0,
+ BACKGROUND_BLOCKING,
+ FOREGROUND,
+ FOREGROUND_BLOCKING,
+ ENVIRONMENT_COUNT // Always last.
+};
+
+// Order must match the EnvironmentType enum.
+constexpr struct {
+ // The threads and histograms of this environment will be labeled with
+ // the task scheduler name concatenated to this.
+ const char* name_suffix;
+
+ // Preferred priority for threads in this environment; the actual thread
+ // priority depends on shutdown state and platform capabilities.
+ ThreadPriority priority_hint;
+} kEnvironmentParams[] = {
+ {"Background", base::ThreadPriority::BACKGROUND},
+ {"BackgroundBlocking", base::ThreadPriority::BACKGROUND},
+ {"Foreground", base::ThreadPriority::NORMAL},
+ {"ForegroundBlocking", base::ThreadPriority::NORMAL},
+};
+
+size_t GetEnvironmentIndexForTraits(const TaskTraits& traits) {
+ const bool is_background =
+ traits.priority() == base::TaskPriority::BACKGROUND;
+ if (traits.may_block() || traits.with_base_sync_primitives())
+ return is_background ? BACKGROUND_BLOCKING : FOREGROUND_BLOCKING;
+ return is_background ? BACKGROUND : FOREGROUND;
+}
+
+void AddAugmentedSchedulerWorkerPoolParamsToVector(
+ EnvironmentType environment_type,
+ const std::string& task_scheduler_name,
+ const SchedulerWorkerPoolParams& params,
+ std::vector<SchedulerWorkerPoolParams>*
+ scheduler_worker_pool_params_vector) {
+ DCHECK_EQ(static_cast<size_t>(environment_type),
+ scheduler_worker_pool_params_vector->size());
+ scheduler_worker_pool_params_vector->emplace_back(
+ task_scheduler_name + kEnvironmentParams[environment_type].name_suffix,
+ kEnvironmentParams[environment_type].priority_hint,
+ params.standby_thread_policy(), params.max_threads(),
+ params.suggested_reclaim_time(), params.backward_compatibility());
+}
+
+} // namespace
+
// static
std::unique_ptr<TaskSchedulerImpl> TaskSchedulerImpl::Create(
const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector,
@@ -34,6 +86,33 @@ std::unique_ptr<TaskSchedulerImpl> TaskSchedulerImpl::Create(
return scheduler;
}
+// static
+std::unique_ptr<TaskSchedulerImpl> TaskSchedulerImpl::Create(
+ const std::string& name,
+ const TaskScheduler::InitParams& init_params) {
+ // Create a vector of SchedulerWorkerPoolParams using names and priority hints
+ // derived from |kEnvironmentParams| and other params from |init_params|.
+ std::vector<SchedulerWorkerPoolParams> worker_pool_params_vector;
+ AddAugmentedSchedulerWorkerPoolParamsToVector(
+ BACKGROUND, name, init_params.background_worker_pool_params,
+ &worker_pool_params_vector);
+ AddAugmentedSchedulerWorkerPoolParamsToVector(
+ BACKGROUND_BLOCKING, name,
+ init_params.background_blocking_worker_pool_params,
+ &worker_pool_params_vector);
+ AddAugmentedSchedulerWorkerPoolParamsToVector(
+ FOREGROUND, name, init_params.foreground_worker_pool_params,
+ &worker_pool_params_vector);
+ AddAugmentedSchedulerWorkerPoolParamsToVector(
+ FOREGROUND_BLOCKING, name,
+ init_params.foreground_blocking_worker_pool_params,
+ &worker_pool_params_vector);
+ DCHECK_EQ(static_cast<size_t>(ENVIRONMENT_COUNT),
+ worker_pool_params_vector.size());
+
+ return Create(worker_pool_params_vector, Bind(&GetEnvironmentIndexForTraits));
+}
+
TaskSchedulerImpl::~TaskSchedulerImpl() {
#if DCHECK_IS_ON()
DCHECK(join_for_testing_returned_.IsSet());
@@ -43,12 +122,12 @@ TaskSchedulerImpl::~TaskSchedulerImpl() {
void TaskSchedulerImpl::PostDelayedTaskWithTraits(
const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
// Post |task| as part of a one-off single-task Sequence.
GetWorkerPoolForTraits(traits)->PostTaskWithSequence(
- MakeUnique<Task>(from_here, task, traits, delay),
- make_scoped_refptr(new Sequence), nullptr);
+ MakeUnique<Task>(from_here, std::move(task), traits, delay),
+ make_scoped_refptr(new Sequence));
}
scoped_refptr<TaskRunner> TaskSchedulerImpl::CreateTaskRunnerWithTraits(
@@ -66,9 +145,17 @@ TaskSchedulerImpl::CreateSequencedTaskRunnerWithTraits(
scoped_refptr<SingleThreadTaskRunner>
TaskSchedulerImpl::CreateSingleThreadTaskRunnerWithTraits(
const TaskTraits& traits) {
- return GetWorkerPoolForTraits(traits)->CreateSingleThreadTaskRunnerWithTraits(
+ return single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(traits);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner>
+TaskSchedulerImpl::CreateCOMSTATaskRunnerWithTraits(const TaskTraits& traits) {
+ return single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
traits);
}
+#endif // defined(OS_WIN)
std::vector<const HistogramBase*> TaskSchedulerImpl::GetHistograms() const {
std::vector<const HistogramBase*> histograms;
@@ -98,6 +185,7 @@ void TaskSchedulerImpl::JoinForTesting() {
#if DCHECK_IS_ON()
DCHECK(!join_for_testing_returned_.IsSet());
#endif
+ single_thread_task_runner_manager_->JoinForTesting();
for (const auto& worker_pool : worker_pools_)
worker_pool->DisallowWorkerDetachmentForTesting();
for (const auto& worker_pool : worker_pools_)
@@ -148,6 +236,11 @@ void TaskSchedulerImpl::Initialize(
delayed_task_manager_ =
base::MakeUnique<DelayedTaskManager>(service_thread_.task_runner());
+ single_thread_task_runner_manager_ =
+ MakeUnique<SchedulerSingleThreadTaskRunnerManager>(
+ worker_pool_params_vector, worker_pool_index_for_traits_callback_,
+ task_tracker_.get(), delayed_task_manager_.get());
+
// Callback invoked by workers to re-enqueue a sequence in the appropriate
// PriorityQueue.
const SchedulerWorkerPoolImpl::ReEnqueueSequenceCallback
@@ -159,10 +252,11 @@ void TaskSchedulerImpl::Initialize(
// Passing pointers to objects owned by |this| to
// SchedulerWorkerPoolImpl::Create() is safe because a TaskSchedulerImpl
// can't be deleted before all its worker pools have been joined.
- worker_pools_.push_back(SchedulerWorkerPoolImpl::Create(
- worker_pool_params, re_enqueue_sequence_callback, task_tracker_.get(),
+ worker_pools_.push_back(MakeUnique<SchedulerWorkerPoolImpl>(
+ worker_pool_params.name(), worker_pool_params.priority_hint(),
+ re_enqueue_sequence_callback, task_tracker_.get(),
delayed_task_manager_.get()));
- CHECK(worker_pools_.back());
+ worker_pools_.back()->Start(worker_pool_params);
}
}
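[editor's note] Worker pool setup is now two-phase: construct, then Start(). A condensed sketch of the pattern from the hunk above (the behavior attributed to Start() is inferred from this patch):

    auto pool = MakeUnique<SchedulerWorkerPoolImpl>(
        params.name(), params.priority_hint(), re_enqueue_sequence_callback,
        task_tracker_.get(), delayed_task_manager_.get());
    pool->Start(params);  // presumably spawns the workers; the constructor
                          // itself can no longer fail, so the old CHECK on
                          // Create()'s return value is gone.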
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.h b/chromium/base/task_scheduler/task_scheduler_impl.h
index 1e80d5c5d17..1e31888d401 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.h
+++ b/chromium/base/task_scheduler/task_scheduler_impl.h
@@ -19,6 +19,7 @@
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_traits.h"
#include "base/threading/thread.h"
+#include "build/build_config.h"
namespace base {
@@ -28,15 +29,29 @@ class SchedulerWorkerPoolParams;
namespace internal {
class DelayedTaskManager;
+class SchedulerSingleThreadTaskRunnerManager;
class TaskTracker;
// Default TaskScheduler implementation. This class is thread-safe.
class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
public:
// Creates and returns an initialized TaskSchedulerImpl. CHECKs on failure.
+ // |name| is used to label threads and histograms. It should identify the
+ // component that creates the TaskScheduler. |init_params| contains params to
+ // initialize worker pools.
+ //
+ // Note: The names and priority hints in |init_params| are ignored.
+ // https://crbug.com/690706
+ static std::unique_ptr<TaskSchedulerImpl> Create(
+ const std::string& name,
+ const TaskScheduler::InitParams& init_params);
+
+ // Creates and returns an initialized TaskSchedulerImpl. CHECKs on failure.
// |worker_pool_params_vector| describes the worker pools to create.
// |worker_pool_index_for_traits_callback| returns the index in |worker_pools|
// of the worker pool in which a task with given traits should run.
+ //
+ // Deprecated. https://crbug.com/690706
static std::unique_ptr<TaskSchedulerImpl> Create(
const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector,
const WorkerPoolIndexForTraitsCallback&
@@ -47,7 +62,7 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
// TaskScheduler:
void PostDelayedTaskWithTraits(const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
const TaskTraits& traits) override;
@@ -55,6 +70,10 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
const TaskTraits& traits) override;
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
const TaskTraits& traits) override;
+#if defined(OS_WIN)
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+#endif // defined(OS_WIN)
std::vector<const HistogramBase*> GetHistograms() const override;
int GetMaxConcurrentTasksWithTraitsDeprecated(
const TaskTraits& traits) const override;
@@ -80,6 +99,8 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
Thread service_thread_;
std::unique_ptr<TaskTracker> task_tracker_;
std::unique_ptr<DelayedTaskManager> delayed_task_manager_;
+ std::unique_ptr<SchedulerSingleThreadTaskRunnerManager>
+ single_thread_task_runner_manager_;
const WorkerPoolIndexForTraitsCallback worker_pool_index_for_traits_callback_;
std::vector<std::unique_ptr<SchedulerWorkerPoolImpl>> worker_pools_;
diff --git a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
index 6aaf4af8753..e0f88691bba 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
@@ -28,6 +28,10 @@
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_WIN)
+#include <objbase.h>
+#endif // defined(OS_WIN)
+
namespace base {
namespace internal {
@@ -238,8 +242,8 @@ TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelay) {
WaitableEvent::InitialState::NOT_SIGNALED);
scheduler_->PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
- Bind(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
- Unretained(&task_ran)),
+ BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
+ Unretained(&task_ran)),
TimeDelta());
task_ran.Wait();
}
@@ -253,9 +257,9 @@ TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelay) {
WaitableEvent::InitialState::NOT_SIGNALED);
scheduler_->PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
- Bind(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
- TimeTicks::Now() + TestTimeouts::tiny_timeout(),
- Unretained(&task_ran)),
+ BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
+ TimeTicks::Now() + TestTimeouts::tiny_timeout(),
+ Unretained(&task_ran)),
TestTimeouts::tiny_timeout());
task_ran.Wait();
}
@@ -322,5 +326,73 @@ TEST_F(TaskSchedulerImplTest, GetMaxConcurrentTasksWithTraitsDeprecated) {
TaskTraits().WithPriority(TaskPriority::USER_BLOCKING).MayBlock()));
}
+// Verify that the RunsTasksOnCurrentThread() method of a SequencedTaskRunner
+// returns false when called from a task that isn't part of the sequence.
+TEST_F(TaskSchedulerImplTest, SequencedRunsTasksOnCurrentThread) {
+ auto single_thread_task_runner =
+ scheduler_->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+ auto sequenced_task_runner =
+ scheduler_->CreateSequencedTaskRunnerWithTraits(TaskTraits());
+
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ single_thread_task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(
+ [](scoped_refptr<TaskRunner> sequenced_task_runner,
+ WaitableEvent* task_ran) {
+ EXPECT_FALSE(sequenced_task_runner->RunsTasksOnCurrentThread());
+ task_ran->Signal();
+ },
+ sequenced_task_runner, Unretained(&task_ran)));
+ task_ran.Wait();
+}
+
+// Verify that the RunsTasksOnCurrentThread() method of a SingleThreadTaskRunner
+// returns false when called from a task that doesn't run on its thread.
+TEST_F(TaskSchedulerImplTest, SingleThreadRunsTasksOnCurrentThread) {
+ auto sequenced_task_runner =
+ scheduler_->CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ auto single_thread_task_runner =
+ scheduler_->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ sequenced_task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(
+ [](scoped_refptr<TaskRunner> single_thread_task_runner,
+ WaitableEvent* task_ran) {
+ EXPECT_FALSE(single_thread_task_runner->RunsTasksOnCurrentThread());
+ task_ran->Signal();
+ },
+ single_thread_task_runner, Unretained(&task_ran)));
+ task_ran.Wait();
+}
+
+#if defined(OS_WIN)
+TEST_F(TaskSchedulerImplTest, COMSTATaskRunnersRunWithCOMSTA) {
+ auto com_sta_task_runner =
+ scheduler_->CreateCOMSTATaskRunnerWithTraits(TaskTraits());
+
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ com_sta_task_runner->PostTask(
+ FROM_HERE,
+ Bind(
+ [](scoped_refptr<TaskRunner> com_sta_task_runner,
+ WaitableEvent* task_ran) {
+ HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if (SUCCEEDED(hr)) {
+ ADD_FAILURE() << "COM STA was not initialized on this thread";
+ CoUninitialize();
+ }
+ task_ran->Signal();
+ },
+ com_sta_task_runner, Unretained(&task_ran)));
+ task_ran.Wait();
+}
+#endif // defined(OS_WIN)
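[editor's note] The test above leans on a COM apartment rule: CoInitializeEx(nullptr, COINIT_MULTITHREADED) fails with RPC_E_CHANGED_MODE on a thread that is already initialized as a single-threaded apartment. The probe in isolation:

    HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
    if (hr == RPC_E_CHANGED_MODE) {
      // Thread is already in an STA: what the test expects.
    } else if (SUCCEEDED(hr)) {
      CoUninitialize();  // we changed the apartment; undo before failing
    }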
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index 85378b6234a..e1596f7b51f 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -43,7 +43,7 @@ namespace internal {
namespace {
-constexpr size_t kLoadTestNumIterations = 100;
+constexpr size_t kLoadTestNumIterations = 75;
// Invokes a closure asynchronously.
class CallbackThread : public SimpleThread {
@@ -263,24 +263,34 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunBeforeShutdown) {
}
TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
- // Create a task that will block until |event| is signaled.
- WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ // Create a task that signals |task_running| and blocks until |task_barrier|
+ // is signaled.
+ WaitableEvent task_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_barrier(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
auto blocked_task = base::MakeUnique<Task>(
- FROM_HERE, Bind(&WaitableEvent::Wait, Unretained(&event)),
+ FROM_HERE,
+ Bind(
+ [](WaitableEvent* task_running, WaitableEvent* task_barrier) {
+ task_running->Signal();
+ task_barrier->Wait();
+ },
+ Unretained(&task_running), base::Unretained(&task_barrier)),
TaskTraits().WithBaseSyncPrimitives().WithShutdownBehavior(GetParam()),
TimeDelta());
// Inform |task_tracker_| that |blocked_task| will be posted.
EXPECT_TRUE(tracker_.WillPostTask(blocked_task.get()));
- // Run the task asynchronouly.
+ // Create a thread to run the task. Wait until the task starts running.
ThreadPostingAndRunningTask thread_running_task(
&tracker_, std::move(blocked_task),
ThreadPostingAndRunningTask::Action::RUN, false);
thread_running_task.Start();
+ task_running.Wait();
- // Initiate shutdown while the task is running.
+ // Initiate shutdown after the task has been scheduled.
CallShutdownAsync();
if (GetParam() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) {
@@ -292,7 +302,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
}
// Unblock the task.
- event.Signal();
+ task_barrier.Signal();
thread_running_task.Join();
// Shutdown should now complete for a non CONTINUE_ON_SHUTDOWN task.
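[editor's note] The two-event handshake introduced above is the standard way to guarantee a task is actually running before acting on it; in outline:

    WaitableEvent task_running(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
    WaitableEvent task_barrier(WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED);
    // Posted task:
    //   task_running.Signal();  // announce "I am running"
    //   task_barrier.Wait();    // hold until the test is done asserting
    // Test thread:
    task_running.Wait();    // the task is now guaranteed to have started
    // ...act (e.g. initiate shutdown)...
    task_barrier.Signal();  // let the task finish

Waiting on |task_running| removes the race in the old single-event version, where shutdown could begin before the task had been scheduled at all.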
@@ -418,7 +428,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, SingletonAllowed) {
TaskTracker tracker;
std::unique_ptr<Task> task(
- new Task(FROM_HERE, Bind(&ThreadRestrictions::AssertSingletonAllowed),
+ new Task(FROM_HERE, BindOnce(&ThreadRestrictions::AssertSingletonAllowed),
TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta()));
EXPECT_TRUE(tracker.WillPostTask(task.get()));
@@ -492,7 +502,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, TaskRunnerHandleIsNotSetOnParallel) {
// Create a task that will verify that TaskRunnerHandles are not set in its
// scope per no TaskRunner ref being set to it.
std::unique_ptr<Task> verify_task(
- new Task(FROM_HERE, Bind(&VerifyNoTaskRunnerHandle),
+ new Task(FROM_HERE, BindOnce(&VerifyNoTaskRunnerHandle),
TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta()));
RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
@@ -513,8 +523,9 @@ TEST_P(TaskSchedulerTaskTrackerTest,
// set to |test_task_runner| in its scope per |sequenced_task_runner_ref|
// being set to it.
std::unique_ptr<Task> verify_task(
- new Task(FROM_HERE, Bind(&VerifySequencedTaskRunnerHandle,
- base::Unretained(test_task_runner.get())),
+ new Task(FROM_HERE,
+ BindOnce(&VerifySequencedTaskRunnerHandle,
+ base::Unretained(test_task_runner.get())),
TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta()));
verify_task->sequenced_task_runner_ref = test_task_runner;
@@ -538,8 +549,9 @@ TEST_P(TaskSchedulerTaskTrackerTest,
// to |test_task_runner| in its scope per |single_thread_task_runner_ref|
// being set on it.
std::unique_ptr<Task> verify_task(
- new Task(FROM_HERE, Bind(&VerifyThreadTaskRunnerHandle,
- base::Unretained(test_task_runner.get())),
+ new Task(FROM_HERE,
+ BindOnce(&VerifyThreadTaskRunnerHandle,
+ base::Unretained(test_task_runner.get())),
TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta()));
verify_task->single_thread_task_runner_ref = test_task_runner;
@@ -547,7 +559,7 @@ TEST_P(TaskSchedulerTaskTrackerTest,
}
TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingDelayedTask) {
- const Task delayed_task(FROM_HERE, Bind(&DoNothing),
+ const Task delayed_task(FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(GetParam()),
TimeDelta::FromDays(1));
tracker_.WillPostTask(&delayed_task);
diff --git a/chromium/base/task_scheduler/task_unittest.cc b/chromium/base/task_scheduler/task_unittest.cc
index e6a2c518a0a..fb076d761be 100644
--- a/chromium/base/task_scheduler/task_unittest.cc
+++ b/chromium/base/task_scheduler/task_unittest.cc
@@ -17,7 +17,7 @@ namespace internal {
// adjusted to SKIP_ON_SHUTDOWN. The shutdown behavior of other delayed tasks
// should not change.
TEST(TaskSchedulerTaskTest, ShutdownBehaviorChangeWithDelay) {
- Task continue_on_shutdown(FROM_HERE, Bind(&DoNothing),
+ Task continue_on_shutdown(FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(
TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
TimeDelta::FromSeconds(1));
@@ -25,14 +25,14 @@ TEST(TaskSchedulerTaskTest, ShutdownBehaviorChangeWithDelay) {
continue_on_shutdown.traits.shutdown_behavior());
Task skip_on_shutdown(
- FROM_HERE, Bind(&DoNothing),
+ FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
TimeDelta::FromSeconds(1));
EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
skip_on_shutdown.traits.shutdown_behavior());
Task block_shutdown(
- FROM_HERE, Bind(&DoNothing),
+ FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(TaskShutdownBehavior::BLOCK_SHUTDOWN),
TimeDelta::FromSeconds(1));
EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
@@ -41,7 +41,7 @@ TEST(TaskSchedulerTaskTest, ShutdownBehaviorChangeWithDelay) {
// Verify that the shutdown behavior of undelayed tasks is not adjusted.
TEST(TaskSchedulerTaskTest, NoShutdownBehaviorChangeNoDelay) {
- Task continue_on_shutdown(FROM_HERE, Bind(&DoNothing),
+ Task continue_on_shutdown(FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(
TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
TimeDelta());
@@ -49,14 +49,14 @@ TEST(TaskSchedulerTaskTest, NoShutdownBehaviorChangeNoDelay) {
continue_on_shutdown.traits.shutdown_behavior());
Task skip_on_shutdown(
- FROM_HERE, Bind(&DoNothing),
+ FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
TimeDelta());
EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
skip_on_shutdown.traits.shutdown_behavior());
Task block_shutdown(
- FROM_HERE, Bind(&DoNothing),
+ FROM_HERE, BindOnce(&DoNothing),
TaskTraits().WithShutdownBehavior(TaskShutdownBehavior::BLOCK_SHUTDOWN),
TimeDelta());
EXPECT_EQ(TaskShutdownBehavior::BLOCK_SHUTDOWN,
diff --git a/chromium/base/task_scheduler/test_task_factory.cc b/chromium/base/task_scheduler/test_task_factory.cc
index 3b4e7235f01..8db2042e05c 100644
--- a/chromium/base/task_scheduler/test_task_factory.cc
+++ b/chromium/base/task_scheduler/test_task_factory.cc
@@ -37,8 +37,8 @@ bool TestTaskFactory::PostTask(PostNestedTask post_nested_task,
AutoLock auto_lock(lock_);
return task_runner_->PostTask(
FROM_HERE,
- Bind(&TestTaskFactory::RunTaskCallback, Unretained(this),
- num_posted_tasks_++, post_nested_task, after_task_closure));
+ BindOnce(&TestTaskFactory::RunTaskCallback, Unretained(this),
+ num_posted_tasks_++, post_nested_task, after_task_closure));
}
void TestTaskFactory::WaitForAllTasksToRun() const {
diff --git a/chromium/base/template_util.h b/chromium/base/template_util.h
index 42552107cfb..536cad8f3a3 100644
--- a/chromium/base/template_util.h
+++ b/chromium/base/template_util.h
@@ -12,17 +12,6 @@
#include "build/build_config.h"
-// This hacks around libstdc++ 4.6 missing stuff in type_traits, while we need
-// to support it.
-#define CR_GLIBCXX_4_7_0 20120322
-#define CR_GLIBCXX_4_5_4 20120702
-#define CR_GLIBCXX_4_6_4 20121127
-#if defined(__GLIBCXX__) && \
- (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
- __GLIBCXX__ == CR_GLIBCXX_4_6_4)
-#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#endif
-
// Some versions of libstdc++ have partial support for type_traits, but miss
// a smaller subset while removing some of the older non-standard stuff. Assume
// that all versions below 5.0 fall in this category, along with one 5.0
@@ -51,46 +40,8 @@ template <class T> struct is_non_const_reference : std::false_type {};
template <class T> struct is_non_const_reference<T&> : std::true_type {};
template <class T> struct is_non_const_reference<const T&> : std::false_type {};
-// is_assignable
-
namespace internal {
-template <typename First, typename Second>
-struct SelectSecond {
- using type = Second;
-};
-
-struct Any {
- Any(...);
-};
-
-// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
-// value is a true_type.
-template <class Lvalue, class Rvalue>
-typename internal::SelectSecond<
- decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
- std::true_type>::type
-IsAssignableTest(Lvalue&&, Rvalue&&);
-
-// False case: Otherwise the return value is a false_type.
-template <class Rvalue>
-std::false_type IsAssignableTest(internal::Any, Rvalue&&);
-
-// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
-// determine the type of IsAssignableImpl.
-template <class Lvalue,
- class Rvalue,
- bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
-struct IsAssignableImpl
- : public std::common_type<decltype(
- internal::IsAssignableTest(std::declval<Lvalue>(),
- std::declval<Rvalue>()))>::type {};
-
-// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
-// is false_type.
-template <class Lvalue, class Rvalue>
-struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
-
// Uses expression SFINAE to detect whether using operator<< would work.
template <typename T, typename = void>
struct SupportsOstreamOperator : std::false_type {};
@@ -102,56 +53,10 @@ struct SupportsOstreamOperator<T,
} // namespace internal
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class Lvalue, class Rvalue>
-struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
-
-// is_copy_assignable is true if a T const& is assignable to a T&.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class T>
-struct is_copy_assignable
- : public is_assignable<typename std::add_lvalue_reference<T>::type,
- typename std::add_lvalue_reference<
- typename std::add_const<T>::type>::type> {};
-
-// is_move_assignable is true if a T&& is assignable to a T&.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-template <class T>
-struct is_move_assignable
- : public is_assignable<typename std::add_lvalue_reference<T>::type,
- const typename std::add_rvalue_reference<T>::type> {
-};
-
-// underlying_type produces the integer type backing an enum type.
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
-template <typename T>
-struct underlying_type {
- using type = __underlying_type(T);
-};
-#else
-template <typename T>
-using underlying_type = std::underlying_type<T>;
-#endif
-
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace.
-#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
-template <class T>
-using is_trivially_destructible = std::has_trivial_destructor<T>;
-#else
-template <class T>
-using is_trivially_destructible = std::is_trivially_destructible<T>;
-#endif
-
// is_trivially_copyable is especially hard to get right.
// - Older versions of libstdc++ will fail to have it like they do for other
-// type traits. In this case we should provide it based on compiler
-// intrinsics. This is covered by the CR_USE_FALLBACKS_FOR_OLD_GLIBCXX define.
+// type traits. This has become a subset of the second point, but used to be
+// handled independently.
// - An experimental release of gcc includes most of type_traits but misses
// is_trivially_copyable, so we still have to avoid using libstdc++ in this
// case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
@@ -172,8 +77,7 @@ using is_trivially_destructible = std::is_trivially_destructible<T>;
// TODO(crbug.com/554293): Remove this when all platforms have this in the std
// namespace and it works with gcc as needed.
-#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX) || \
- defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
+#if defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
template <typename T>
struct is_trivially_copyable {
@@ -193,7 +97,6 @@ using is_trivially_copyable = std::is_trivially_copyable<T>;
} // namespace base
-#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
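[editor's note] For the remaining fallback cases, the is_trivially_copyable struct at the end of the hunk above is, in essence, built on compiler intrinsics rather than the library trait. A hedged reconstruction, assuming GCC/Clang-style intrinsics:

    template <typename T>
    struct is_trivially_copyable {
      // Approximates std::is_trivially_copyable with the pieces older
      // toolchains do provide; not exact, but sufficient for base's uses.
      static constexpr bool value = __has_trivial_copy(T) &&
                                    __has_trivial_assign(T) &&
                                    __has_trivial_destructor(T);
    };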
diff --git a/chromium/base/template_util_unittest.cc b/chromium/base/template_util_unittest.cc
index 921596474b6..70d28c8e4e2 100644
--- a/chromium/base/template_util_unittest.cc
+++ b/chromium/base/template_util_unittest.cc
@@ -30,39 +30,6 @@ static_assert(!is_non_const_reference<const int&>::value,
"IsNonConstReference");
static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
-class AssignParent {};
-class AssignChild : AssignParent {};
-
-// is_assignable<Type1, Type2>
-static_assert(!is_assignable<int, int>::value, "IsAssignable"); // 1 = 1;
-static_assert(!is_assignable<int, double>::value, "IsAssignable");
-static_assert(is_assignable<int&, int>::value, "IsAssignable");
-static_assert(is_assignable<int&, double>::value, "IsAssignable");
-static_assert(is_assignable<int&, int&>::value, "IsAssignable");
-static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
-static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
-static_assert(!is_assignable<AssignParent&, AssignChild>::value,
- "IsAssignable");
-static_assert(!is_assignable<AssignChild&, AssignParent>::value,
- "IsAssignable");
-
-struct AssignCopy {};
-struct AssignNoCopy {
- AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
- AssignNoCopy& operator=(const AssignNoCopy&) = delete;
-};
-struct AssignNoMove {
- AssignNoMove& operator=(AssignNoMove&&) = delete;
- AssignNoMove& operator=(const AssignNoMove&) = delete;
-};
-
-static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
-static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
-
-static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
-static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
-static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
-
// A few standard types that definitely support printing.
static_assert(internal::SupportsOstreamOperator<int>::value,
"ints should be printable");
@@ -102,28 +69,5 @@ static_assert(
internal::SupportsOstreamOperator<const StructWithOperator&>::value,
"struct with operator<< should be printable by const ref");
-// underlying type of enums
-static_assert(std::is_integral<underlying_type<SimpleEnum>::type>::value,
- "simple enum must have some integral type");
-static_assert(
- std::is_same<underlying_type<EnumWithExplicitType>::type, uint64_t>::value,
- "explicit type must be detected");
-static_assert(std::is_same<underlying_type<ScopedEnum>::type, int>::value,
- "scoped enum defaults to int");
-
-struct TriviallyDestructible {
- int field;
-};
-
-class NonTriviallyDestructible {
- ~NonTriviallyDestructible() {}
-};
-
-static_assert(is_trivially_destructible<int>::value, "IsTriviallyDestructible");
-static_assert(is_trivially_destructible<TriviallyDestructible>::value,
- "IsTriviallyDestructible");
-static_assert(!is_trivially_destructible<NonTriviallyDestructible>::value,
- "IsTriviallyDestructible");
-
} // namespace
} // namespace base
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index 256cae257f3..d79d7f32c8e 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -84,6 +84,8 @@ static_library("test_support") {
"scoped_mock_time_message_loop_task_runner.h",
"scoped_path_override.cc",
"scoped_path_override.h",
+ "scoped_task_environment.cc",
+ "scoped_task_environment.h",
"scoped_task_scheduler.cc",
"scoped_task_scheduler.h",
"sequenced_task_runner_test_template.cc",
@@ -329,6 +331,7 @@ if (is_android) {
generate_jni("base_unittests_jni_headers") {
sources = [
"android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
"android/java/src/org/chromium/base/TestSystemMessageHandler.java",
"android/java/src/org/chromium/base/TestUiThread.java",
]
@@ -353,7 +356,7 @@ if (is_android) {
]
srcjar_deps = [ ":test_support_java_aidl" ]
java_files = [
- "android/java/src/org/chromium/base/FileDescriptorInfo.java",
+ "android/java/src/org/chromium/base/ChildProcessConstants.java",
"android/java/src/org/chromium/base/MainReturnCodeResult.java",
"android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
"android/java/src/org/chromium/base/MultiprocessTestClientService.java",
@@ -367,9 +370,14 @@ if (is_android) {
android_aidl("test_support_java_aidl") {
testonly = true
- import_include = "android/java/src"
+ import_include = [
+ "android/java/src",
+ "//base/android/java/src",
+ ]
sources = [
+ "android/java/src/org/chromium/base/ITestCallback.aidl",
"android/java/src/org/chromium/base/ITestClient.aidl",
+ "android/java/src/org/chromium/base/ITestController.aidl",
]
}
}
diff --git a/chromium/base/third_party/dmg_fp/README.chromium b/chromium/base/third_party/dmg_fp/README.chromium
index 2415a61295e..e3270cf3f41 100644
--- a/chromium/base/third_party/dmg_fp/README.chromium
+++ b/chromium/base/third_party/dmg_fp/README.chromium
@@ -18,3 +18,4 @@ List of changes made to original code:
- fixed warnings under msvc, see msvc_warnings.patch
- fixed parsing of long exponents, see exp_length.patch and crbug.com/542881
- made hexdig array const
+ - removed deprecated `register` keyword
diff --git a/chromium/base/third_party/dmg_fp/g_fmt.cc b/chromium/base/third_party/dmg_fp/g_fmt.cc
index bfa358d154c..67c9f5736ef 100644
--- a/chromium/base/third_party/dmg_fp/g_fmt.cc
+++ b/chromium/base/third_party/dmg_fp/g_fmt.cc
@@ -27,10 +27,10 @@
namespace dmg_fp {
char *
-g_fmt(register char *b, double x)
+g_fmt(char *b, double x)
{
- register int i, k;
- register char *s;
+ int i, k;
+ char *s;
int decpt, j, sign;
char *b0, *s0, *se;
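[editor's note] `register` was deprecated in C++11 and removed outright in C++17, which is why it is stripped here; compilers have long ignored the hint, so the change is purely syntactic:

    // register int i;  // C++17: error, 'register' storage class removed
    int i;              // identical generated code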
diff --git a/chromium/base/third_party/xdg_mime/README.chromium b/chromium/base/third_party/xdg_mime/README.chromium
index 95f3a9658d1..8212752095f 100644
--- a/chromium/base/third_party/xdg_mime/README.chromium
+++ b/chromium/base/third_party/xdg_mime/README.chromium
@@ -10,4 +10,5 @@ In addition, we have the following patch(es):
- compile.patch: small tweaks to make the code compile.
- free_pointer_later.patch: small patch that fixes potential crash in
xdg_mime_get_mime_type_for_file() - use of pointer after being freed.
+ - function_casts.patch: fix bad function casts.
- Added a LICENSE file.
diff --git a/chromium/base/third_party/xdg_mime/function_casts.patch b/chromium/base/third_party/xdg_mime/function_casts.patch
new file mode 100644
index 00000000000..37d38a7b3db
--- /dev/null
+++ b/chromium/base/third_party/xdg_mime/function_casts.patch
@@ -0,0 +1,44 @@
+diff --git a/base/third_party/xdg_mime/xdgmime.c b/base/third_party/xdg_mime/xdgmime.c
+index 6dc58c253fa2..f340fcefabb4 100644
+--- a/base/third_party/xdg_mime/xdgmime.c
++++ b/base/third_party/xdg_mime/xdgmime.c
+@@ -136,7 +136,7 @@ xdg_dir_time_list_free (XdgDirTimeList *list)
+ }
+
+ static int
+-xdg_mime_init_from_directory (const char *directory)
++xdg_mime_init_from_directory (const char *directory, void *user_data)
+ {
+ char *file_name;
+ struct stat st;
+@@ -340,8 +340,9 @@ xdg_check_file (const char *file_path,
+
+ static int
+ xdg_check_dir (const char *directory,
+- int *invalid_dir_list)
++ void *user_data)
+ {
++ int *invalid_dir_list = user_data;
+ int invalid, exists;
+ char *file_name;
+
+@@ -398,8 +399,7 @@ xdg_check_dirs (void)
+ for (list = dir_time_list; list; list = list->next)
+ list->checked = XDG_CHECKED_UNCHECKED;
+
+- xdg_run_command_on_dirs ((XdgDirectoryFunc) xdg_check_dir,
+- &invalid_dir_list);
++ xdg_run_command_on_dirs (xdg_check_dir, &invalid_dir_list);
+
+ if (invalid_dir_list)
+ return TRUE;
+@@ -455,8 +455,7 @@ xdg_mime_init (void)
+ icon_list = _xdg_mime_icon_list_new ();
+ generic_icon_list = _xdg_mime_icon_list_new ();
+
+- xdg_run_command_on_dirs ((XdgDirectoryFunc) xdg_mime_init_from_directory,
+- NULL);
++ xdg_run_command_on_dirs (xdg_mime_init_from_directory, NULL);
+
+ need_reread = FALSE;
+ }
diff --git a/chromium/base/third_party/xdg_mime/xdgmime.c b/chromium/base/third_party/xdg_mime/xdgmime.c
index 6dc58c253fa..f340fcefabb 100644
--- a/chromium/base/third_party/xdg_mime/xdgmime.c
+++ b/chromium/base/third_party/xdg_mime/xdgmime.c
@@ -136,7 +136,7 @@ xdg_dir_time_list_free (XdgDirTimeList *list)
}
static int
-xdg_mime_init_from_directory (const char *directory)
+xdg_mime_init_from_directory (const char *directory, void *user_data)
{
char *file_name;
struct stat st;
@@ -340,8 +340,9 @@ xdg_check_file (const char *file_path,
static int
xdg_check_dir (const char *directory,
- int *invalid_dir_list)
+ void *user_data)
{
+ int *invalid_dir_list = user_data;
int invalid, exists;
char *file_name;
@@ -398,8 +399,7 @@ xdg_check_dirs (void)
for (list = dir_time_list; list; list = list->next)
list->checked = XDG_CHECKED_UNCHECKED;
- xdg_run_command_on_dirs ((XdgDirectoryFunc) xdg_check_dir,
- &invalid_dir_list);
+ xdg_run_command_on_dirs (xdg_check_dir, &invalid_dir_list);
if (invalid_dir_list)
return TRUE;
@@ -455,8 +455,7 @@ xdg_mime_init (void)
icon_list = _xdg_mime_icon_list_new ();
generic_icon_list = _xdg_mime_icon_list_new ();
- xdg_run_command_on_dirs ((XdgDirectoryFunc) xdg_mime_init_from_directory,
- NULL);
+ xdg_run_command_on_dirs (xdg_mime_init_from_directory, NULL);
need_reread = FALSE;
}
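[editor's note] Both call sites previously forced the callback through an (XdgDirectoryFunc) cast, and calling a function through a pointer of an incompatible type is undefined behavior. The fix gives each callback the exact expected signature and recovers the typed argument from void* inside the body. The same pattern rendered in C++ (names illustrative; the callback type is inferred from this patch):

    using DirectoryFunc = int (*)(const char* directory, void* user_data);

    int CheckDir(const char* directory, void* user_data) {
      int* invalid_count = static_cast<int*>(user_data);  // recover real type
      ++(*invalid_count);
      return 0;  // return value is interpreted by the iteration helper
    }
    // Pass CheckDir directly; the signatures match, so no cast and no UB.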
diff --git a/chromium/base/threading/post_task_and_reply_impl.cc b/chromium/base/threading/post_task_and_reply_impl.cc
index d16f8bd2259..1aaa1e7dda6 100644
--- a/chromium/base/threading/post_task_and_reply_impl.cc
+++ b/chromium/base/threading/post_task_and_reply_impl.cc
@@ -29,8 +29,8 @@ namespace {
class PostTaskAndReplyRelay {
public:
PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply)
+ OnceClosure task,
+ OnceClosure reply)
: sequence_checker_(),
from_here_(from_here),
origin_task_runner_(SequencedTaskRunnerHandle::Get()),
@@ -39,27 +39,25 @@ class PostTaskAndReplyRelay {
~PostTaskAndReplyRelay() {
DCHECK(sequence_checker_.CalledOnValidSequence());
- task_.Reset();
- reply_.Reset();
}
void RunTaskAndPostReply() {
- task_.Run();
+ std::move(task_).Run();
origin_task_runner_->PostTask(
- from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
- base::Unretained(this)));
+ from_here_, BindOnce(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
+ base::Unretained(this)));
}
private:
void RunReplyAndSelfDestruct() {
DCHECK(sequence_checker_.CalledOnValidSequence());
- // Force |task_| to be released before |reply_| is to ensure that no one
- // accidentally depends on |task_| keeping one of its arguments alive while
- // |reply_| is executing.
- task_.Reset();
+ // |task_| must have been released before |reply_| runs, so that no one
+ // accidentally depends on |task_| keeping one of its arguments alive
+ // while |reply_| is executing.
+ DCHECK(!task_);
- reply_.Run();
+ std::move(reply_).Run();
// Cue mission impossible theme.
delete this;
@@ -68,8 +66,8 @@ class PostTaskAndReplyRelay {
const SequenceChecker sequence_checker_;
const tracked_objects::Location from_here_;
const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
- Closure reply_;
- Closure task_;
+ OnceClosure reply_;
+ OnceClosure task_;
};
} // namespace
@@ -78,8 +76,8 @@ namespace internal {
bool PostTaskAndReplyImpl::PostTaskAndReply(
const tracked_objects::Location& from_here,
- Closure task,
- Closure reply) {
+ OnceClosure task,
+ OnceClosure reply) {
DCHECK(!task.is_null()) << from_here.ToString();
DCHECK(!reply.is_null()) << from_here.ToString();
PostTaskAndReplyRelay* relay =
@@ -90,8 +88,8 @@ bool PostTaskAndReplyImpl::PostTaskAndReply(
// to avoid having to suppress every callsite which happens to flakily trigger
// this race.
ANNOTATE_LEAKING_OBJECT_PTR(relay);
- if (!PostTask(from_here, Bind(&PostTaskAndReplyRelay::RunTaskAndPostReply,
- Unretained(relay)))) {
+ if (!PostTask(from_here, BindOnce(&PostTaskAndReplyRelay::RunTaskAndPostReply,
+ Unretained(relay)))) {
delete relay;
return false;
}
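[editor's note] End to end, the relay gives PostTaskAndReply its usual shape; a hedged usage sketch with illustrative functions:

    task_runner->PostTaskAndReply(
        FROM_HERE,
        BindOnce(&ComputeOnWorker),    // runs where |task_runner| runs tasks
        BindOnce(&ConsumeOnOrigin));   // runs back on the posting sequence

With OnceClosures, std::move(task_).Run() consumes the callback, so |task_| is destroyed right after running on the worker side; the DCHECK(!task_) above records that the old manual Reset() ordering is now implicit.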
diff --git a/chromium/base/threading/post_task_and_reply_impl.h b/chromium/base/threading/post_task_and_reply_impl.h
index 696b668a4c3..00aee6d0ed0 100644
--- a/chromium/base/threading/post_task_and_reply_impl.h
+++ b/chromium/base/threading/post_task_and_reply_impl.h
@@ -29,12 +29,12 @@ class BASE_EXPORT PostTaskAndReplyImpl {
// SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
// to be deleted on the sequence or thread that called this.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply);
+ OnceClosure task,
+ OnceClosure reply);
private:
virtual bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) = 0;
+ OnceClosure task) = 0;
};
} // namespace internal
diff --git a/chromium/base/threading/post_task_and_reply_impl_unittest.cc b/chromium/base/threading/post_task_and_reply_impl_unittest.cc
index 13d5cc98401..664c191b5e4 100644
--- a/chromium/base/threading/post_task_and_reply_impl_unittest.cc
+++ b/chromium/base/threading/post_task_and_reply_impl_unittest.cc
@@ -4,6 +4,8 @@
#include "base/threading/post_task_and_reply_impl.h"
+#include <utility>
+
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/macros.h"
@@ -27,8 +29,8 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override {
- return destination_->PostTask(from_here, task);
+ OnceClosure task) override {
+ return destination_->PostTask(from_here, std::move(task));
}
// Non-owning.
@@ -56,14 +58,7 @@ class MockObject {
MockObject() = default;
MOCK_METHOD1(Task, void(scoped_refptr<ObjectToDelete>));
-
- void Reply(bool* delete_flag) {
- // Expect the task's deletion flag to be set before the reply runs.
- EXPECT_TRUE(*delete_flag);
- ReplyMock();
- }
-
- MOCK_METHOD0(ReplyMock, void());
+ MOCK_METHOD0(Reply, void());
private:
DISALLOW_COPY_AND_ASSIGN(MockObject);
@@ -83,27 +78,27 @@ TEST(PostTaskAndReplyImplTest, PostTaskAndReply) {
PostTaskAndReplyTaskRunner(post_runner.get())
.PostTaskAndReply(
FROM_HERE,
- Bind(&MockObject::Task, Unretained(&mock_object),
- make_scoped_refptr(new ObjectToDelete(&delete_flag))),
- Bind(&MockObject::Reply, Unretained(&mock_object),
- Unretained(&delete_flag))));
-
- // Expect no reply in |reply_runner|.
- EXPECT_FALSE(reply_runner->HasPendingTask());
+ BindOnce(&MockObject::Task, Unretained(&mock_object),
+ make_scoped_refptr(new ObjectToDelete(&delete_flag))),
+ BindOnce(&MockObject::Reply, Unretained(&mock_object))));
// Expect the task to be posted to |post_runner|.
EXPECT_TRUE(post_runner->HasPendingTask());
+ EXPECT_FALSE(reply_runner->HasPendingTask());
+ EXPECT_FALSE(delete_flag);
+
EXPECT_CALL(mock_object, Task(_));
post_runner->RunUntilIdle();
testing::Mock::VerifyAndClear(&mock_object);
- // Expect the task's argument not to have been deleted yet.
- EXPECT_FALSE(delete_flag);
+ // |task| should have been deleted right after being run.
+ EXPECT_TRUE(delete_flag);
// Expect the reply to be posted to |reply_runner|.
EXPECT_FALSE(post_runner->HasPendingTask());
EXPECT_TRUE(reply_runner->HasPendingTask());
- EXPECT_CALL(mock_object, ReplyMock());
+
+ EXPECT_CALL(mock_object, Reply());
reply_runner->RunUntilIdle();
testing::Mock::VerifyAndClear(&mock_object);
EXPECT_TRUE(delete_flag);
diff --git a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
index 016ab24ff85..6f4948a3e73 100644
--- a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
@@ -41,8 +41,8 @@ class SequencedTaskRunnerHandleTest : public ::testing::Test {
new SequenceCheckerImpl);
task_runner->PostTask(
FROM_HERE,
- base::Bind(&SequencedTaskRunnerHandleTest::CheckValidSequence,
- base::Passed(&sequence_checker), callback));
+ base::BindOnce(&SequencedTaskRunnerHandleTest::CheckValidSequence,
+ base::Passed(&sequence_checker), callback));
}
// Verifies that there is no SequencedTaskRunnerHandle in the context it runs.
@@ -77,7 +77,7 @@ TEST_F(SequencedTaskRunnerHandleTest, FromSequencedWorkerPoolTask) {
WaitableEvent::InitialState::NOT_SIGNALED);
owner.pool()->PostSequencedWorkerTask(
owner.pool()->GetSequenceToken(), FROM_HERE,
- base::Bind(
+ base::BindOnce(
&SequencedTaskRunnerHandleTest::VerifyCurrentSequencedTaskRunner,
base::Bind(&WaitableEvent::Signal, base::Unretained(&event))));
event.Wait();
@@ -91,7 +91,7 @@ TEST_F(SequencedTaskRunnerHandleTest, NoHandleFromUnsequencedTask) {
WaitableEvent::InitialState::NOT_SIGNALED);
owner.pool()->PostWorkerTask(
FROM_HERE,
- base::Bind(
+ base::BindOnce(
&SequencedTaskRunnerHandleTest::VerifyNoSequencedTaskRunner,
base::Bind(&WaitableEvent::Signal, base::Unretained(&event))));
event.Wait();
diff --git a/chromium/base/threading/sequenced_worker_pool.cc b/chromium/base/threading/sequenced_worker_pool.cc
index badd2936ee2..df218a623fa 100644
--- a/chromium/base/threading/sequenced_worker_pool.cc
+++ b/chromium/base/threading/sequenced_worker_pool.cc
@@ -93,12 +93,15 @@ struct SequencedTask : public TrackingInfo {
~SequencedTask() {}
+ SequencedTask(SequencedTask&&) = default;
+ SequencedTask& operator=(SequencedTask&&) = default;
+
int sequence_token_id;
int trace_id;
int64_t sequence_task_number;
SequencedWorkerPool::WorkerShutdown shutdown_behavior;
tracked_objects::Location posted_from;
- Closure task;
+ OnceClosure task;
// Non-delayed tasks and delayed tasks are managed together by time-to-run
// order. We calculate the time by adding the posted time and the given delay.
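[editor's note] Because OnceClosure is move-only, SequencedTask itself becomes move-only, which is why the defaulted move operations were added above; in miniature:

    struct Holder {
      Holder() = default;
      Holder(Holder&&) = default;             // moves the OnceClosure
      Holder& operator=(Holder&&) = default;
      // Copy construction/assignment are implicitly deleted because
      // OnceClosure is move-only.
      OnceClosure task;
    };

This is also what forces the const_cast/std::move handling of |pending_tasks_| later in this file: set elements can no longer be copied out.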
@@ -140,7 +143,7 @@ class SequencedWorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -164,13 +167,13 @@ SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
bool SequencedWorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
if (delay.is_zero()) {
- return pool_->PostWorkerTaskWithShutdownBehavior(
- from_here, task, shutdown_behavior_);
+ return pool_->PostWorkerTaskWithShutdownBehavior(from_here, std::move(task),
+ shutdown_behavior_);
}
- return pool_->PostDelayedWorkerTask(from_here, task, delay);
+ return pool_->PostDelayedWorkerTask(from_here, std::move(task), delay);
}
bool SequencedWorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -194,13 +197,13 @@ class SequencedWorkerPool::PoolSequencedTaskRunner
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
// SequencedTaskRunner implementation
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
private:
@@ -227,15 +230,16 @@ SequencedWorkerPool::PoolSequencedTaskRunner::
SequencedWorkerPool::PoolSequencedTaskRunner::
~PoolSequencedTaskRunner() = default;
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
- PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
if (delay.is_zero()) {
return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
- token_, from_here, task, shutdown_behavior_);
+ token_, from_here, std::move(task), shutdown_behavior_);
}
- return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
+ return pool_->PostDelayedSequencedWorkerTask(token_, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PoolSequencedTaskRunner::
@@ -243,13 +247,13 @@ bool SequencedWorkerPool::PoolSequencedTaskRunner::
return pool_->IsRunningSequenceOnCurrentThread(token_);
}
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
- PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
- TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ OnceClosure task,
+ TimeDelta delay) {
// There's no way to run nested tasks, so simply forward to
// PostDelayedTask.
- return PostDelayedTask(from_here, task, delay);
+ return PostDelayedTask(from_here, std::move(task), delay);
}
// Worker ---------------------------------------------------------------------
@@ -348,7 +352,7 @@ class SequencedWorkerPool::Inner {
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
bool RunsTasksOnCurrentThread() const;
@@ -393,8 +397,7 @@ class SequencedWorkerPool::Inner {
// Returns true if the task may run at some point in the future and false if
// it will definitely not run.
// Coalesce upon resolution of http://crbug.com/622400.
- bool PostTaskToTaskScheduler(const SequencedTask& sequenced,
- const TimeDelta& delay);
+ bool PostTaskToTaskScheduler(SequencedTask sequenced, const TimeDelta& delay);
// Returns the TaskScheduler TaskRunner for the specified |sequence_token_id|
// and |traits|.
@@ -692,8 +695,12 @@ bool SequencedWorkerPool::Inner::PostTask(
SequenceToken sequence_token,
WorkerShutdown shutdown_behavior,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task);
+
// TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
// revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
// waterfall. https://crbug.com/622400
@@ -706,9 +713,9 @@ bool SequencedWorkerPool::Inner::PostTask(
sequenced.sequence_token_id = sequence_token.id_;
sequenced.shutdown_behavior = shutdown_behavior;
sequenced.posted_from = from_here;
- sequenced.task =
- shutdown_behavior == BLOCK_SHUTDOWN ?
- base::MakeCriticalClosure(task) : task;
+ sequenced.task = shutdown_behavior == BLOCK_SHUTDOWN
+ ? base::MakeCriticalClosure(std::move(task))
+ : std::move(task);
sequenced.time_to_run = TimeTicks::Now() + delay;
int create_thread_id = 0;
@@ -751,12 +758,14 @@ bool SequencedWorkerPool::Inner::PostTask(
sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name);
if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
- if (!PostTaskToTaskScheduler(sequenced, delay))
+ if (!PostTaskToTaskScheduler(std::move(sequenced), delay))
return false;
} else {
- pending_tasks_.insert(sequenced);
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior =
+ sequenced.shutdown_behavior;
+ pending_tasks_.insert(std::move(sequenced));
- if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN)
+ if (shutdown_behavior == BLOCK_SHUTDOWN)
blocking_shutdown_pending_task_count_++;
create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
@@ -793,7 +802,7 @@ bool SequencedWorkerPool::Inner::PostTask(
}
bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
- const SequencedTask& sequenced,
+ SequencedTask sequenced,
const TimeDelta& delay) {
DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
@@ -823,7 +832,8 @@ bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
.WithPriority(task_priority_)
.WithShutdownBehavior(task_shutdown_behavior);
return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
- ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay);
+ ->PostDelayedTask(sequenced.posted_from, std::move(sequenced.task),
+ delay);
}
scoped_refptr<TaskRunner>
@@ -1018,7 +1028,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
- task.task.Run();
+ std::move(task.task).Run();
stopwatch.Stop();
tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
@@ -1029,7 +1039,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// Also, do it before calling reset_running_task_info() so
// that sequence-checking from within the task's destructor
// still works.
- task.task = Closure();
+ DCHECK(!task.task);
this_worker->reset_running_task_info();
}
@@ -1241,7 +1251,11 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
// refcounted, so we just need to keep a copy of them alive until the lock
// is exited. The calling code can just clear() the vector they passed to
// us once the lock is exited to make this happen.
- delete_these_outside_lock->push_back(*i);
+ //
+ // The const_cast here is safe since the object is erased from
+ // |pending_tasks_| soon after the move.
+ delete_these_outside_lock->push_back(
+ std::move(const_cast<SequencedTask&>(*i)));
pending_tasks_.erase(i++);
continue;
}
@@ -1252,14 +1266,18 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
status = GET_WORK_WAIT;
if (cleanup_state_ == CLEANUP_RUNNING) {
// Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
- delete_these_outside_lock->push_back(*i);
+ // The const_cast here is safe since the object is erased from
+ // |pending_tasks_| soon after the move.
+ delete_these_outside_lock->push_back(
+ std::move(const_cast<SequencedTask&>(*i)));
pending_tasks_.erase(i);
}
break;
}
- // Found a runnable task.
- *task = *i;
+ // Found a runnable task. The const_cast is safe here since the object is
+ // erased from |pending_tasks_| soon after the move.
+ *task = std::move(const_cast<SequencedTask&>(*i));
pending_tasks_.erase(i);
if (task->shutdown_behavior == BLOCK_SHUTDOWN) {
blocking_shutdown_pending_task_count_--;
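[editor's note] Moving out of a std::set element through const_cast is legitimate only under the discipline the comments above spell out: the moved-from husk must be erased before the set can compare it again. The pattern in isolation (Item stands in for SequencedTask):

    std::set<Item, ItemLess> pending;
    auto it = pending.begin();
    Item item = std::move(const_cast<Item&>(*it));  // leaves *it moved-from
    pending.erase(it);  // erase by iterator immediately; no comparison runs

Mutating a key in a way ItemLess could observe would corrupt the set's ordering, so the erase must follow the move before any insert/find touches the container.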
@@ -1532,71 +1550,71 @@ SequencedWorkerPool::GetTaskRunnerWithShutdownBehavior(
bool SequencedWorkerPool::PostWorkerTask(
const tracked_objects::Location& from_here,
- const Closure& task) {
- return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ OnceClosure task) {
+ return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedWorkerTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
- from_here, task, delay);
+ return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
- from_here, task, TimeDelta());
+ return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task) {
- return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ OnceClosure task) {
+ return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
- from_here, task, delay);
+ return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ std::move(task), delay);
}
bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
const std::string& token_name,
const tracked_objects::Location& from_here,
- const Closure& task) {
+ OnceClosure task) {
DCHECK(!token_name.empty());
return inner_->PostTask(&token_name, SequenceToken(), BLOCK_SHUTDOWN,
- from_here, task, TimeDelta());
+ from_here, std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
- from_here, task, TimeDelta());
+ return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
- return PostDelayedWorkerTask(from_here, task, delay);
+ return PostDelayedWorkerTask(from_here, std::move(task), delay);
}
bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
diff --git a/chromium/base/threading/sequenced_worker_pool.h b/chromium/base/threading/sequenced_worker_pool.h
index 0d42de9138e..e577e1be112 100644
--- a/chromium/base/threading/sequenced_worker_pool.h
+++ b/chromium/base/threading/sequenced_worker_pool.h
@@ -12,7 +12,7 @@
#include <string>
#include "base/base_export.h"
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -276,7 +276,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// Returns true if the task was posted successfully. This may fail during
// shutdown regardless of the specified ShutdownBehavior.
bool PostWorkerTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Same as PostWorkerTask but allows a delay to be specified (although doing
// so changes the shutdown behavior). The task will be run after the given
@@ -288,13 +288,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// task will be guaranteed to run to completion before shutdown
// (BLOCK_SHUTDOWN semantics).
bool PostDelayedWorkerTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
// Same as PostWorkerTask but allows specification of the shutdown behavior.
bool PostWorkerTaskWithShutdownBehavior(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior);
// Like PostWorkerTask above, but provides sequencing semantics. This means
@@ -310,13 +310,13 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// shutdown regardless of the specified ShutdownBehavior.
bool PostSequencedWorkerTask(SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Like PostSequencedWorkerTask above, but allows you to specify a named
// token, which saves an extra call to GetNamedSequenceToken.
bool PostNamedSequencedWorkerTask(const std::string& token_name,
const tracked_objects::Location& from_here,
- const Closure& task);
+ OnceClosure task);
// Same as PostSequencedWorkerTask but allows a delay to be specified
// (although doing so changes the shutdown behavior). The task will be run
@@ -330,7 +330,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostDelayedSequencedWorkerTask(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay);
// Same as PostSequencedWorkerTask but allows specification of the shutdown
@@ -338,12 +338,12 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
bool PostSequencedWorkerTaskWithShutdownBehavior(
SequenceToken sequence_token,
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
WorkerShutdown shutdown_behavior);
// TaskRunner implementation. Forwards to PostDelayedWorkerTask().
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
diff --git a/chromium/base/threading/sequenced_worker_pool_unittest.cc b/chromium/base/threading/sequenced_worker_pool_unittest.cc
index 5f9c9bfd2ad..d3580596af7 100644
--- a/chromium/base/threading/sequenced_worker_pool_unittest.cc
+++ b/chromium/base/threading/sequenced_worker_pool_unittest.cc
@@ -244,14 +244,12 @@ class SequencedWorkerPoolTest
void SetUp() override {
if (RedirectedToTaskScheduler()) {
- std::vector<SchedulerWorkerPoolParams> worker_pool_params;
- worker_pool_params.emplace_back(
- "SchedulerWorkerPoolName", ThreadPriority::NORMAL,
+ const SchedulerWorkerPoolParams worker_pool_params(
SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY,
- kNumWorkerThreads, TimeDelta::Max());
+ static_cast<int>(kNumWorkerThreads), TimeDelta::Max());
TaskScheduler::CreateAndSetDefaultTaskScheduler(
- std::move(worker_pool_params),
- base::Bind([](const TaskTraits&) -> size_t { return 0U; }));
+ "SequencedWorkerPoolTest", {worker_pool_params, worker_pool_params,
+ worker_pool_params, worker_pool_params});
// Unit tests run in an environment where SequencedWorkerPool is enabled
// without redirection to TaskScheduler. For the current unit test,
@@ -323,9 +321,9 @@ class SequencedWorkerPoolTest
// workers to be created.
ThreadBlocker blocker;
for (size_t i = 0; i < kNumWorkerThreads; i++) {
- pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), -1, &blocker));
+ pool()->PostWorkerTask(
+ FROM_HERE,
+ base::BindOnce(&TestTracker::BlockTask, tracker(), -1, &blocker));
}
tracker()->WaitUntilTasksBlocked(kNumWorkerThreads);
@@ -385,7 +383,7 @@ void ShouldNotRun(const scoped_refptr<DeletionHelper>& helper) {
TEST_P(SequencedWorkerPoolTest, DelayedTaskDuringShutdown) {
// Post something to verify the pool is started up.
EXPECT_TRUE(pool()->PostTask(
- FROM_HERE, base::Bind(&TestTracker::FastTask, tracker(), 1)));
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 1)));
scoped_refptr<base::RefCountedData<bool> > deleted_flag(
new base::RefCountedData<bool>(false));
@@ -394,8 +392,8 @@ TEST_P(SequencedWorkerPoolTest, DelayedTaskDuringShutdown) {
// Post something that shouldn't run.
EXPECT_TRUE(pool()->PostDelayedTask(
FROM_HERE,
- base::Bind(&ShouldNotRun,
- make_scoped_refptr(new DeletionHelper(deleted_flag))),
+ base::BindOnce(&ShouldNotRun,
+ make_scoped_refptr(new DeletionHelper(deleted_flag))),
TestTimeouts::action_timeout()));
std::vector<int> completion_sequence = tracker()->WaitUntilTasksComplete(1);
@@ -455,12 +453,12 @@ TEST_P(SequencedWorkerPoolTest, NamedTokens) {
// threads) runs them all.
TEST_P(SequencedWorkerPoolTest, LotsOfTasks) {
pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::SlowTask, tracker(), 0));
+ base::BindOnce(&TestTracker::SlowTask, tracker(), 0));
const size_t kNumTasks = 20;
for (size_t i = 1; i < kNumTasks; i++) {
- pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), i));
+ pool()->PostWorkerTask(
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), i));
}
std::vector<int> result = tracker()->WaitUntilTasksComplete(kNumTasks);
@@ -500,8 +498,8 @@ TEST_P(SequencedWorkerPoolTest, Sequence) {
ThreadBlocker background_blocker;
for (size_t i = 0; i < kNumBackgroundTasks; i++) {
pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), i, &background_blocker));
+ base::BindOnce(&TestTracker::BlockTask, tracker(), i,
+ &background_blocker));
}
tracker()->WaitUntilTasksBlocked(kNumBackgroundTasks);
@@ -513,10 +511,10 @@ TEST_P(SequencedWorkerPoolTest, Sequence) {
SequencedWorkerPool::SequenceToken token1 = pool()->GetSequenceToken();
pool()->PostSequencedWorkerTask(
token1, FROM_HERE,
- base::Bind(&TestTracker::BlockTask, tracker(), 100, &blocker));
+ base::BindOnce(&TestTracker::BlockTask, tracker(), 100, &blocker));
pool()->PostSequencedWorkerTask(
token1, FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 101));
+ base::BindOnce(&TestTracker::FastTask, tracker(), 101));
EXPECT_EQ(0u, tracker()->WaitUntilTasksComplete(0).size());
// Create another two tasks as above with a different token. These will be
@@ -524,10 +522,10 @@ TEST_P(SequencedWorkerPoolTest, Sequence) {
SequencedWorkerPool::SequenceToken token2 = pool()->GetSequenceToken();
pool()->PostSequencedWorkerTask(
token2, FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 200));
+ base::BindOnce(&TestTracker::FastTask, tracker(), 200));
pool()->PostSequencedWorkerTask(
token2, FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 201));
+ base::BindOnce(&TestTracker::FastTask, tracker(), 201));
EXPECT_EQ(0u, tracker()->WaitUntilTasksComplete(0).size());
// Let one background task complete. This should then let both tasks of
@@ -560,9 +558,8 @@ TEST_P(SequencedWorkerPoolTest, DISABLED_IgnoresAfterShutdown) {
EnsureAllWorkersCreated();
ThreadBlocker blocker;
for (size_t i = 0; i < kNumWorkerThreads; i++) {
- pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), i, &blocker));
+ pool()->PostWorkerTask(FROM_HERE, base::BindOnce(&TestTracker::BlockTask,
+ tracker(), i, &blocker));
}
tracker()->WaitUntilTasksBlocked(kNumWorkerThreads);
@@ -587,16 +584,13 @@ TEST_P(SequencedWorkerPoolTest, DISABLED_IgnoresAfterShutdown) {
// No further tasks, regardless of shutdown mode, should be allowed.
EXPECT_FALSE(pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 100),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 100),
SequencedWorkerPool::CONTINUE_ON_SHUTDOWN));
EXPECT_FALSE(pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 101),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 101),
SequencedWorkerPool::SKIP_ON_SHUTDOWN));
EXPECT_FALSE(pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 102),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 102),
SequencedWorkerPool::BLOCK_SHUTDOWN));
ASSERT_EQ(old_has_work_call_count, has_work_call_count());
@@ -613,7 +607,7 @@ TEST_P(SequencedWorkerPoolTest, AllowsAfterShutdown) {
for (int i = 0; i < kNumBlockTasks; ++i) {
EXPECT_TRUE(pool()->PostWorkerTask(
FROM_HERE,
- base::Bind(&TestTracker::BlockTask, tracker(), i, &blocker)));
+ base::BindOnce(&TestTracker::BlockTask, tracker(), i, &blocker)));
}
tracker()->WaitUntilTasksBlocked(kNumWorkerThreads);
@@ -623,8 +617,9 @@ TEST_P(SequencedWorkerPoolTest, AllowsAfterShutdown) {
const int kNumQueuedTasks = static_cast<int>(kNumWorkerThreads);
for (int i = 0; i < kNumQueuedTasks; ++i) {
EXPECT_TRUE(pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE, base::Bind(&TestTracker::PostAdditionalTasks, tracker(), i,
- base::RetainedRef(pool()), false),
+ FROM_HERE,
+ base::BindOnce(&TestTracker::PostAdditionalTasks, tracker(), i,
+ base::RetainedRef(pool()), false),
SequencedWorkerPool::BLOCK_SHUTDOWN));
}
@@ -676,7 +671,7 @@ TEST_P(SequencedWorkerPoolTest,
for (int i = 0; i < kNumBlockTasks; ++i) {
EXPECT_TRUE(pool()->PostWorkerTask(
FROM_HERE,
- base::Bind(&TestTracker::BlockTask, tracker(), i, &blocker)));
+ base::BindOnce(&TestTracker::BlockTask, tracker(), i, &blocker)));
}
tracker()->WaitUntilTasksBlocked(kNumWorkerThreads);
@@ -730,24 +725,20 @@ TEST_P(SequencedWorkerPoolTest, DiscardOnShutdown) {
EnsureAllWorkersCreated();
ThreadBlocker blocker;
for (size_t i = 0; i < kNumWorkerThreads; i++) {
- pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), i, &blocker));
+ pool()->PostWorkerTask(FROM_HERE, base::BindOnce(&TestTracker::BlockTask,
+ tracker(), i, &blocker));
}
tracker()->WaitUntilTasksBlocked(kNumWorkerThreads);
// Create some tasks with different shutdown modes.
pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 100),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 100),
SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 101),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 101),
SequencedWorkerPool::SKIP_ON_SHUTDOWN);
pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 102),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 102),
SequencedWorkerPool::BLOCK_SHUTDOWN);
// Shutdown the worker pool. This should discard all non-blocking tasks.
@@ -780,17 +771,12 @@ TEST_P(SequencedWorkerPoolTest, ContinueOnShutdown) {
ThreadBlocker blocker;
pool()->PostWorkerTaskWithShutdownBehavior(
FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), 0, &blocker),
+ base::BindOnce(&TestTracker::BlockTask, tracker(), 0, &blocker),
SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
- runner->PostTask(
- FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), 1, &blocker));
- sequenced_runner->PostTask(
- FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), 2, &blocker));
+ runner->PostTask(FROM_HERE, base::BindOnce(&TestTracker::BlockTask, tracker(),
+ 1, &blocker));
+ sequenced_runner->PostTask(FROM_HERE, base::BindOnce(&TestTracker::BlockTask,
+ tracker(), 2, &blocker));
tracker()->WaitUntilTasksBlocked(3);
@@ -804,12 +790,12 @@ TEST_P(SequencedWorkerPoolTest, ContinueOnShutdown) {
// Posting more tasks should fail.
EXPECT_FALSE(pool()->PostWorkerTaskWithShutdownBehavior(
- FROM_HERE, base::Bind(&TestTracker::FastTask, tracker(), 0),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 0),
SequencedWorkerPool::CONTINUE_ON_SHUTDOWN));
EXPECT_FALSE(runner->PostTask(
- FROM_HERE, base::Bind(&TestTracker::FastTask, tracker(), 0)));
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 0)));
EXPECT_FALSE(sequenced_runner->PostTask(
- FROM_HERE, base::Bind(&TestTracker::FastTask, tracker(), 0)));
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 0)));
// Continue the background thread and make sure the tasks can complete.
blocker.Unblock(3);
@@ -838,7 +824,7 @@ TEST_P(SequencedWorkerPoolTest, SkipOnShutdown) {
for (size_t i = 0; i < kNumWorkerThreads; i++) {
pool()->PostWorkerTaskWithShutdownBehavior(
FROM_HERE,
- base::Bind(&TestTracker::BlockTask, tracker(), i, &blocker),
+ base::BindOnce(&TestTracker::BlockTask, tracker(), i, &blocker),
SequencedWorkerPool::SKIP_ON_SHUTDOWN);
}
tracker()->WaitUntilTasksBlocked(kNumWorkerThreads);
@@ -847,8 +833,7 @@ TEST_P(SequencedWorkerPoolTest, SkipOnShutdown) {
// executed once Shutdown() has been called.
pool()->PostWorkerTaskWithShutdownBehavior(
FROM_HERE,
- base::Bind(&TestTracker::BlockTask,
- tracker(), 0, &blocker),
+ base::BindOnce(&TestTracker::BlockTask, tracker(), 0, &blocker),
SequencedWorkerPool::SKIP_ON_SHUTDOWN);
// This callback will only be invoked if SKIP_ON_SHUTDOWN tasks that have
@@ -936,10 +921,10 @@ TEST_P(SequencedWorkerPoolTest, RunsTasksOnCurrentThread) {
// - unused_pool_owner.pool()->RunsTasksOnCurrentThread() returns false.
sequenced_task_runner_1->PostTask(
FROM_HERE,
- base::Bind(&VerifyRunsTasksOnCurrentThread, RedirectedToTaskScheduler(),
- sequenced_task_runner_1, sequenced_task_runner_2,
- base::RetainedRef(pool()),
- base::RetainedRef(unused_pool_owner.pool())));
+ base::BindOnce(&VerifyRunsTasksOnCurrentThread,
+ RedirectedToTaskScheduler(), sequenced_task_runner_1,
+ sequenced_task_runner_2, base::RetainedRef(pool()),
+ base::RetainedRef(unused_pool_owner.pool())));
// From a task posted to |unsequenced_task_runner|:
// - unsequenced_task_runner->RunsTasksOnCurrentThread() returns true.
// - sequenced_task_runner_1->RunsTasksOnCurrentThread() returns false.
@@ -947,10 +932,10 @@ TEST_P(SequencedWorkerPoolTest, RunsTasksOnCurrentThread) {
// - unused_pool_owner.pool()->RunsTasksOnCurrentThread() returns false.
unsequenced_task_runner->PostTask(
FROM_HERE,
- base::Bind(&VerifyRunsTasksOnCurrentThread, RedirectedToTaskScheduler(),
- unsequenced_task_runner, sequenced_task_runner_1,
- base::RetainedRef(pool()),
- base::RetainedRef(unused_pool_owner.pool())));
+ base::BindOnce(&VerifyRunsTasksOnCurrentThread,
+ RedirectedToTaskScheduler(), unsequenced_task_runner,
+ sequenced_task_runner_1, base::RetainedRef(pool()),
+ base::RetainedRef(unused_pool_owner.pool())));
}
// Checks that tasks are destroyed in the right context during shutdown. If a
@@ -1006,19 +991,18 @@ TEST_P(SequencedWorkerPoolTest, FlushForTesting) {
// Queue up a bunch of work, including a long delayed task and
// a task that produces additional tasks as an artifact.
pool()->PostDelayedWorkerTask(
- FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 0),
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 0),
TimeDelta::FromMinutes(5));
pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::SlowTask, tracker(), 0));
+ base::BindOnce(&TestTracker::SlowTask, tracker(), 0));
const size_t kNumFastTasks = 20;
for (size_t i = 0; i < kNumFastTasks; i++) {
- pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&TestTracker::FastTask, tracker(), 0));
+ pool()->PostWorkerTask(
+ FROM_HERE, base::BindOnce(&TestTracker::FastTask, tracker(), 0));
}
pool()->PostWorkerTask(
- FROM_HERE, base::Bind(&TestTracker::PostAdditionalTasks, tracker(), 0,
- base::RetainedRef(pool()), true));
+ FROM_HERE, base::BindOnce(&TestTracker::PostAdditionalTasks, tracker(), 0,
+ base::RetainedRef(pool()), true));
// We expect all except the delayed task to have been run. We verify all
// closures have been deleted by looking at the refcount of the
@@ -1088,14 +1072,14 @@ TEST_P(SequencedWorkerPoolTest, GetWorkerPoolAndSequenceTokenForCurrentThread) {
SequencedWorkerPool::SequenceToken token2 = pool()->GetSequenceToken();
pool()->PostSequencedWorkerTask(
token1, FROM_HERE,
- base::Bind(&CheckWorkerPoolAndSequenceToken, pool(), token1));
+ base::BindOnce(&CheckWorkerPoolAndSequenceToken, pool(), token1));
pool()->PostSequencedWorkerTask(
token2, FROM_HERE,
- base::Bind(&CheckWorkerPoolAndSequenceToken, pool(), token2));
+ base::BindOnce(&CheckWorkerPoolAndSequenceToken, pool(), token2));
- pool()->PostWorkerTask(FROM_HERE,
- base::Bind(&CheckWorkerPoolAndSequenceToken, pool(),
- SequencedWorkerPool::SequenceToken()));
+ pool()->PostWorkerTask(
+ FROM_HERE, base::BindOnce(&CheckWorkerPoolAndSequenceToken, pool(),
+ SequencedWorkerPool::SequenceToken()));
pool()->FlushForTesting();
}
diff --git a/chromium/base/threading/thread.cc b/chromium/base/threading/thread.cc
index 0aeed2a9e4b..75c911b7f87 100644
--- a/chromium/base/threading/thread.cc
+++ b/chromium/base/threading/thread.cc
@@ -156,7 +156,7 @@ void Thread::FlushForTesting() {
WaitableEvent done(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
task_runner()->PostTask(FROM_HERE,
- Bind(&WaitableEvent::Signal, Unretained(&done)));
+ BindOnce(&WaitableEvent::Signal, Unretained(&done)));
done.Wait();
}
@@ -210,7 +210,7 @@ void Thread::StopSoon() {
}
task_runner()->PostTask(
- FROM_HERE, base::Bind(&Thread::ThreadQuitHelper, Unretained(this)));
+ FROM_HERE, base::BindOnce(&Thread::ThreadQuitHelper, Unretained(this)));
}
void Thread::DetachFromSequence() {
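Thread::FlushForTesting() above relies on the thread's task runner executing tasks
in posting order: once the Signal task runs, everything posted before it has
finished. A hedged sketch of the same handshake, using only the WaitableEvent API
shown in the hunk:

    base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                             base::WaitableEvent::InitialState::NOT_SIGNALED);
    // Every task posted to |task_runner| before this point runs first.
    task_runner->PostTask(
        FROM_HERE,
        base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&done)));
    done.Wait();  // Unblocks only after the marker task has run.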
diff --git a/chromium/base/threading/thread_perftest.cc b/chromium/base/threading/thread_perftest.cc
index ef4e8f7158a..32b4289673c 100644
--- a/chromium/base/threading/thread_perftest.cc
+++ b/chromium/base/threading/thread_perftest.cc
@@ -67,8 +67,8 @@ class ThreadPerfTest : public testing::Test {
WaitableEvent::InitialState::NOT_SIGNALED);
base::ThreadTicks ticks;
thread.task_runner()->PostTask(
- FROM_HERE, base::Bind(&ThreadPerfTest::TimeOnThread,
- base::Unretained(this), &ticks, &done));
+ FROM_HERE, base::BindOnce(&ThreadPerfTest::TimeOnThread,
+ base::Unretained(this), &ticks, &done));
done.Wait();
return ticks;
}
@@ -138,8 +138,8 @@ class TaskPerfTest : public ThreadPerfTest {
return;
}
NextThread(hops)->task_runner()->PostTask(
- FROM_HERE, base::Bind(&ThreadPerfTest::PingPong, base::Unretained(this),
- hops - 1));
+ FROM_HERE, base::BindOnce(&ThreadPerfTest::PingPong,
+ base::Unretained(this), hops - 1));
}
};
@@ -210,8 +210,8 @@ class EventPerfTest : public ThreadPerfTest {
remaining_hops_ = hops;
for (size_t i = 0; i < threads_.size(); i++) {
threads_[i]->task_runner()->PostTask(
- FROM_HERE, base::Bind(&EventPerfTest::WaitAndSignalOnThread,
- base::Unretained(this), i));
+ FROM_HERE, base::BindOnce(&EventPerfTest::WaitAndSignalOnThread,
+ base::Unretained(this), i));
}
// Kick off the Signal ping-ponging.
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 8f3beb1d1a2..97f3eb6129e 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -12,7 +12,11 @@
class BrowserProcessImpl;
class HistogramSynchronizer;
class NativeBackendKWallet;
-class ScopedAllowWaitForLegacyWebViewApi;
+
+namespace android_webview {
+class AwFormDatabaseService;
+class CookieManager;
+}
namespace cc {
class CompletionEvent;
@@ -37,6 +41,9 @@ class NestedMessagePumpAndroid;
class ScopedAllowWaitForAndroidLayoutTests;
class ScopedAllowWaitForDebugURL;
class SoftwareOutputDeviceMus;
+class SynchronousCompositor;
+class SynchronousCompositorBrowserFilter;
+class SynchronousCompositorHost;
class TextInputClientMac;
class CategorizedWorkerPool;
} // namespace content
@@ -174,15 +181,19 @@ class BASE_EXPORT ThreadRestrictions {
private:
// DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
// BEGIN ALLOWED USAGE.
+ friend class android_webview::AwFormDatabaseService;
+ friend class android_webview::CookieManager;
friend class content::BrowserShutdownProfileDumper;
friend class content::BrowserSurfaceViewManager;
friend class content::BrowserTestBase;
friend class content::NestedMessagePumpAndroid;
friend class content::ScopedAllowWaitForAndroidLayoutTests;
friend class content::ScopedAllowWaitForDebugURL;
+ friend class content::SynchronousCompositor;
+ friend class content::SynchronousCompositorBrowserFilter;
+ friend class content::SynchronousCompositorHost;
friend class ::HistogramSynchronizer;
friend class internal::TaskTracker;
- friend class ::ScopedAllowWaitForLegacyWebViewApi;
friend class cc::CompletionEvent;
friend class cc::SingleThreadTaskGraphRunner;
friend class content::CategorizedWorkerPool;
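The friend list above gates access to the private ScopedAllowWait helper: only the
whitelisted classes may construct it. A hypothetical sketch of the pattern (the
method and member names are illustrative, not from this patch):

    void content::SynchronousCompositorHost::WaitForFrameAck() {
      // Compiles only because this class is friended above; new callers need
      // sign-off per the "DO NOT ADD ANY OTHER FRIEND STATEMENTS" comment.
      base::ThreadRestrictions::ScopedAllowWait allow_wait;
      frame_ack_event_.Wait();  // Assumed WaitableEvent member.
    }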
diff --git a/chromium/base/threading/thread_unittest.cc b/chromium/base/threading/thread_unittest.cc
index 0cb964e8f7c..e0371dd32d9 100644
--- a/chromium/base/threading/thread_unittest.cc
+++ b/chromium/base/threading/thread_unittest.cc
@@ -155,8 +155,9 @@ TEST_F(ThreadTest, StartWithOptions_StackSize) {
base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
- a.task_runner()->PostTask(FROM_HERE, base::Bind(&base::WaitableEvent::Signal,
- base::Unretained(&event)));
+ a.task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&event)));
event.Wait();
}
@@ -188,9 +189,9 @@ TEST_F(ThreadTest, StartWithOptions_NonJoinable) {
base::WaitableEvent block_event(
base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
- a->task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&base::WaitableEvent::Wait, base::Unretained(&block_event)));
+ a->task_runner()->PostTask(FROM_HERE,
+ base::BindOnce(&base::WaitableEvent::Wait,
+ base::Unretained(&block_event)));
a->StopSoon();
EXPECT_TRUE(a->IsRunning());
@@ -215,11 +216,11 @@ TEST_F(ThreadTest, TwoTasksOnJoinableThread) {
// destroyed. We do this by dispatching a sleep event before the
// event that will toggle our sentinel value.
a.task_runner()->PostTask(
- FROM_HERE, base::Bind(static_cast<void (*)(base::TimeDelta)>(
- &base::PlatformThread::Sleep),
- base::TimeDelta::FromMilliseconds(20)));
+ FROM_HERE, base::BindOnce(static_cast<void (*)(base::TimeDelta)>(
+ &base::PlatformThread::Sleep),
+ base::TimeDelta::FromMilliseconds(20)));
a.task_runner()->PostTask(FROM_HERE,
- base::Bind(&ToggleValue, &was_invoked));
+ base::BindOnce(&ToggleValue, &was_invoked));
}
EXPECT_TRUE(was_invoked);
}
@@ -285,8 +286,8 @@ TEST_F(ThreadTest, DISABLED_StopOnNonOwningThreadIsDeath) {
b.Start();
EXPECT_DCHECK_DEATH({
// Stopping |a| on |b| isn't allowed.
- b.task_runner()->PostTask(FROM_HERE,
- base::Bind(&Thread::Stop, base::Unretained(&a)));
+ b.task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&Thread::Stop, base::Unretained(&a)));
// Block here so the DCHECK on |b| always happens in this scope.
base::PlatformThread::Sleep(base::TimeDelta::Max());
});
@@ -307,7 +308,7 @@ TEST_F(ThreadTest, TransferOwnershipAndStop) {
// a->DetachFromSequence() should allow |b| to use |a|'s Thread API.
a->DetachFromSequence();
b.task_runner()->PostTask(
- FROM_HERE, base::Bind(
+ FROM_HERE, base::BindOnce(
[](std::unique_ptr<Thread> thread_to_stop,
base::WaitableEvent* event_to_signal) -> void {
thread_to_stop->Stop();
@@ -361,9 +362,9 @@ TEST_F(ThreadTest, StartTwiceNonJoinableNotAllowed) {
base::WaitableEvent last_task_event(
base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
- a->task_runner()->PostTask(FROM_HERE,
- base::Bind(&base::WaitableEvent::Signal,
- base::Unretained(&last_task_event)));
+ a->task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&base::WaitableEvent::Signal,
+ base::Unretained(&last_task_event)));
// StopSoon() is non-blocking, Yield() to |a|, wait for last task to be
// processed and a little more for QuitWhenIdle() to unwind before considering
@@ -399,7 +400,8 @@ TEST_F(ThreadTest, ThreadId) {
base::WaitableEvent::InitialState::NOT_SIGNALED);
base::PlatformThreadId id_from_new_thread;
a.task_runner()->PostTask(
- FROM_HERE, base::Bind(ReturnThreadId, &a, &id_from_new_thread, &event));
+ FROM_HERE,
+ base::BindOnce(ReturnThreadId, &a, &id_from_new_thread, &event));
// Call GetThreadId() on the current thread before calling event.Wait() so
// that this test can find a race issue with TSAN.
@@ -458,8 +460,9 @@ TEST_F(ThreadTest, CleanUp) {
// Register an observer that writes into |captured_events| once the
// thread's message loop is destroyed.
t.task_runner()->PostTask(
- FROM_HERE, base::Bind(&RegisterDestructionObserver,
- base::Unretained(&loop_destruction_observer)));
+ FROM_HERE,
+ base::BindOnce(&RegisterDestructionObserver,
+ base::Unretained(&loop_destruction_observer)));
// Upon leaving this scope, the thread is deleted.
}
@@ -503,7 +506,8 @@ TEST_F(ThreadTest, FlushForTesting) {
for (size_t i = 0; i < kNumSleepTasks; ++i) {
a.task_runner()->PostTask(
- FROM_HERE, base::Bind(&base::PlatformThread::Sleep, kSleepPerTestTask));
+ FROM_HERE,
+ base::BindOnce(&base::PlatformThread::Sleep, kSleepPerTestTask));
}
// All tasks should have executed, as reflected by the elapsed time.
@@ -557,7 +561,7 @@ TEST_F(ThreadTest, ExternalMessageLoop) {
bool ran = false;
a.task_runner()->PostTask(
- FROM_HERE, base::Bind([](bool* toggled) { *toggled = true; }, &ran));
+ FROM_HERE, base::BindOnce([](bool* toggled) { *toggled = true; }, &ran));
base::RunLoop().RunUntilIdle();
EXPECT_TRUE(ran);
diff --git a/chromium/base/threading/worker_pool.cc b/chromium/base/threading/worker_pool.cc
index d47037d79a0..26ff10f1f53 100644
--- a/chromium/base/threading/worker_pool.cc
+++ b/chromium/base/threading/worker_pool.cc
@@ -27,8 +27,8 @@ class PostTaskAndReplyWorkerPool : public internal::PostTaskAndReplyImpl {
private:
bool PostTask(const tracked_objects::Location& from_here,
- const Closure& task) override {
- return WorkerPool::PostTask(from_here, task, task_is_slow_);
+ OnceClosure task) override {
+ return WorkerPool::PostTask(from_here, std::move(task), task_is_slow_);
}
bool task_is_slow_;
@@ -45,7 +45,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// TaskRunner implementation
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override;
bool RunsTasksOnCurrentThread() const override;
@@ -56,7 +56,7 @@ class WorkerPoolTaskRunner : public TaskRunner {
// zero because non-zero delays are not supported.
bool PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay);
const bool tasks_are_slow_;
@@ -73,9 +73,9 @@ WorkerPoolTaskRunner::~WorkerPoolTaskRunner() {
bool WorkerPoolTaskRunner::PostDelayedTask(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) {
- return PostDelayedTaskAssertZeroDelay(from_here, task, delay);
+ return PostDelayedTaskAssertZeroDelay(from_here, std::move(task), delay);
}
bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -84,11 +84,11 @@ bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
bool WorkerPoolTaskRunner::PostDelayedTaskAssertZeroDelay(
const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
base::TimeDelta delay) {
DCHECK_EQ(delay.InMillisecondsRoundedUp(), 0)
<< "WorkerPoolTaskRunner does not support non-zero delays";
- return WorkerPool::PostTask(from_here, task, tasks_are_slow_);
+ return WorkerPool::PostTask(from_here, std::move(task), tasks_are_slow_);
}
struct TaskRunnerHolder {
@@ -102,8 +102,8 @@ struct TaskRunnerHolder {
} // namespace
bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply,
+ OnceClosure task,
+ OnceClosure reply,
bool task_is_slow) {
// Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
// do about them because WorkerPool doesn't have a flushing API.
diff --git a/chromium/base/threading/worker_pool.h b/chromium/base/threading/worker_pool.h
index 865948e437e..d1c666d2f96 100644
--- a/chromium/base/threading/worker_pool.h
+++ b/chromium/base/threading/worker_pool.h
@@ -32,14 +32,15 @@ class BASE_EXPORT WorkerPool {
// false if |task| could not be posted to a worker thread. Regardless of
// return value, ownership of |task| is transferred to the worker pool.
static bool PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow);
+ OnceClosure task,
+ bool task_is_slow);
// Just like TaskRunner::PostTaskAndReply, except the destination
// for |task| is a worker thread and you can specify |task_is_slow| just
// like you can for PostTask above.
static bool PostTaskAndReply(const tracked_objects::Location& from_here,
- Closure task,
- Closure reply,
+ OnceClosure task,
+ OnceClosure reply,
bool task_is_slow);
// Return true if the current thread is one that this WorkerPool runs tasks
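A usage sketch for the PostTaskAndReply() signature above (illustrative; Compute
and PublishDone are assumed free functions, not part of the patch):

    void Compute();      // Runs on a worker thread.
    void PublishDone();  // Runs back on the posting thread.

    bool posted = base::WorkerPool::PostTaskAndReply(
        FROM_HERE, base::BindOnce(&Compute), base::BindOnce(&PublishDone),
        false /* task_is_slow */);

The reply is posted back to the calling thread's task runner once the task
finishes, so the caller needs one (e.g. a MessageLoop) for the reply to arrive.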
diff --git a/chromium/base/threading/worker_pool_posix.cc b/chromium/base/threading/worker_pool_posix.cc
index 947914c4b6a..e7aca86ed68 100644
--- a/chromium/base/threading/worker_pool_posix.cc
+++ b/chromium/base/threading/worker_pool_posix.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <utility>
+
#include "base/bind.h"
#include "base/callback.h"
#include "base/lazy_instance.h"
@@ -47,7 +49,7 @@ class WorkerPoolImpl {
~WorkerPoolImpl() = delete;
void PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::OnceClosure task,
bool task_is_slow);
private:
@@ -59,9 +61,9 @@ WorkerPoolImpl::WorkerPoolImpl()
kIdleSecondsBeforeExit)) {}
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::OnceClosure task,
bool task_is_slow) {
- pool_->PostTask(from_here, task);
+ pool_->PostTask(from_here, std::move(task));
}
base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
@@ -112,9 +114,10 @@ void WorkerThread::ThreadMain() {
// static
bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
+ base::OnceClosure task,
bool task_is_slow) {
- g_lazy_worker_pool.Pointer()->PostTask(from_here, task, task_is_slow);
+ g_lazy_worker_pool.Pointer()->PostTask(from_here, std::move(task),
+ task_is_slow);
return true;
}
@@ -137,12 +140,18 @@ PosixDynamicThreadPool::~PosixDynamicThreadPool() {
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
- const base::Closure& task) {
- PendingTask pending_task(from_here, task);
+ base::OnceClosure task) {
+ PendingTask pending_task(from_here, std::move(task));
AddTask(&pending_task);
}
void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
+ DCHECK(pending_task);
+
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(pending_task->task);
+
AutoLock locked(lock_);
pending_tasks_.push(std::move(*pending_task));
diff --git a/chromium/base/threading/worker_pool_posix.h b/chromium/base/threading/worker_pool_posix.h
index d65ae8f8cf6..0b10adf8f3c 100644
--- a/chromium/base/threading/worker_pool_posix.h
+++ b/chromium/base/threading/worker_pool_posix.h
@@ -28,7 +28,7 @@
#include <queue>
#include <string>
-#include "base/callback_forward.h"
+#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -51,8 +51,7 @@ class BASE_EXPORT PosixDynamicThreadPool
int idle_seconds_before_exit);
// Adds |task| to the thread pool.
- void PostTask(const tracked_objects::Location& from_here,
- const Closure& task);
+ void PostTask(const tracked_objects::Location& from_here, OnceClosure task);
// Worker thread method to wait for up to |idle_seconds_before_exit| for more
// work from the thread pool. Returns NULL if no work is available.
diff --git a/chromium/base/threading/worker_pool_unittest.cc b/chromium/base/threading/worker_pool_unittest.cc
index ef4bed136e9..a6d2e757452 100644
--- a/chromium/base/threading/worker_pool_unittest.cc
+++ b/chromium/base/threading/worker_pool_unittest.cc
@@ -34,10 +34,9 @@ class PostTaskAndReplyTester
void RunTest() {
ASSERT_TRUE(thread_checker_.CalledOnValidThread());
WorkerPool::PostTaskAndReply(
- FROM_HERE,
- base::Bind(&PostTaskAndReplyTester::OnWorkerThread, this),
- base::Bind(&PostTaskAndReplyTester::OnOriginalThread, this),
- false);
+ FROM_HERE,
+ base::BindOnce(&PostTaskAndReplyTester::OnWorkerThread, this),
+ base::BindOnce(&PostTaskAndReplyTester::OnOriginalThread, this), false);
test_event_.Wait();
}
@@ -77,13 +76,13 @@ TEST_F(WorkerPoolTest, PostTask) {
WaitableEvent long_test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
+ WorkerPool::PostTask(
+ FROM_HERE,
+ base::BindOnce(&WaitableEvent::Signal, base::Unretained(&test_event)),
+ false);
WorkerPool::PostTask(FROM_HERE,
- base::Bind(&WaitableEvent::Signal,
- base::Unretained(&test_event)),
- false);
- WorkerPool::PostTask(FROM_HERE,
- base::Bind(&WaitableEvent::Signal,
- base::Unretained(&long_test_event)),
+ base::BindOnce(&WaitableEvent::Signal,
+ base::Unretained(&long_test_event)),
true);
test_event.Wait();
diff --git a/chromium/base/threading/worker_pool_win.cc b/chromium/base/threading/worker_pool_win.cc
index 05743ae15a0..d0ba4a8d4ec 100644
--- a/chromium/base/threading/worker_pool_win.cc
+++ b/chromium/base/threading/worker_pool_win.cc
@@ -4,6 +4,8 @@
#include "base/threading/worker_pool.h"
+#include <utility>
+
#include "base/bind.h"
#include "base/callback.h"
#include "base/logging.h"
@@ -43,6 +45,10 @@ DWORD CALLBACK WorkItemCallback(void* param) {
// Takes ownership of |pending_task|
bool PostTaskInternal(PendingTask* pending_task, bool task_is_slow) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(pending_task->task);
+
ULONG flags = 0;
if (task_is_slow)
flags |= WT_EXECUTELONGFUNCTION;
@@ -60,8 +66,9 @@ bool PostTaskInternal(PendingTask* pending_task, bool task_is_slow) {
// static
bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow) {
- PendingTask* pending_task = new PendingTask(from_here, task);
+ base::OnceClosure task,
+ bool task_is_slow) {
+ PendingTask* pending_task = new PendingTask(from_here, std::move(task));
return PostTaskInternal(pending_task, task_is_slow);
}
diff --git a/chromium/base/timer/timer.cc b/chromium/base/timer/timer.cc
index 6ec18f18148..31eec1bb574 100644
--- a/chromium/base/timer/timer.cc
+++ b/chromium/base/timer/timer.cc
@@ -170,13 +170,16 @@ void Timer::PostNewScheduledTask(TimeDelta delay) {
is_running_ = true;
scheduled_task_ = new BaseTimerTaskInternal(this);
if (delay > TimeDelta::FromMicroseconds(0)) {
- GetTaskRunner()->PostDelayedTask(posted_from_,
- base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)),
+ GetTaskRunner()->PostDelayedTask(
+ posted_from_,
+ base::BindOnce(&BaseTimerTaskInternal::Run,
+ base::Owned(scheduled_task_)),
delay);
scheduled_run_time_ = desired_run_time_ = Now() + delay;
} else {
GetTaskRunner()->PostTask(posted_from_,
- base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)));
+ base::BindOnce(&BaseTimerTaskInternal::Run,
+ base::Owned(scheduled_task_)));
scheduled_run_time_ = desired_run_time_ = TimeTicks();
}
// Remember the thread ID that posts the first task -- this will be verified
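base::Owned(scheduled_task_) in the hunk above ties the lifetime of the task object
to the callback itself. A hedged sketch of that semantic (Job is a hypothetical
type, not from the patch):

    class Job {
     public:
      void Run() {}
    };

    void PostOwned(base::TaskRunner* runner) {
      Job* job = new Job;
      // The callback takes ownership: |job| is deleted when the callback is
      // destroyed, whether or not it ever ran.
      runner->PostTask(FROM_HERE, base::BindOnce(&Job::Run, base::Owned(job)));
    }

This is what lets Timer abandon a scheduled BaseTimerTaskInternal without leaking
it: the still-pending callback owns and eventually deletes the task object.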
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index b47dc16eddd..64a9975baab 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -8,6 +8,7 @@
#include <iterator>
#include "base/atomicops.h"
+#include "base/debug/debugging_flags.h"
#include "base/debug/leak_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
@@ -203,18 +204,21 @@ bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
// from main() and up. Stack unwinding produces top frames, i.e.
// from this point and up until main(). We request many frames to
// make sure we reach main(), and then copy bottom portion of them.
+#if !defined(OS_NACL) // We don't build base/debug/stack_trace.cc for NaCl.
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
const void* frames[128];
static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
"not requesting enough frames to fill Backtrace");
-#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
size_t frame_count = debug::TraceStackFramePointers(
- frames,
- arraysize(frames),
- 1 /* exclude this function from the trace */ );
-#else
- size_t frame_count = 0;
- NOTREACHED();
-#endif
+ frames, arraysize(frames),
+ 1 /* exclude this function from the trace */);
+#else // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+ // Fall back to capturing the stack with base::debug::StackTrace,
+ // which is likely slower, but more reliable.
+ base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount);
+ size_t frame_count = 0u;
+ const void* const* frames = stack_trace.Addresses(&frame_count);
+#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// Copy frames backwards
size_t backtrace_capacity = backtrace_end - backtrace;
@@ -225,6 +229,7 @@ bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
const void* frame = frames[i];
*backtrace++ = StackFrame::FromProgramCounter(frame);
}
+#endif // !defined(OS_NACL)
break;
}
}
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 577f50043da..6317886b0d7 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,7 +34,9 @@ const char kFilteringTraceConfig[] =
" \"excluded_categories\": [],"
" \"filter_args\": {},"
" \"filter_predicate\": \"heap_profiler_predicate\","
- " \"included_categories\": [\"*\"]"
+ " \"included_categories\": ["
+ " \"*\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("Testing") "\"]"
" }"
" ]"
"}";
@@ -122,6 +124,7 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
}
{
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("NotTesting"), kDonut);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
StackFrame frame_cc[] = {t, c, c};
AssertBacktraceEquals(frame_cc);
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.cc b/chromium/base/trace_event/heap_profiler_allocation_register.cc
index 63d40611a6f..b9f440adb69 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.cc
@@ -5,6 +5,7 @@
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <algorithm>
+#include <limits>
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -12,9 +13,9 @@ namespace base {
namespace trace_event {
AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register, AllocationIndex index)
- : register_(alloc_register),
- index_(index) {}
+ const AllocationRegister& alloc_register,
+ AllocationIndex index)
+ : register_(alloc_register), index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
index_ = register_.allocations_.Next(index_ + 1);
@@ -25,12 +26,12 @@ bool AllocationRegister::ConstIterator::operator!=(
return index_ != other.index_;
}
-AllocationRegister::Allocation
-AllocationRegister::ConstIterator::operator*() const {
+AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
+ const {
return register_.GetAllocation(index_);
}
-size_t AllocationRegister::BacktraceHasher::operator () (
+size_t AllocationRegister::BacktraceHasher::operator()(
const Backtrace& backtrace) const {
const size_t kSampleLength = 10;
@@ -42,7 +43,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
}
size_t tail_start = backtrace.frame_count -
- std::min(backtrace.frame_count - head_end, kSampleLength);
+ std::min(backtrace.frame_count - head_end, kSampleLength);
for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
@@ -55,7 +56,7 @@ size_t AllocationRegister::BacktraceHasher::operator () (
return (total_value * 131101) >> 14;
}
-size_t AllocationRegister::AddressHasher::operator () (
+size_t AllocationRegister::AddressHasher::operator()(
const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
@@ -75,34 +76,48 @@ AllocationRegister::AllocationRegister()
AllocationRegister::AllocationRegister(size_t allocation_capacity,
size_t backtrace_capacity)
- : allocations_(allocation_capacity),
- backtraces_(backtrace_capacity) {}
-
-AllocationRegister::~AllocationRegister() {
+ : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
+ Backtrace sentinel = {};
+ sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
+ sentinel.frame_count = 1;
+
+ // Rationale for max / 2: in theory we could just start the sentinel with a
+ // refcount == 0. However, using max / 2 allows short circuiting of the
+ // conditional in RemoveBacktrace() keeping the sentinel logic out of the fast
+ // path. From a functional viewpoint, the sentinel is safe even if we wrap
+ // over the refcount, since RemoveBacktrace() never erases the sentinel entry.
+ BacktraceMap::KVPair::second_type sentinel_refcount =
+ std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
+ auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
+ DCHECK(index_and_flag.second);
+ DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}
-void AllocationRegister::Insert(const void* address,
+AllocationRegister::~AllocationRegister() {}
+
+bool AllocationRegister::Insert(const void* address,
size_t size,
const AllocationContext& context) {
DCHECK(address != nullptr);
if (size == 0) {
- return;
+ return false;
}
- AllocationInfo info = {
- size,
- context.type_name,
- InsertBacktrace(context.backtrace)
- };
+ AllocationInfo info = {size, context.type_name,
+ InsertBacktrace(context.backtrace)};
// Try to insert the allocation.
auto index_and_flag = allocations_.Insert(address, info);
- if (!index_and_flag.second) {
+ if (!index_and_flag.second &&
+ index_and_flag.first != AllocationMap::kInvalidKVIndex) {
// |address| is already there - overwrite the allocation info.
auto& old_info = allocations_.Get(index_and_flag.first).second;
RemoveBacktrace(old_info.backtrace_index);
old_info = info;
+ return true;
}
+
+ return index_and_flag.second;
}
void AllocationRegister::Remove(const void* address) {
@@ -140,15 +155,17 @@ AllocationRegister::ConstIterator AllocationRegister::end() const {
void AllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
size_t allocated = sizeof(AllocationRegister);
- size_t resident = sizeof(AllocationRegister)
- + allocations_.EstimateUsedMemory()
- + backtraces_.EstimateUsedMemory();
+ size_t resident = sizeof(AllocationRegister) +
+ allocations_.EstimateUsedMemory() +
+ backtraces_.EstimateUsedMemory();
overhead->Add("AllocationRegister", allocated, resident);
}
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
const Backtrace& backtrace) {
auto index = backtraces_.Insert(backtrace, 0).first;
+ if (index == BacktraceMap::kInvalidKVIndex)
+ return kOutOfStorageBacktraceIndex;
auto& backtrace_and_count = backtraces_.Get(index);
backtrace_and_count.second++;
return index;
@@ -156,7 +173,8 @@ AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
auto& backtrace_and_count = backtraces_.Get(index);
- if (--backtrace_and_count.second == 0) {
+ if (--backtrace_and_count.second == 0 &&
+ index != kOutOfStorageBacktraceIndex) {
// Backtrace is not referenced anymore - remove it.
backtraces_.Remove(index);
}
@@ -165,15 +183,11 @@ void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
AllocationRegister::Allocation AllocationRegister::GetAllocation(
AllocationMap::KVIndex index) const {
const auto& address_and_info = allocations_.Get(index);
- const auto& backtrace_and_count = backtraces_.Get(
- address_and_info.second.backtrace_index);
- return {
- address_and_info.first,
- address_and_info.second.size,
- AllocationContext(
- backtrace_and_count.first,
- address_and_info.second.type_name)
- };
+ const auto& backtrace_and_count =
+ backtraces_.Get(address_and_info.second.backtrace_index);
+ return {address_and_info.first, address_and_info.second.size,
+ AllocationContext(backtrace_and_count.first,
+ address_and_info.second.type_name)};
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.h b/chromium/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeaea..f491e41a3f2 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.h
@@ -8,13 +8,13 @@
#include <stddef.h>
#include <stdint.h>
+#include <type_traits>
#include <utility>
#include "base/bits.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/process/process_metrics.h"
-#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "build/build_config.h"
@@ -39,33 +39,36 @@ void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
template <size_t NumBuckets, class Key, class Value, class KeyHasher>
class FixedHashMap {
// To keep things simple we don't call destructors.
- static_assert(is_trivially_destructible<Key>::value &&
- is_trivially_destructible<Value>::value,
+ static_assert(std::is_trivially_destructible<Key>::value &&
+ std::is_trivially_destructible<Value>::value,
"Key and Value shouldn't have destructors");
+
public:
using KVPair = std::pair<const Key, Value>;
// For implementation simplicity API uses integer index instead
// of iterators. Most operations (except Find) on KVIndex are O(1).
using KVIndex = size_t;
- static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+ enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
// Capacity controls how many items this hash map can hold, and largely
// affects memory footprint.
- FixedHashMap(size_t capacity)
- : num_cells_(capacity),
- cells_(static_cast<Cell*>(
- AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
- buckets_(static_cast<Bucket*>(
- AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
- free_list_(nullptr),
- next_unused_cell_(0) {}
+ explicit FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ num_inserts_dropped_(0),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
~FixedHashMap() {
FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
}
+ // Returns {kInvalidKVIndex, false} if the table is full.
std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
Cell** p_cell = Lookup(key);
Cell* cell = *p_cell;
@@ -74,7 +77,15 @@ class FixedHashMap {
}
// Get a free cell and link it.
- *p_cell = cell = GetFreeCell();
+ cell = GetFreeCell();
+ if (!cell) {
+ if (num_inserts_dropped_ <
+ std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+ ++num_inserts_dropped_;
+ }
+ return {kInvalidKVIndex, false};
+ }
+ *p_cell = cell;
cell->p_prev = p_cell;
cell->next = nullptr;
@@ -137,6 +148,8 @@ class FixedHashMap {
bits::Align(sizeof(Bucket) * NumBuckets, page_size);
}
+ size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
private:
friend base::trace_event::AllocationRegisterTest;
@@ -175,7 +188,8 @@ class FixedHashMap {
}
// Returns a cell that is not being used to store an entry (either by
- // recycling from the free list or by taking a fresh cell).
+ // recycling from the free list or by taking a fresh cell). May return
+ // nullptr if the hash table has run out of memory.
Cell* GetFreeCell() {
// First try to re-use a cell from the free list.
if (free_list_) {
@@ -184,26 +198,14 @@ class FixedHashMap {
return cell;
}
- // Otherwise pick the next cell that has not been touched before.
- size_t idx = next_unused_cell_;
- next_unused_cell_++;
-
// If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside
- // of the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
-
- CHECK_LT(next_unused_cell_, num_cells_ + 1)
- << "Allocation Register hash table has too little capacity. Increase "
- "the capacity to run heap profiler in large sessions.";
-
- return &cells_[idx];
+ // was reserved for |cells_|), return nullptr.
+ if (next_unused_cell_ >= num_cells_) {
+ return nullptr;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ return &cells_[next_unused_cell_++];
}
// Returns a value in the range [0, NumBuckets - 1] (inclusive).
@@ -219,6 +221,9 @@ class FixedHashMap {
// Number of cells.
size_t const num_cells_;
+ // Number of calls to Insert() that were lost because the hashtable was full.
+ size_t num_inserts_dropped_;
+
// The array of cells. This array is backed by mmapped memory. Lower indices
// are accessed first, higher indices are accessed only when the |free_list_|
// is empty. This is to minimize the amount of resident memory used.
@@ -248,6 +253,8 @@ class TraceEventMemoryOverhead;
// freed. Internally it has two hashtables: one for Backtraces and one for
// actual allocations. Sizes of both hashtables are fixed, and this class
// allocates (mmaps) only in its constructor.
+//
+// When either hash table hits max size, new inserts are dropped.
class BASE_EXPORT AllocationRegister {
public:
// Details about an allocation.
@@ -282,7 +289,10 @@ class BASE_EXPORT AllocationRegister {
// Inserts allocation details into the table. If the address was present
// already, its details are updated. |address| must not be null.
- void Insert(const void* address,
+ //
+ // Returns true if an insert occurred. Inserts may fail because the table
+ // is full.
+ bool Insert(const void* address,
size_t size,
const AllocationContext& context);
@@ -359,6 +369,14 @@ class BASE_EXPORT AllocationRegister {
AllocationMap allocations_;
BacktraceMap backtraces_;
+ // Sentinel used when the |backtraces_| table is full.
+ //
+ // This is a slight abstraction to allow for constant propagation. It
+ // knows that the sentinel will be the first item inserted into the table
+ // and that the first index returned will be 0. The constructor DCHECKs
+ // this assumption.
+ enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
+
DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};
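The Insert() contract documented above has three outcomes. A sketch of how a
caller can tell them apart (Map stands for any FixedHashMap instantiation):

    auto result = map.Insert(key, value);  // std::pair<KVIndex, bool>
    if (result.second) {
      // Fresh insert; result.first indexes the new entry.
    } else if (result.first != Map::kInvalidKVIndex) {
      // Key already present; result.first indexes the existing entry.
    } else {
      // Table full: the insert was dropped and counted by
      // num_inserts_dropped().
    }

AllocationRegister::Insert() in the .cc hunk above follows this shape, mapping the
"already present" case to an overwrite and the "full" case to returning false.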
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
index 7eee61aa35e..950bab38740 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register_unittest.cc
@@ -245,24 +245,59 @@ TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(19), &a));
}
-// Check that the process aborts due to hitting the guard page when inserting
-// too many elements.
-#if GTEST_HAS_DEATH_TEST
-TEST_F(AllocationRegisterTest, OverflowDeathTest) {
+// Check that the table handles overflows of the allocation storage gracefully.
+TEST_F(AllocationRegisterTest, OverflowAllocationTest) {
const size_t allocation_capacity = GetAllocationCapacityPerPage();
AllocationRegister reg(allocation_capacity, kBacktraceCapacity);
AllocationContext ctx;
size_t i;
- // Fill up all of the memory allocated for the register's allocation map.
- for (i = 0; i < allocation_capacity; i++) {
- reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx);
+ for (int repetition = 0; repetition < 3; repetition++) {
+ // Fill up all of the memory allocated for the register's allocation map.
+ for (i = 0; i < allocation_capacity; i++)
+ ASSERT_TRUE(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx));
+
+ // Adding just one extra element should cause overflow.
+ ASSERT_FALSE(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx));
+
+ // Removing all allocations shouldn't cause any crash.
+ for (i = 0; i < allocation_capacity; i++) {
+ reg.Remove(reinterpret_cast<void*>(i + 1));
+ }
+ }
+}
+
+// Check that the table handles overflows of the backtrace storage (but not the
+// allocations storage) gracefully.
+TEST_F(AllocationRegisterTest, OverflowBacktraceTest) {
+ const size_t backtrace_capacity = GetAllocationCapacityPerPage();
+ const size_t allocation_capacity = 3 * GetAllocationCapacityPerPage();
+ AllocationRegister reg(allocation_capacity, backtrace_capacity);
+ AllocationContext ctx;
+ size_t i;
+
+ // Fill up all of the memory allocated for the backtrace allocation map,
+ // but do not fill the allocations map.
+ for (i = 1; i <= backtrace_capacity * 2; i++) {
+ void* addr = reinterpret_cast<void*>(i);
+ ctx.backtrace.frames[0] = StackFrame::FromProgramCounter(addr);
+ ctx.backtrace.frame_count = 1;
+ ASSERT_TRUE(reg.Insert(addr, 1, ctx));
}
- // Adding just one extra element should cause overflow.
- ASSERT_DEATH(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx), "");
+ // Removing all allocations shouldn't cause any crash.
+ for (i = 1; i <= backtrace_capacity * 2; i++) {
+ AllocationRegister::Allocation allocation = {};
+ ASSERT_TRUE(reg.Get(reinterpret_cast<void*>(i), &allocation));
+
+ // This is just to check the integrity of the backtrace sentinel and to make
+ // sure that we don't hit the guard page.
+ ASSERT_LT(allocation.context.backtrace.frame_count,
+ static_cast<size_t>(Backtrace::kMaxFrameCount));
+
+ reg.Remove(reinterpret_cast<void*>(i));
+ }
}
-#endif
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index 52a432b8633..7f2706092ee 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -54,10 +54,10 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self,
return ptr;
}
-void* HookllocAligned(const AllocatorDispatch* self,
- size_t alignment,
- size_t size,
- void* context) {
+void* HookAllocAligned(const AllocatorDispatch* self,
+ size_t alignment,
+ size_t size,
+ void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->alloc_aligned_function(next, alignment, size, context);
if (ptr)
@@ -129,7 +129,7 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
AllocatorDispatch g_allocator_hooks = {
&HookAlloc, /* alloc_function */
&HookZeroInitAlloc, /* alloc_zero_initialized_function */
- &HookllocAligned, /* alloc_aligned_function */
+ &HookAllocAligned, /* alloc_aligned_function */
&HookRealloc, /* realloc_function */
&HookFree, /* free_function */
&HookGetSizeEstimate, /* get_size_estimate_function */
@@ -216,12 +216,18 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
total_virtual_size = stats.size_allocated;
allocated_objects_size = stats.size_in_use;
- // The resident size is approximated to the max size in use, which would count
- // the total size of all regions other than the free bytes at the end of each
- // region. In each allocation region the allocations are rounded off to a
- // fixed quantum, so the excess region will not be resident.
- // See crrev.com/1531463004 for detailed explanation.
- resident_size = stats.max_size_in_use;
+ // Resident size is approximated pretty well by stats.max_size_in_use.
+ // However, on macOS, freed blocks are both resident and reusable, which is
+ // semantically equivalent to deallocated. The implementation of libmalloc
+ // will also only hold a fixed number of freed regions before actually
+ // starting to deallocate them, so stats.max_size_in_use is also not
+ // representative of the peak size. As a result, stats.max_size_in_use is
+ // typically somewhere between actually resident [non-reusable] pages, and
+ // peak size. This is not very useful, so we just use stats.size_in_use for
+ // resident_size, even though it's an underestimate and fails to account for
+ // fragmentation. See
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
+ resident_size = stats.size_in_use;
#elif defined(OS_WIN)
WinHeapInfo main_heap_info = {};
WinHeapMemoryDumpImpl(&main_heap_info);
diff --git a/chromium/base/trace_event/memory_allocator_dump.cc b/chromium/base/trace_event/memory_allocator_dump.cc
index 7583763889e..2692521c096 100644
--- a/chromium/base/trace_event/memory_allocator_dump.cc
+++ b/chromium/base/trace_event/memory_allocator_dump.cc
@@ -29,7 +29,8 @@ MemoryAllocatorDump::MemoryAllocatorDump(const std::string& absolute_name,
process_memory_dump_(process_memory_dump),
attributes_(new TracedValue),
guid_(guid),
- flags_(Flags::DEFAULT) {
+ flags_(Flags::DEFAULT),
+ size_(0) {
// The |absolute_name| cannot be empty.
DCHECK(!absolute_name.empty());
@@ -59,6 +60,8 @@ MemoryAllocatorDump::~MemoryAllocatorDump() {
void MemoryAllocatorDump::AddScalar(const char* name,
const char* units,
uint64_t value) {
+ if (strcmp(kNameSize, name) == 0)
+ size_ = value;
SStringPrintf(&string_conversion_buffer_, "%" PRIx64, value);
attributes_->BeginDictionary(name);
attributes_->SetString("type", kTypeScalar);
diff --git a/chromium/base/trace_event/memory_allocator_dump.h b/chromium/base/trace_event/memory_allocator_dump.h
index c781f071bba..99ff114e5c7 100644
--- a/chromium/base/trace_event/memory_allocator_dump.h
+++ b/chromium/base/trace_event/memory_allocator_dump.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
@@ -85,11 +86,21 @@ class BASE_EXPORT MemoryAllocatorDump {
TracedValue* attributes_for_testing() const { return attributes_.get(); }
private:
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ friend class MemoryDumpManager;
+ FRIEND_TEST_ALL_PREFIXES(MemoryAllocatorDumpTest, GetSize);
+
+ // Get the size for this dump.
+ // The size is the value set with AddScalar(kNameSize, kUnitsBytes, size);
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ uint64_t GetSize() const { return size_; };
+
const std::string absolute_name_;
ProcessMemoryDump* const process_memory_dump_; // Not owned (PMD owns this).
std::unique_ptr<TracedValue> attributes_;
MemoryAllocatorDumpGuid guid_;
int flags_; // See enum Flags.
+ uint64_t size_;
// A local buffer for Sprintf conversion on fastpath. Avoids allocating
// temporary strings on each AddScalar() call.
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index 1bf9715917d..e1818f6eecc 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -172,6 +172,16 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
pmd.AsValueInto(traced_value.get());
}
+TEST(MemoryAllocatorDumpTest, GetSize) {
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
+ MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 1);
+ dump->AddScalar("foo", MemoryAllocatorDump::kUnitsBytes, 2);
+ EXPECT_EQ(1u, dump->GetSize());
+}
+
// DEATH tests are not supported in Android / iOS.
#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 1e8a416a380..5f217eaf9a0 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -20,6 +20,9 @@
#include "base/debug/stack_trace.h"
#include "base/debug/thread_heap_usage_tracker.h"
#include "base/memory/ptr_util.h"
+#include "base/optional.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_piece.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
@@ -32,6 +35,7 @@
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/memory_peak_detector.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
@@ -80,7 +84,7 @@ const char* const kStrictThreadCheckBlacklist[] = {
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
-void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
+void OnGlobalDumpDone(GlobalMemoryDumpCallback wrapped_callback,
uint64_t dump_guid,
bool success) {
char guid_str[20];
@@ -96,6 +100,15 @@ void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
}
}
+void FillOsDumpFromProcessMemoryDump(
+ const ProcessMemoryDump* pmd,
+ MemoryDumpCallbackResult::OSMemDump* osDump) {
+ if (pmd->has_process_totals()) {
+ const ProcessMemoryTotals* totals = pmd->process_totals();
+ osDump->resident_set_kb = totals->resident_set_bytes() / 1024;
+ }
+}
+
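FillOsDumpFromProcessMemoryDump() fills the OS dump only when the PMD carries process totals, converting resident_set_bytes() to KiB. An illustrative sketch, assuming a populated ProcessMemoryDump* |pmd|:

  MemoryDumpCallbackResult::OSMemDump os_dump;
  FillOsDumpFromProcessMemoryDump(pmd, &os_dump);
  // If |pmd| has process totals, os_dump.resident_set_kb now holds
  // resident_set_bytes() / 1024; otherwise os_dump is left untouched.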
// Proxy class which wraps a ConvertableToTraceFormat owned by the
// |session_state| into a proxy object that can be added to the trace event log.
// This is to solve the problem that the MemoryDumpSessionState is refcounted
@@ -123,6 +136,16 @@ struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
GetterFunctPtr const getter_function;
};
+void OnPeakDetected(MemoryDumpLevelOfDetail level_of_detail) {
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PEAK_MEMORY_USAGE, level_of_detail);
+}
+
+void OnPeriodicSchedulerTick(MemoryDumpLevelOfDetail level_of_detail) {
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
} // namespace
// static
@@ -161,9 +184,7 @@ void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
}
MemoryDumpManager::MemoryDumpManager()
- : delegate_(nullptr),
- is_coordinator_(false),
- memory_tracing_enabled_(0),
+ : memory_tracing_enabled_(0),
tracing_process_id_(kInvalidTracingProcessId),
dumper_registrations_ignored_for_testing_(false),
heap_profiling_enabled_(false) {
@@ -195,21 +216,20 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
if (profiling_mode == "") {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
-#if HAVE_TRACE_STACK_FRAME_POINTERS && \
- (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+#if !defined(OS_NACL)
} else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
- // We need frame pointers for native tracing to work, and they are
- // enabled in profiling and debug builds.
+ // If we don't have frame pointers, native tracing falls back to
+ // using base::debug::StackTrace, which may be slow.
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::NATIVE_STACK);
-#endif
+#endif // !defined(OS_NACL)
#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
} else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) {
// Enable heap tracking, which in turn enables capture of heap usage
// tracking in tracked_objects.cc.
if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
-#endif
+#endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
} else {
CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
<< switches::kEnableHeapProfiling << " flag.";
@@ -220,14 +240,13 @@ void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
heap_profiling_enabled_ = true;
}
-void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
- bool is_coordinator) {
+void MemoryDumpManager::Initialize(
+ std::unique_ptr<MemoryDumpManagerDelegate> delegate) {
{
AutoLock lock(lock_);
DCHECK(delegate);
DCHECK(!delegate_);
- delegate_ = delegate;
- is_coordinator_ = is_coordinator;
+ delegate_ = std::move(delegate);
EnableHeapProfilingIfNeeded();
}
@@ -249,11 +268,19 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
!(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
// Create trace config with heap profiling filter.
+ std::string filter_string = "*";
+ const char* const kFilteredCategories[] = {
+ TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
+ MemoryDumpManager::kTraceCategory};
+ for (const char* cat : kFilteredCategories)
+ filter_string = filter_string + "," + cat;
+ TraceConfigCategoryFilter category_filter;
+ category_filter.InitializeFromString(filter_string);
+
TraceConfig::EventFilterConfig heap_profiler_filter_config(
HeapProfilerEventFilter::kName);
- heap_profiler_filter_config.AddIncludedCategory("*");
- heap_profiler_filter_config.AddIncludedCategory(
- MemoryDumpManager::kTraceCategory);
+ heap_profiler_filter_config.SetCategoryFilter(category_filter);
+
TraceConfig::EventFilters filters;
filters.push_back(heap_profiler_filter_config);
TraceConfig filtering_trace_config;
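For reference, the include list assembled above expands to "*,disabled-by-default-net,disabled-by-default-cc,disabled-by-default-memory-infra", since TRACE_DISABLED_BY_DEFAULT() prepends the "disabled-by-default-" prefix. A standalone sketch of the same string-building pattern, with a shortened category list:

  std::string filter_string = "*";
  const char* const kCategories[] = {"disabled-by-default-net",
                                     "disabled-by-default-cc"};
  for (const char* cat : kCategories)
    filter_string = filter_string + "," + cat;
  // filter_string == "*,disabled-by-default-net,disabled-by-default-cc"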
@@ -329,14 +356,8 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
if (already_registered)
return;
- // The list of polling MDPs is populated OnTraceLogEnabled(). This code
- // deals with the case of a MDP capable of fast polling that is registered
- // after the OnTraceLogEnabled()
- if (options.is_fast_polling_supported && dump_thread_) {
- dump_thread_->task_runner()->PostTask(
- FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
- Unretained(this), mdpinfo));
- }
+ if (options.is_fast_polling_supported)
+ MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
}
if (heap_profiling_enabled_)
@@ -375,7 +396,7 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
// - At the end of this function, if no dump is in progress.
// - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
// removed from |pending_dump_providers|.
- // - When the provider is removed from |dump_providers_for_polling_|.
+ // - When the provider is removed from other clients (MemoryPeakDetector).
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
} else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
@@ -402,11 +423,9 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
<< "unregister itself in a racy way. Please file a crbug.";
}
- if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
+ if ((*mdp_iter)->options.is_fast_polling_supported) {
DCHECK(take_mdp_ownership_and_delete_async);
- dump_thread_->task_runner()->PostTask(
- FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
- Unretained(this), *mdp_iter));
+ MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
}
// The MDPInfo instance can still be referenced by the
@@ -418,32 +437,10 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
dump_providers_.erase(mdp_iter);
}
-void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
- AutoLock lock(lock_);
- dump_providers_for_polling_.insert(mdpinfo);
-
- // Notify ready for polling when first polling supported provider is
- // registered. This handles the case where OnTraceLogEnabled() did not notify
- // ready since no polling supported mdp has yet been registered.
- if (dump_providers_for_polling_.size() == 1)
- dump_scheduler_->NotifyPollingSupported();
-}
-
-void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
- mdpinfo->dump_provider->SuspendFastMemoryPolling();
-
- AutoLock lock(lock_);
- dump_providers_for_polling_.erase(mdpinfo);
- DCHECK(!dump_providers_for_polling_.empty())
- << "All polling MDPs cannot be unregistered.";
-}
-
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
- const MemoryDumpCallback& callback) {
+ const GlobalMemoryDumpCallback& callback) {
// Bail out immediately if tracing is not enabled at all or if the dump mode
// is not allowed.
if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
@@ -466,29 +463,28 @@ void MemoryDumpManager::RequestGlobalDump(
kTraceCategory, "GlobalMemoryDump", TRACE_ID_LOCAL(guid), "dump_type",
MemoryDumpTypeToString(dump_type), "level_of_detail",
MemoryDumpLevelOfDetailToString(level_of_detail));
- MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
-
- // Technically there is no need to grab the |lock_| here as the delegate is
- // long-lived and can only be set by Initialize(), which is locked and
- // necessarily happens before memory_tracing_enabled_ == true.
- // Not taking the |lock_|, though, is lakely make TSan barf and, at this point
- // (memory-infra is enabled) we're not in the fast-path anymore.
- MemoryDumpManagerDelegate* delegate;
- {
- AutoLock lock(lock_);
- delegate = delegate_;
- }
+ GlobalMemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
// The delegate will coordinate the IPC broadcast and at some point invoke
// CreateProcessDump() to get a dump for the current process.
MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
- delegate->RequestGlobalMemoryDump(args, wrapped_callback);
+ delegate_->RequestGlobalMemoryDump(args, wrapped_callback);
+}
+
+void MemoryDumpManager::GetDumpProvidersForPolling(
+ std::vector<scoped_refptr<MemoryDumpProviderInfo>>* providers) {
+ DCHECK(providers->empty());
+ AutoLock lock(lock_);
+ for (const scoped_refptr<MemoryDumpProviderInfo>& mdp : dump_providers_) {
+ if (mdp->options.is_fast_polling_supported)
+ providers->push_back(mdp);
+ }
}
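GetDumpProvidersForPolling() is the accessor the MemoryPeakDetector gets bound to in OnTraceLogEnabled() below. A hedged usage sketch from the detector's side (|get_providers| stands for the callback bound via BindRepeating(); per the DCHECK, the vector must be empty on entry):

  std::vector<scoped_refptr<MemoryDumpProviderInfo>> providers;
  get_providers.Run(&providers);  // Invokes GetDumpProvidersForPolling().
  for (const auto& mdpinfo : providers) {
    uint64_t total = 0;
    mdpinfo->dump_provider->PollFastMemoryTotal(&total);
  }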
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail) {
- RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
+ RequestGlobalDump(dump_type, level_of_detail, GlobalMemoryDumpCallback());
}
bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
@@ -502,8 +498,9 @@ bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
return false;
}
-void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) {
+void MemoryDumpManager::CreateProcessDump(
+ const MemoryDumpRequestArgs& args,
+ const ProcessMemoryDumpCallback& callback) {
char guid_str[20];
sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
@@ -536,6 +533,9 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
// disabled.
CHECK(!session_state_ ||
session_state_->IsDumpModeAllowed(args.level_of_detail));
+
+ // If enabled, holds back the peak detector, resetting its estimation window.
+ MemoryPeakDetector::GetInstance()->Throttle();
}
// Start the process dump. This involves task runner hops as specified by the
@@ -610,8 +610,8 @@ void MemoryDumpManager::SetupNextMemoryDump(
}
bool did_post_task = task_runner->PostTask(
- FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
- Unretained(pmd_async_state.get())));
+ FROM_HERE, BindOnce(&MemoryDumpManager::InvokeOnMemoryDump,
+ Unretained(this), Unretained(pmd_async_state.get())));
if (did_post_task) {
// Ownership is transferred to InvokeOnMemoryDump().
@@ -711,26 +711,16 @@ void MemoryDumpManager::InvokeOnMemoryDump(
SetupNextMemoryDump(std::move(pmd_async_state));
}
-bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
-#if DCHECK_IS_ON()
- {
- AutoLock lock(lock_);
- if (dump_thread_)
- DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
- }
-#endif
- if (dump_providers_for_polling_.empty())
- return false;
-
- *memory_total = 0;
- // Note that we call PollFastMemoryTotal() even if the dump provider is
- // disabled (unregistered). This is to avoid taking lock while polling.
- for (const auto& mdpinfo : dump_providers_for_polling_) {
- uint64_t value = 0;
- mdpinfo->dump_provider->PollFastMemoryTotal(&value);
- *memory_total += value;
+// static
+uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern,
+ const ProcessMemoryDump* pmd) {
+ uint64_t sum = 0;
+ for (const auto& kv : pmd->allocator_dumps()) {
+ auto name = StringPiece(kv.first);
+ if (MatchPattern(name, pattern))
+ sum += kv.second->GetSize();
}
- return true;
+ return sum / 1024;
}
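GetDumpsSumKb() aggregates the sizes cached by MemoryAllocatorDump::AddScalar() across all allocator dumps whose names match a base::MatchPattern() glob. Illustrative use (dump names hypothetical): for a PMD containing "v8/isolate_1/heap" and "v8/isolate_2/heap", the pattern "v8/*" sums both kNameSize values and converts the total to KiB:

  // Inside MemoryDumpManager (the helper is private and static):
  uint32_t v8_kb = GetDumpsSumKb("v8/*", process_memory_dump);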
// static
@@ -743,13 +733,17 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
pmd_async_state->callback_task_runner;
callback_task_runner->PostTask(
- FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
- Passed(&pmd_async_state)));
+ FROM_HERE, BindOnce(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
+ Passed(&pmd_async_state)));
return;
}
TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace");
+ // The results struct to fill.
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ base::Optional<MemoryDumpCallbackResult> result_opt;
+
for (const auto& kv : pmd_async_state->process_dumps) {
ProcessId pid = kv.first; // kNullProcessId for the current process.
ProcessMemoryDump* process_memory_dump = kv.second.get();
@@ -770,6 +764,35 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
kTraceEventNumArgs, kTraceEventArgNames,
kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
TRACE_EVENT_FLAG_HAS_ID);
+
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ // Don't try to fill the struct in detailed mode since it is hard to avoid
+ // double counting.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::DETAILED)
+ continue;
+ MemoryDumpCallbackResult result;
+ // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
+ if (pid == kNullProcessId) {
+ result.chrome_dump.malloc_total_kb =
+ GetDumpsSumKb("malloc", process_memory_dump);
+ result.chrome_dump.v8_total_kb =
+ GetDumpsSumKb("v8/*", process_memory_dump);
+
+ // partition_alloc reports sizes for both allocated_objects and
+ // partitions. The memory used by allocated_objects is a subset of the
+ // partitions memory, so to avoid double counting we only count the
+ // partitions memory.
+ result.chrome_dump.partition_alloc_total_kb =
+ GetDumpsSumKb("partition_alloc/partitions/*", process_memory_dump);
+ result.chrome_dump.blink_gc_total_kb =
+ GetDumpsSumKb("blink_gc", process_memory_dump);
+ FillOsDumpFromProcessMemoryDump(process_memory_dump, &result.os_dump);
+ } else {
+ auto& os_dump = result.extra_processes_dump[pid];
+ FillOsDumpFromProcessMemoryDump(process_memory_dump, &os_dump);
+ }
+ result_opt = result;
}
bool tracing_still_enabled;
@@ -781,7 +804,8 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
}
if (!pmd_async_state->callback.is_null()) {
- pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
+ pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful,
+ result_opt);
pmd_async_state->callback.Reset();
}
@@ -839,46 +863,60 @@ void MemoryDumpManager::OnTraceLogEnabled() {
session_state, &MemoryDumpSessionState::type_name_deduplicator));
}
- std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
- new MemoryDumpScheduler(this, dump_thread->task_runner()));
- DCHECK_LE(memory_dump_config.triggers.size(), 3u);
- for (const auto& trigger : memory_dump_config.triggers) {
- if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
- NOTREACHED();
- continue;
- }
- dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
- trigger.min_time_between_dumps_ms);
- }
-
- {
- AutoLock lock(lock_);
+ AutoLock lock(lock_);
- DCHECK(delegate_); // At this point we must have a delegate.
- session_state_ = session_state;
+ DCHECK(delegate_); // At this point we must have a delegate.
+ session_state_ = session_state;
- DCHECK(!dump_thread_);
- dump_thread_ = std::move(dump_thread);
- dump_scheduler_ = std::move(dump_scheduler);
+ DCHECK(!dump_thread_);
+ dump_thread_ = std::move(dump_thread);
- subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+ subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
- dump_providers_for_polling_.clear();
- for (const auto& mdpinfo : dump_providers_) {
- if (mdpinfo->options.is_fast_polling_supported)
- dump_providers_for_polling_.insert(mdpinfo);
+ MemoryDumpScheduler::Config periodic_config;
+ bool peak_detector_configured = false;
+ for (const auto& trigger : memory_dump_config.triggers) {
+ if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
+ NOTREACHED();
+ continue;
+ }
+ if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
+ if (periodic_config.triggers.empty()) {
+ periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick);
+ }
+ periodic_config.triggers.push_back(
+ {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
+ } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
+ // At most one peak trigger is allowed.
+ CHECK(!peak_detector_configured);
+ peak_detector_configured = true;
+ MemoryPeakDetector::GetInstance()->Setup(
+ BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling,
+ Unretained(this)),
+ dump_thread_->task_runner(),
+ BindRepeating(&OnPeakDetected, trigger.level_of_detail));
+
+ MemoryPeakDetector::Config peak_config;
+ peak_config.polling_interval_ms = 10;
+ peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms;
+ peak_config.enable_verbose_poll_tracing =
+ trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
+ MemoryPeakDetector::GetInstance()->Start(peak_config);
+
+ // When peak detection is enabled, trigger a dump straight away as it
+ // gives a good reference point for analyzing the trace.
+ if (delegate_->IsCoordinator()) {
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, BindRepeating(&OnPeakDetected, trigger.level_of_detail));
+ }
}
- // Notify polling supported only if some polling supported provider was
- // registered, else RegisterPollingMDPOnDumpThread() will notify when first
- // polling MDP registers.
- if (!dump_providers_for_polling_.empty())
- dump_scheduler_->NotifyPollingSupported();
-
- // Only coordinator process triggers periodic global memory dumps.
- if (is_coordinator_)
- dump_scheduler_->NotifyPeriodicTriggerSupported();
}
+ // Only coordinator process triggers periodic global memory dumps.
+ if (delegate_->IsCoordinator() && !periodic_config.triggers.empty()) {
+ MemoryDumpScheduler::GetInstance()->Start(periodic_config,
+ dump_thread_->task_runner());
+ }
}
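Net effect of the rewritten OnTraceLogEnabled(): every PERIODIC_INTERVAL trigger accumulates into a single MemoryDumpScheduler::Config, while at most one PEAK_MEMORY_USAGE trigger configures the MemoryPeakDetector. A hedged sketch of the scheduler half, with illustrative periods:

  MemoryDumpScheduler::Config periodic_config;
  periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick);
  periodic_config.triggers.push_back(
      {MemoryDumpLevelOfDetail::LIGHT, 250 /* min_time_between_dumps_ms */});
  periodic_config.triggers.push_back(
      {MemoryDumpLevelOfDetail::DETAILED, 2000 /* min_time_between_dumps_ms */});
  MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                            dump_thread_->task_runner());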
void MemoryDumpManager::OnTraceLogDisabled() {
@@ -889,28 +927,18 @@ void MemoryDumpManager::OnTraceLogDisabled() {
return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
- std::unique_ptr<MemoryDumpScheduler> scheduler;
{
AutoLock lock(lock_);
+ MemoryDumpScheduler::GetInstance()->Stop();
+ MemoryPeakDetector::GetInstance()->TearDown();
dump_thread = std::move(dump_thread_);
session_state_ = nullptr;
- scheduler = std::move(dump_scheduler_);
}
- scheduler->DisableAllTriggers();
// Thread stops are blocking and must be performed outside of the |lock_|
// or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
if (dump_thread)
dump_thread->Stop();
-
- // |dump_providers_for_polling_| must be cleared only after the dump thread is
- // stopped (polling tasks are done).
- {
- AutoLock lock(lock_);
- for (const auto& mdpinfo : dump_providers_for_polling_)
- mdpinfo->dump_provider->SuspendFastMemoryPolling();
- dump_providers_for_polling_.clear();
- }
}
bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
@@ -920,43 +948,11 @@ bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
return session_state_->IsDumpModeAllowed(dump_mode);
}
-uint64_t MemoryDumpManager::GetTracingProcessId() const {
- return delegate_->GetTracingProcessId();
-}
-
-MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
- MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode)
- : dump_provider(dump_provider),
- name(name),
- task_runner(std::move(task_runner)),
- options(options),
- consecutive_failures(0),
- disabled(false),
- whitelisted_for_background_mode(whitelisted_for_background_mode) {}
-
-MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
-
-bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
- if (!a || !b)
- return a.get() < b.get();
- // Ensure that unbound providers (task_runner == nullptr) always run last.
- // Rationale: some unbound dump providers are known to be slow, keep them last
- // to avoid skewing timings of the other dump providers.
- return std::tie(a->task_runner, a->dump_provider) >
- std::tie(b->task_runner, b->dump_provider);
-}
-
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
scoped_refptr<MemoryDumpSessionState> session_state,
- MemoryDumpCallback callback,
+ ProcessMemoryDumpCallback callback,
scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner)
: req_args(req_args),
session_state(std::move(session_state)),
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index a50c7ace70b..d359a8c3e97 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -9,7 +9,7 @@
#include <map>
#include <memory>
-#include <set>
+#include <unordered_set>
#include <vector>
#include "base/atomicops.h"
@@ -18,10 +18,20 @@
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_provider_info.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
+// Forward declare |MemoryDumpManagerDelegateImplTest| so that we can make it a
+// friend of |MemoryDumpManager| and give it access to |SetInstanceForTesting|.
+namespace memory_instrumentation {
+
+class MemoryDumpManagerDelegateImplTest;
+
+} // namespace memory_instrumentation
+
namespace base {
class SingleThreadTaskRunner;
@@ -32,7 +42,6 @@ namespace trace_event {
class MemoryDumpManagerDelegate;
class MemoryDumpProvider;
class MemoryDumpSessionState;
-class MemoryDumpScheduler;
// This is the interface exposed to the rest of the codebase to deal with
// memory tracing. The main entry point for clients is represented by
@@ -54,13 +63,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// On the other hand, the MemoryDumpManager will not be fully operational
// (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
// Arguments:
- // is_coordinator: if true this MemoryDumpManager instance will act as a
- // coordinator and schedule periodic dumps (if enabled via TraceConfig);
- // false when the MemoryDumpManager is initialized in a slave process.
// delegate: inversion-of-control interface for embedder-specific behaviors
// (multiprocess handshaking). See the lifetime and thread-safety
// requirements in the |MemoryDumpManagerDelegate| docstring.
- void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
+ void Initialize(std::unique_ptr<MemoryDumpManagerDelegate> delegate);
// (Un)Registers a MemoryDumpProvider instance.
// Args:
@@ -107,7 +113,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// successful).
void RequestGlobalDump(MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
- const MemoryDumpCallback& callback);
+ const GlobalMemoryDumpCallback& callback);
// Same as above (still asynchronous), but without callback.
void RequestGlobalDump(MemoryDumpType dump_type,
@@ -138,7 +144,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// retrieved by child processes only when tracing is enabled. This is
// intended to express cross-process sharing of memory dumps on the
// child-process side, without having to know its own child process id.
- uint64_t GetTracingProcessId() const;
+ uint64_t GetTracingProcessId() const { return tracing_process_id_; }
+ void set_tracing_process_id(uint64_t tracing_process_id) {
+ tracing_process_id_ = tracing_process_id;
+ }
// Returns the name for the allocated_objects dump. Use this to declare
// suballocator dumps from other dump providers.
@@ -158,71 +167,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
- friend class MemoryDumpScheduler;
-
- // Descriptor used to hold information about registered MDPs.
- // Some important considerations about lifetime of this object:
- // - In nominal conditions, all the MemoryDumpProviderInfo instances live in
- // the |dump_providers_| collection (% unregistration while dumping).
- // - Upon each dump they (actually their scoped_refptr-s) are copied into
- // the ProcessMemoryDumpAsyncState. This is to allow removal (see below).
- // - When the MDP.OnMemoryDump() is invoked, the corresponding MDPInfo copy
- // inside ProcessMemoryDumpAsyncState is removed.
- // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
- // - If UnregisterDumpProvider() is called while a dump is in progress, the
- // MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
- // when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
- // - The non-const fields of MemoryDumpProviderInfo are safe to access only
- // on tasks running in the |task_runner|, unless the thread has been
- // destroyed.
- struct MemoryDumpProviderInfo
- : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
- // Define a total order based on the |task_runner| affinity, so that MDPs
- // belonging to the same SequencedTaskRunner are adjacent in the set.
- struct Comparator {
- bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
- const scoped_refptr<MemoryDumpProviderInfo>& b) const;
- };
- using OrderedSet =
- std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
-
- MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
- const char* name,
- scoped_refptr<SequencedTaskRunner> task_runner,
- const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode);
-
- MemoryDumpProvider* const dump_provider;
-
- // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
- // nullptr in all other cases.
- std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
-
- // Human readable name, for debugging and testing. Not necessarily unique.
- const char* const name;
-
- // The task runner affinity. Can be nullptr, in which case the dump provider
- // will be invoked on |dump_thread_|.
- const scoped_refptr<SequencedTaskRunner> task_runner;
-
- // The |options| arg passed to RegisterDumpProvider().
- const MemoryDumpProvider::Options options;
-
- // For fail-safe logic (auto-disable failing MDPs).
- int consecutive_failures;
-
- // Flagged either by the auto-disable logic or during unregistration.
- bool disabled;
-
- // True if the dump provider is whitelisted for background mode.
- const bool whitelisted_for_background_mode;
-
- private:
- friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
- ~MemoryDumpProviderInfo();
-
- DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
- };
+ friend class memory_instrumentation::MemoryDumpManagerDelegateImplTest;
// Holds the state of a process memory dump that needs to be carried over
// across task runners in order to fulfil an asynchronous CreateProcessDump()
@@ -233,7 +178,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
scoped_refptr<MemoryDumpSessionState> session_state,
- MemoryDumpCallback callback,
+ ProcessMemoryDumpCallback callback,
scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner);
~ProcessMemoryDumpAsyncState();
@@ -260,7 +205,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
scoped_refptr<MemoryDumpSessionState> session_state;
// Callback passed to the initial call to CreateProcessDump().
- MemoryDumpCallback callback;
+ ProcessMemoryDumpCallback callback;
// The |success| field that will be passed as argument to the |callback|.
bool dump_successful;
@@ -288,6 +233,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
~MemoryDumpManager() override;
static void SetInstanceForTesting(MemoryDumpManager* instance);
+ static uint32_t GetDumpsSumKb(const std::string&, const ProcessMemoryDump*);
static void FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
@@ -296,7 +242,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// |callback| will be invoked asynchronously upon completion on the same
// thread on which CreateProcessDump() was called.
void CreateProcessDump(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback);
+ const ProcessMemoryDumpCallback& callback);
// Calls InvokeOnMemoryDump() for the next MDP on the task runner specified by
// the MDP during registration. On failure to do so, skips and continues to
@@ -309,14 +255,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// runner.
void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
- // Records a quick total memory usage in |memory_total|. This is used to track
- // and detect peaks in the memory usage of the process without having to
- // record all data from dump providers. This value is approximate to trade-off
- // speed, and not consistent with the rest of the memory-infra metrics. Must
- // be called on the dump thread.
- // Returns true if |memory_total| was updated by polling at least 1 MDP.
- bool PollFastMemoryTotal(uint64_t* memory_total);
-
// Helper for RegisterDumpProvider* functions.
void RegisterDumpProviderInternal(
MemoryDumpProvider* mdp,
@@ -328,21 +266,15 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
- // Adds / removes provider that supports polling to
- // |dump_providers_for_polling_|.
- void RegisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
- void UnregisterPollingMDPOnDumpThread(
- scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+ // Fills the passed vector with the subset of dump providers which were
+ // registered with is_fast_polling_supported == true.
+ void GetDumpProvidersForPolling(
+ std::vector<scoped_refptr<MemoryDumpProviderInfo>>*);
// An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
// runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
- // A copy of mdpinfo list that support polling. It must be accessed only on
- // the dump thread if dump thread exists.
- MemoryDumpProviderInfo::OrderedSet dump_providers_for_polling_;
-
// Shared among all the PMDs to keep state scoped to the tracing session.
scoped_refptr<MemoryDumpSessionState> session_state_;
@@ -351,10 +283,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
std::unordered_set<StringPiece, StringPieceHash>
strict_thread_check_blacklist_;
- MemoryDumpManagerDelegate* delegate_; // Not owned.
-
- // When true, this instance is in charge of coordinating periodic dumps.
- bool is_coordinator_;
+ std::unique_ptr<MemoryDumpManagerDelegate> delegate_;
// Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
// to guard against disabling logging while dumping on another thread.
@@ -364,9 +293,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// dump_providers_enabled_ list) when tracing is not enabled.
subtle::AtomicWord memory_tracing_enabled_;
- // For triggering memory dumps.
- std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
-
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
std::unique_ptr<Thread> dump_thread_;
@@ -388,19 +314,18 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// safe (i.e. should expect calls from any thread and handle thread hopping).
class BASE_EXPORT MemoryDumpManagerDelegate {
public:
- virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) = 0;
-
- // Returns tracing process id of the current process. This is used by
- // MemoryDumpManager::GetTracingProcessId.
- virtual uint64_t GetTracingProcessId() const = 0;
-
- protected:
MemoryDumpManagerDelegate() {}
virtual ~MemoryDumpManagerDelegate() {}
+ virtual void RequestGlobalMemoryDump(
+ const MemoryDumpRequestArgs& args,
+ const GlobalMemoryDumpCallback& callback) = 0;
+
+ virtual bool IsCoordinator() const = 0;
+
+ protected:
void CreateProcessDump(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) {
+ const ProcessMemoryDumpCallback& callback) {
MemoryDumpManager::GetInstance()->CreateProcessDump(args, callback);
}
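With |is_coordinator| folded into the delegate, an embedder supplies both the IPC fan-out and the coordinator bit through one object. A minimal sketch of a single-process delegate (the class name is hypothetical; compare MemoryDumpManagerDelegateForTesting in the unittest below, which also provides the ProcessDumpCallbackAdapter used here):

  class InProcessDelegate : public MemoryDumpManagerDelegate {
   public:
    explicit InProcessDelegate(bool is_coordinator)
        : is_coordinator_(is_coordinator) {}

    void RequestGlobalMemoryDump(
        const MemoryDumpRequestArgs& args,
        const GlobalMemoryDumpCallback& callback) override {
      // No IPC: adapt the global callback and dump the current process.
      ProcessMemoryDumpCallback process_callback =
          Bind(&ProcessDumpCallbackAdapter, callback);
      CreateProcessDump(args, process_callback);
    }

    bool IsCoordinator() const override { return is_coordinator_; }

   private:
    const bool is_coordinator_;
  };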
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 51d41943fba..ba76b9adec2 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -7,9 +7,11 @@
#include <stdint.h>
#include <memory>
+#include <utility>
#include <vector>
#include "base/bind_helpers.h"
+#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/message_loop/message_loop.h"
@@ -30,6 +32,7 @@
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -102,40 +105,55 @@ void OnTraceDataCollected(Closure quit_closure,
// Posts |task| to |task_runner| and blocks until it is executed.
void PostTaskAndWait(const tracked_objects::Location& from_here,
SequencedTaskRunner* task_runner,
- const base::Closure& task) {
+ base::OnceClosure task) {
base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- task_runner->PostTask(from_here, task);
- task_runner->PostTask(
- FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
+ task_runner->PostTask(from_here, std::move(task));
+ task_runner->PostTask(FROM_HERE, base::BindOnce(&WaitableEvent::Signal,
+ base::Unretained(&event)));
// The SequencedTaskRunner guarantees that |event| will only be signaled after
// |task| is executed.
event.Wait();
}
-} // namespace
+// Adapts a ProcessMemoryDumpCallback into a GlobalMemoryDumpCallback by
+// trimming off the result argument and calling the global callback.
+// TODO(fmeawad): we should keep the results for verification, but currently
+// all results are empty.
+void ProcessDumpCallbackAdapter(
+ GlobalMemoryDumpCallback callback,
+ uint64_t dump_guid,
+ bool success,
+ const base::Optional<base::trace_event::MemoryDumpCallbackResult>&) {
+ callback.Run(dump_guid, success);
+}
// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
public:
- MemoryDumpManagerDelegateForTesting() {
+ MemoryDumpManagerDelegateForTesting(bool is_coordinator)
+ : is_coordinator_(is_coordinator) {
ON_CALL(*this, RequestGlobalMemoryDump(_, _))
- .WillByDefault(Invoke(
- this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
+ .WillByDefault(Invoke([this](const MemoryDumpRequestArgs& args,
+ const GlobalMemoryDumpCallback& callback) {
+ ProcessMemoryDumpCallback process_callback =
+ Bind(&ProcessDumpCallbackAdapter, callback);
+ CreateProcessDump(args, process_callback);
+ }));
}
MOCK_METHOD2(RequestGlobalMemoryDump,
void(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback));
+ const GlobalMemoryDumpCallback& callback));
- uint64_t GetTracingProcessId() const override {
- NOTREACHED();
- return MemoryDumpManager::kInvalidTracingProcessId;
- }
+ bool IsCoordinator() const override { return is_coordinator_; }
// Promote the CreateProcessDump to public so it can be used by test fixtures.
using MemoryDumpManagerDelegate::CreateProcessDump;
+
+ private:
+ bool is_coordinator_;
};
class MockMemoryDumpProvider : public MemoryDumpProvider {
@@ -180,19 +198,19 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override {
NOTREACHED();
return false;
}
bool PostDelayedTask(const tracked_objects::Location& from_here,
- const Closure& task,
+ OnceClosure task,
TimeDelta delay) override {
num_of_post_tasks_++;
if (enabled_) {
return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
- task);
+ std::move(task));
}
return false;
}
@@ -210,6 +228,8 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
unsigned num_of_post_tasks_;
};
+} // namespace
+
class MemoryDumpManagerTest : public testing::Test {
public:
MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -220,43 +240,41 @@ class MemoryDumpManagerTest : public testing::Test {
mdm_.reset(new MemoryDumpManager());
MemoryDumpManager::SetInstanceForTesting(mdm_.get());
ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
- delegate_.reset(new MemoryDumpManagerDelegateForTesting);
}
void TearDown() override {
MemoryDumpManager::SetInstanceForTesting(nullptr);
+ delegate_ = nullptr;
mdm_.reset();
- delegate_.reset();
message_loop_.reset();
TraceLog::DeleteForTesting();
}
- // Turns a Closure into a MemoryDumpCallback, keeping track of the callback
- // result and taking care of posting the closure on the correct task runner.
- void DumpCallbackAdapter(scoped_refptr<SingleThreadTaskRunner> task_runner,
- Closure closure,
- uint64_t dump_guid,
- bool success) {
+ // Turns a Closure into a GlobalMemoryDumpCallback, keeping track of the
+ // callback result and taking care of posting the closure on the correct task
+ // runner.
+ void GlobalDumpCallbackAdapter(
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ Closure closure,
+ uint64_t dump_guid,
+ bool success) {
last_callback_success_ = success;
task_runner->PostTask(FROM_HERE, closure);
}
- void PollFastMemoryTotal(uint64_t* memory_total) {
- mdm_->PollFastMemoryTotal(memory_total);
- }
-
protected:
void InitializeMemoryDumpManager(bool is_coordinator) {
mdm_->set_dumper_registrations_ignored_for_testing(true);
- mdm_->Initialize(delegate_.get(), is_coordinator);
+ delegate_ = new MemoryDumpManagerDelegateForTesting(is_coordinator);
+ mdm_->Initialize(base::WrapUnique(delegate_));
}
void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail) {
RunLoop run_loop;
- MemoryDumpCallback callback =
- Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
- ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
+ GlobalMemoryDumpCallback callback = Bind(
+ &MemoryDumpManagerTest::GlobalDumpCallbackAdapter, Unretained(this),
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
run_loop.Run();
}
@@ -274,7 +292,7 @@ class MemoryDumpManagerTest : public testing::Test {
void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
bool IsPeriodicDumpingEnabled() const {
- return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
+ return MemoryDumpScheduler::GetInstance()->is_enabled_for_testing();
}
int GetMaxConsecutiveFailuresCount() const {
@@ -283,7 +301,7 @@ class MemoryDumpManagerTest : public testing::Test {
const MemoryDumpProvider::Options kDefaultOptions;
std::unique_ptr<MemoryDumpManager> mdm_;
- std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+ MemoryDumpManagerDelegateForTesting* delegate_;
bool last_callback_success_;
private:
@@ -313,9 +331,10 @@ TEST_F(MemoryDumpManagerTest, SingleDumper) {
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
- for (int i = 0; i < 3; ++i)
+ for (int i = 0; i < 3; ++i) {
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
+ }
DisableTracing();
mdm_->UnregisterDumpProvider(&mdp);
@@ -435,7 +454,13 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
-TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+// Flaky on iOS, see crbug.com/706874
+#if defined(OS_IOS)
+#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
+#else
+#define MAYBE_RegistrationConsistency RegistrationConsistency
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
@@ -723,10 +748,10 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
threads[other_idx]->task_runner();
MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
auto on_dump = [this, other_runner, other_mdp, &on_memory_dump_call_count](
- const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+ const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
PostTaskAndWait(FROM_HERE, other_runner.get(),
- base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
- base::Unretained(&*mdm_), other_mdp));
+ base::BindOnce(&MemoryDumpManager::UnregisterDumpProvider,
+ base::Unretained(&*mdm_), other_mdp));
on_memory_dump_call_count++;
return true;
};
@@ -755,9 +780,6 @@ TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
mdp1->enable_mock_destructor = true;
mdp2->enable_mock_destructor = true;
-
- EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
- EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1);
EXPECT_CALL(*mdp1, Destructor());
EXPECT_CALL(*mdp2, Destructor());
@@ -766,53 +788,37 @@ TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
RegisterDumpProvider(mdp1.get(), nullptr, options);
RunLoop run_loop;
- scoped_refptr<SingleThreadTaskRunner> test_task_runner =
- ThreadTaskRunnerHandle::Get();
+ auto test_task_runner = ThreadTaskRunnerHandle::Get();
auto quit_closure = run_loop.QuitClosure();
-
- const int kPollsToQuit = 10;
- int call_count = 0;
MemoryDumpManager* mdm = mdm_.get();
- const auto poll_function1 = [&call_count, &test_task_runner, quit_closure,
- &mdp2, mdm, &options, kPollsToQuit,
- this](uint64_t* total) -> void {
- ++call_count;
- if (call_count == 1)
- RegisterDumpProvider(mdp2.get(), nullptr, options, kMDPName);
- else if (call_count == 4)
- mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
- else if (call_count == kPollsToQuit)
- test_task_runner->PostTask(FROM_HERE, quit_closure);
-
- // Record increase of 1 GiB of memory at each call.
- *total = static_cast<uint64_t>(call_count) * 1024 * 1024 * 1024;
- };
- EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
- .Times(testing::AtLeast(kPollsToQuit))
- .WillRepeatedly(Invoke(poll_function1));
- // Depending on the order of PostTask calls the mdp2 might be registered after
- // all polls or in between polls.
- EXPECT_CALL(*mdp2, PollFastMemoryTotal(_))
- .Times(Between(0, kPollsToQuit - 1))
+ EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
+ .WillOnce(Invoke([&mdp2, options, this](uint64_t*) {
+ RegisterDumpProvider(mdp2.get(), nullptr, options);
+ }))
+ .WillOnce(Return())
+ .WillOnce(Invoke([mdm, &mdp2](uint64_t*) {
+ mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
+ }))
+ .WillOnce(Invoke([test_task_runner, quit_closure](uint64_t*) {
+ test_task_runner->PostTask(FROM_HERE, quit_closure);
+ }))
.WillRepeatedly(Return());
- MemoryDumpScheduler::SetPollingIntervalForTesting(1);
- EnableTracingWithTraceConfig(
- TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(3));
+ // We expect a call to |mdp1| because it is still registered at the time the
+ // peak detector is Stop()-ed (upon OnTraceLogDisabled()). We do NOT expect a
+ // call for |mdp2|, because it gets unregistered before the Stop().
+ EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(0);
- int last_poll_to_request_dump = -2;
- EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
- .Times(testing::AtLeast(2))
- .WillRepeatedly(Invoke([&last_poll_to_request_dump, &call_count](
- const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) -> void {
- // Minimum number of polls between dumps must be 3 (polling interval is
- // 1ms).
- EXPECT_GE(call_count - last_poll_to_request_dump, 3);
- last_poll_to_request_dump = call_count;
- }));
+ // |mdp2| should be invoked exactly twice:
+ // - once after the registration, when |mdp1| hits the first Return();
+ // - a second time when |mdp1| unregisters |mdp2|. The unregistration is
+ // posted and will necessarily happen after the polling task.
+ EXPECT_CALL(*mdp2, PollFastMemoryTotal(_)).Times(2).WillRepeatedly(Return());
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(1));
run_loop.Run();
DisableTracing();
mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
@@ -844,10 +850,10 @@ TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
scoped_refptr<SequencedTaskRunner> main_runner =
SequencedTaskRunnerHandle::Get();
auto on_dump = [other_thread, main_runner, &on_memory_dump_call_count](
- const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+ const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
PostTaskAndWait(
FROM_HERE, main_runner.get(),
- base::Bind(&TestIOThread::Stop, base::Unretained(other_thread)));
+ base::BindOnce(&TestIOThread::Stop, base::Unretained(other_thread)));
on_memory_dump_call_count++;
return true;
};
@@ -897,7 +903,6 @@ TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
// initialization gets NACK-ed cleanly.
{
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
- EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
EXPECT_FALSE(last_callback_success_);
@@ -906,9 +911,9 @@ TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
// Now late-initialize the MemoryDumpManager and check that the
// RequestGlobalDump completes successfully.
{
+ InitializeMemoryDumpManager(false /* is_coordinator */);
EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
- InitializeMemoryDumpManager(false /* is_coordinator */);
RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED);
EXPECT_TRUE(last_callback_success_);
@@ -979,6 +984,7 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// process with a fully defined trigger config should cause periodic dumps to
// be performed in the correct order.
RunLoop run_loop;
+ auto test_task_runner = ThreadTaskRunnerHandle::Get();
auto quit_closure = run_loop.QuitClosure();
const int kHeavyDumpRate = 5;
@@ -993,9 +999,10 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
.Times(kHeavyDumpRate - 2);
EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
- .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) {
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ .WillOnce(Invoke([test_task_runner, quit_closure](
+ const MemoryDumpRequestArgs& args,
+ const GlobalMemoryDumpCallback& callback) {
+ test_task_runner->PostTask(FROM_HERE, quit_closure);
}));
// Swallow all the final spurious calls until tracing gets disabled.
@@ -1010,7 +1017,13 @@ TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
-TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
+// Flaky on iOS, see crbug.com/706961
+#if defined(OS_IOS)
+#define MAYBE_DisableTracingWhileDumping DISABLED_DisableTracingWhileDumping
+#else
+#define MAYBE_DisableTracingWhileDumping DisableTracingWhileDumping
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_DisableTracingWhileDumping) {
base::WaitableEvent tracing_disabled_event(
WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
@@ -1048,8 +1061,8 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
last_callback_success_ = true;
RunLoop run_loop;
- MemoryDumpCallback callback =
- Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ GlobalMemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::GlobalDumpCallbackAdapter, Unretained(this),
ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED, callback);
@@ -1080,9 +1093,11 @@ TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
.WillOnce(Invoke([this](const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) {
+ const GlobalMemoryDumpCallback& callback) {
DisableTracing();
- delegate_->CreateProcessDump(args, callback);
+ ProcessMemoryDumpCallback process_callback =
+ Bind(&ProcessDumpCallbackAdapter, callback);
+ delegate_->CreateProcessDump(args, process_callback);
}));
// If tracing is disabled for current session CreateProcessDump() should NOT
@@ -1194,7 +1209,7 @@ TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
PostTaskAndWait(
FROM_HERE, thread_for_unregistration.task_runner().get(),
- base::Bind(
+ base::BindOnce(
&MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
base::Unretained(MemoryDumpManager::GetInstance()),
base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp)))));
@@ -1243,15 +1258,17 @@ TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
InitializeMemoryDumpManager(true /* is_coordinator */);
RunLoop run_loop;
+ auto test_task_runner = ThreadTaskRunnerHandle::Get();
auto quit_closure = run_loop.QuitClosure();
testing::InSequence sequence;
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
.Times(5);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
- .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) {
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ .WillOnce(Invoke([test_task_runner, quit_closure](
+ const MemoryDumpRequestArgs& args,
+ const GlobalMemoryDumpCallback& callback) {
+ test_task_runner->PostTask(FROM_HERE, quit_closure);
}));
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
diff --git a/chromium/base/trace_event/memory_dump_provider_info.cc b/chromium/base/trace_event/memory_dump_provider_info.cc
new file mode 100644
index 00000000000..6bb711018ba
--- /dev/null
+++ b/chromium/base/trace_event/memory_dump_provider_info.cc
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_provider_info.h"
+
+#include <tuple>
+
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+namespace trace_event {
+
+MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+ MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
+ : dump_provider(dump_provider),
+ options(options),
+ name(name),
+ task_runner(std::move(task_runner)),
+ whitelisted_for_background_mode(whitelisted_for_background_mode),
+ consecutive_failures(0),
+ disabled(false) {}
+
+MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+
+bool MemoryDumpProviderInfo::Comparator::operator()(
+ const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const {
+ if (!a || !b)
+ return a.get() < b.get();
+ // Ensure that unbound providers (task_runner == nullptr) always run last.
+ // Rationale: some unbound dump providers are known to be slow; keep them
+ // last to avoid skewing the timings of the other dump providers.
+ return std::tie(a->task_runner, a->dump_provider) >
+ std::tie(b->task_runner, b->dump_provider);
+}
+
+} // namespace trace_event
+} // namespace base
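The descending std::tie order is what pushes unbound providers to the back: a null scoped_refptr compares lowest, so under the operator> comparison it sorts last in the OrderedSet. A tiny standalone model of the rule (raw pointers stand in for the refptrs):

  #include <tuple>

  // Mirrors Comparator::operator(): "a sorts before b" iff a's key tuple is
  // greater, so nullptr task runners (the lowest keys) end up last.
  bool SortsBefore(const void* a_runner, const void* a_mdp,
                   const void* b_runner, const void* b_mdp) {
    return std::tie(a_runner, a_mdp) > std::tie(b_runner, b_mdp);
  }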
diff --git a/chromium/base/trace_event/memory_dump_provider_info.h b/chromium/base/trace_event/memory_dump_provider_info.h
new file mode 100644
index 00000000000..ca63a987b2c
--- /dev/null
+++ b/chromium/base/trace_event/memory_dump_provider_info.h
@@ -0,0 +1,108 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+// Wraps a MemoryDumpProvider (MDP), which is registered via
+// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
+// required to deal with it (which task runner it should be invoked onto,
+// whether it has been disabled, etc.)
+// More importantly, having a refptr to this object guarantees that a MDP that
+// is not thread-bound (hence which can only be unregistered via
+// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
+// refptr is held.
+//
+// Lifetime:
+// At any time, there is at most one instance of this class for each instance
+// of a given MemoryDumpProvider, but there might be several scoped_refptrs
+// holding onto it. Specifically:
+// - In nominal conditions, there is a refptr for each registerd MDP in the
+// MDM's |dump_providers_| list.
+// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
+// by MDM::UnregisterDumpProvider().
+// - However, when MDM starts a dump, the list of refptrs is copied into the
+// ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
+// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
+// dump is in progress, the extra refptr to the MDPInfo is destroyed in
+// MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
+// inside ProcessMemoryDumpAsyncState is erase()-d.
+// - The PeakDetector can keep extra refptrs when enabled.
+struct BASE_EXPORT MemoryDumpProviderInfo
+ : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+ public:
+ // Define a total order based on the |task_runner| affinity, so that MDPs
+ // belonging to the same SequencedTaskRunner are adjacent in the set.
+ struct Comparator {
+ bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+ };
+ using OrderedSet =
+ std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+ MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
+
+ // It is safe to access the const fields below from any thread as they are
+ // never mutated.
+
+ MemoryDumpProvider* const dump_provider;
+
+ // The |options| arg passed to MDM::RegisterDumpProvider().
+ const MemoryDumpProvider::Options options;
+
+ // Human readable name, not unique (distinct MDP instances might have the same
+ // name). Used for debugging, testing and whitelisting for BACKGROUND mode.
+ const char* const name;
+
+  // The task runner on which the MDP::OnMemoryDump call should be posted.
+ // Can be nullptr, in which case the MDP will be invoked on a background
+ // thread handled by MDM.
+ const scoped_refptr<SequencedTaskRunner> task_runner;
+
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
+  // The fields below, instead, are not thread safe and can be mutated only:
+  // - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
+ // - By the MDM's background thread (or in any other way that guarantees
+ // sequencing) for non-thread-bound MDPs.
+
+ // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+ // nullptr in all other cases.
+ std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+ // For fail-safe logic (auto-disable failing MDPs).
+ int consecutive_failures;
+
+ // Flagged either by the auto-disable logic or during unregistration.
+ bool disabled;
+
+ private:
+ friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+ ~MemoryDumpProviderInfo();
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index bf72bef5e4d..0b525d4213e 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -60,5 +60,12 @@ MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
return MemoryDumpLevelOfDetail::LAST;
}
+MemoryDumpCallbackResult::MemoryDumpCallbackResult() {}
+
+MemoryDumpCallbackResult::MemoryDumpCallbackResult(
+ const MemoryDumpCallbackResult&) = default;
+
+MemoryDumpCallbackResult::~MemoryDumpCallbackResult() {}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index 90a866fa7a3..0f42a185310 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -9,18 +9,21 @@
// These are also used in the IPCs for coordinating inter-process memory dumps.
#include <stdint.h>
+#include <map>
#include <string>
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/optional.h"
+#include "base/process/process_handle.h"
namespace base {
namespace trace_event {
// Captures the reason why a memory dump is being requested. This is to allow
-// selective enabling of dumps, filtering and post-processing. Important: this
-// must be kept consistent with
-// services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
+// selective enabling of dumps, filtering and post-processing. Keep this
+// consistent with memory_instrumentation.mojo and
+// memory_instrumentation_struct_traits.{h,cc}
enum class MemoryDumpType {
PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
@@ -29,8 +32,8 @@ enum class MemoryDumpType {
};
// Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
-// Important: this must be kept consistent with
-// services/resource_Coordinator/public/cpp/memory/memory_infra_traits.cc.
+// Keep this consistent with memory_instrumentation.mojo and
+// memory_instrumentation_struct_traits.{h,cc}
enum class MemoryDumpLevelOfDetail : uint32_t {
FIRST,
@@ -53,8 +56,8 @@ enum class MemoryDumpLevelOfDetail : uint32_t {
};
// Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()). Important: this must be kept
-// consistent with services/memory_infra/public/cpp/memory_infra_traits.cc.
+// MemoryDumpManager::RequestGlobalMemoryDump()). Keep this consistent with
+// memory_instrumentation.mojo and memory_instrumentation_struct_traits.{h,cc}
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -72,7 +75,41 @@ struct MemoryDumpArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
-using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
+// TODO(hjd): Not used yet, see crbug.com/703184
+// Summarises information about memory use as seen by a single process.
+// This information will eventually be passed to a service to be collated
+// and reported.
+struct BASE_EXPORT MemoryDumpCallbackResult {
+ struct OSMemDump {
+ uint32_t resident_set_kb = 0;
+ };
+ struct ChromeMemDump {
+ uint32_t malloc_total_kb = 0;
+ uint32_t partition_alloc_total_kb = 0;
+ uint32_t blink_gc_total_kb = 0;
+ uint32_t v8_total_kb = 0;
+ };
+
+ // These are for the current process.
+ OSMemDump os_dump;
+ ChromeMemDump chrome_dump;
+
+ // In some cases, OS stats can only be dumped from a privileged process to
+  // get around sandboxing/selinux restrictions (see crbug.com/461788).
+ std::map<ProcessId, OSMemDump> extra_processes_dump;
+
+ MemoryDumpCallbackResult();
+ MemoryDumpCallbackResult(const MemoryDumpCallbackResult&);
+ ~MemoryDumpCallbackResult();
+};
+
+using GlobalMemoryDumpCallback =
+ Callback<void(uint64_t dump_guid, bool success)>;
+
+using ProcessMemoryDumpCallback =
+ Callback<void(uint64_t dump_guid,
+ bool success,
+ const Optional<MemoryDumpCallbackResult>& result)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
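
The two aliases above split the old MemoryDumpCallback into a global flavor and a per-process flavor that additionally carries an optional result. A sketch of a ProcessMemoryDumpCallback consumer, with std::function and std::optional standing in for base::Callback and base::Optional, and a trimmed-down hypothetical Result:

#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>

struct OSMemDump { uint32_t resident_set_kb = 0; };
struct Result { OSMemDump os_dump; };  // Hypothetical, trimmed-down summary.

using ProcessMemoryDumpCallback =
    std::function<void(uint64_t, bool, const std::optional<Result>&)>;

int main() {
  ProcessMemoryDumpCallback callback =
      [](uint64_t guid, bool success, const std::optional<Result>& result) {
        // A failed dump carries no result; a successful one carries a summary.
        if (success && result) {
          std::cout << guid << ": RSS = " << result->os_dump.resident_set_kb
                    << " kB\n";
        } else {
          std::cout << guid << ": dump failed\n";
        }
      };
  callback(42, true, Result{{1024}});
  callback(43, false, std::nullopt);
}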
diff --git a/chromium/base/trace_event/memory_dump_scheduler.cc b/chromium/base/trace_event/memory_dump_scheduler.cc
index 9b9933b1c98..b1d34d93403 100644
--- a/chromium/base/trace_event/memory_dump_scheduler.cc
+++ b/chromium/base/trace_event/memory_dump_scheduler.cc
@@ -4,219 +4,112 @@
#include "base/trace_event/memory_dump_scheduler.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "build/build_config.h"
+#include <algorithm>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
namespace trace_event {
-namespace {
-// Threshold on increase in memory from last dump beyond which a new dump must
-// be triggered.
-int64_t kMemoryIncreaseThreshold = 50 * 1024 * 1024; // 50MiB
-const uint32_t kMemoryTotalsPollingInterval = 25;
-uint32_t g_polling_interval_ms_for_testing = 0;
-} // namespace
-
-MemoryDumpScheduler::MemoryDumpScheduler(
- MemoryDumpManager* mdm,
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
- : mdm_(mdm), polling_state_(polling_task_runner) {}
-
-MemoryDumpScheduler::~MemoryDumpScheduler() {}
-
-void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
- MemoryDumpLevelOfDetail level_of_detail,
- uint32_t min_time_between_dumps_ms) {
- if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
- DCHECK(!periodic_state_.is_configured);
- DCHECK(!polling_state_.is_configured);
- DCHECK_NE(0u, min_time_between_dumps_ms);
-
- polling_state_.level_of_detail = level_of_detail;
- polling_state_.min_polls_between_dumps =
- (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
- polling_state_.polling_interval_ms;
- polling_state_.is_configured = true;
- } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
- DCHECK(!polling_state_.is_configured);
- periodic_state_.is_configured = true;
- DCHECK_NE(0u, min_time_between_dumps_ms);
- switch (level_of_detail) {
+// static
+MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
+ static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
+ return instance;
+}
+
+MemoryDumpScheduler::MemoryDumpScheduler() : period_ms_(0), generation_(0) {}
+MemoryDumpScheduler::~MemoryDumpScheduler() {
+  // Hit only in tests. Check that tests don't exit without calling Stop().
+ DCHECK(!is_enabled_for_testing());
+}
+
+void MemoryDumpScheduler::Start(
+ MemoryDumpScheduler::Config config,
+ scoped_refptr<SequencedTaskRunner> task_runner) {
+ DCHECK(!task_runner_);
+ task_runner_ = task_runner;
+ task_runner->PostTask(FROM_HERE, BindOnce(&MemoryDumpScheduler::StartInternal,
+ Unretained(this), config));
+}
+
+void MemoryDumpScheduler::Stop() {
+ if (!task_runner_)
+ return;
+ task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryDumpScheduler::StopInternal,
+ Unretained(this)));
+ task_runner_ = nullptr;
+}
+
+void MemoryDumpScheduler::StartInternal(MemoryDumpScheduler::Config config) {
+ uint32_t light_dump_period_ms = 0;
+ uint32_t heavy_dump_period_ms = 0;
+ uint32_t min_period_ms = std::numeric_limits<uint32_t>::max();
+ for (const Config::Trigger& trigger : config.triggers) {
+ DCHECK_GT(trigger.period_ms, 0u);
+ switch (trigger.level_of_detail) {
case MemoryDumpLevelOfDetail::BACKGROUND:
break;
case MemoryDumpLevelOfDetail::LIGHT:
- DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
- periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
+ DCHECK_EQ(0u, light_dump_period_ms);
+ light_dump_period_ms = trigger.period_ms;
break;
case MemoryDumpLevelOfDetail::DETAILED:
- DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
- periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
+ DCHECK_EQ(0u, heavy_dump_period_ms);
+ heavy_dump_period_ms = trigger.period_ms;
break;
}
-
- periodic_state_.min_timer_period_ms = std::min(
- periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
- DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
- periodic_state_.min_timer_period_ms);
- DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
- periodic_state_.min_timer_period_ms);
+ min_period_ms = std::min(min_period_ms, trigger.period_ms);
}
-}
-
-void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
- if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
- return;
- periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
- periodic_state_.min_timer_period_ms;
- periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
- periodic_state_.min_timer_period_ms;
- periodic_state_.dump_count = 0;
- periodic_state_.timer.Start(
+ DCHECK_EQ(0u, light_dump_period_ms % min_period_ms);
+ DCHECK_EQ(0u, heavy_dump_period_ms % min_period_ms);
+ DCHECK(!config.callback.is_null());
+ callback_ = config.callback;
+ period_ms_ = min_period_ms;
+ tick_count_ = 0;
+ light_dump_rate_ = light_dump_period_ms / min_period_ms;
+ heavy_dump_rate_ = heavy_dump_period_ms / min_period_ms;
+
+  // Trigger the first dump after |period_ms_|, not as soon as the timer starts.
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
- TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
- Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
+ BindOnce(&MemoryDumpScheduler::Tick, Unretained(this), ++generation_),
+ TimeDelta::FromMilliseconds(period_ms_));
}
-void MemoryDumpScheduler::NotifyPollingSupported() {
- if (!polling_state_.is_configured || polling_state_.is_polling_enabled)
- return;
- polling_state_.is_polling_enabled = true;
- polling_state_.num_polls_from_last_dump = 0;
- polling_state_.last_dump_memory_total = 0;
- polling_state_.polling_task_runner->PostTask(
- FROM_HERE,
- Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
+void MemoryDumpScheduler::StopInternal() {
+ period_ms_ = 0;
+ generation_++;
+ callback_.Reset();
}
-void MemoryDumpScheduler::DisableAllTriggers() {
- if (periodic_state_.timer.IsRunning())
- periodic_state_.timer.Stop();
- DisablePolling();
-}
-
-void MemoryDumpScheduler::DisablePolling() {
- if (ThreadTaskRunnerHandle::Get() != polling_state_.polling_task_runner) {
- if (polling_state_.polling_task_runner->PostTask(
- FROM_HERE,
- Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
- return;
- }
- polling_state_.is_polling_enabled = false;
- polling_state_.is_configured = false;
- polling_state_.polling_task_runner = nullptr;
-}
-
-// static
-void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
- g_polling_interval_ms_for_testing = interval;
-}
+void MemoryDumpScheduler::Tick(uint32_t expected_generation) {
+ if (period_ms_ == 0 || generation_ != expected_generation)
+ return;
-bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
- return periodic_state_.timer.IsRunning();
-}
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&MemoryDumpScheduler::Tick, Unretained(this),
+ expected_generation),
+ TimeDelta::FromMilliseconds(period_ms_));
-void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
- if (periodic_state_.light_dumps_rate > 0 &&
- periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
+ if (light_dump_rate_ > 0 && tick_count_ % light_dump_rate_ == 0)
level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
- if (periodic_state_.heavy_dumps_rate > 0 &&
- periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
+ if (heavy_dump_rate_ > 0 && tick_count_ % heavy_dump_rate_ == 0)
level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
- ++periodic_state_.dump_count;
-
- mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
-void MemoryDumpScheduler::PollMemoryOnPollingThread() {
- if (!polling_state_.is_configured)
- return;
-
- uint64_t polled_memory = 0;
- bool res = mdm_->PollFastMemoryTotal(&polled_memory);
- DCHECK(res);
- if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
- TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
- polled_memory / 1024 / 1024);
- }
-
- if (ShouldTriggerDump(polled_memory)) {
- TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
- "Peak memory dump Triggered",
- TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
- polled_memory / 1024 / 1024);
+ tick_count_++;
- mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
- polling_state_.level_of_detail);
- }
-
- // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
- ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE,
- Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
- TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
+ callback_.Run(level_of_detail);
}
-bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
- // This function tries to detect peak memory usage as discussed in
- // https://goo.gl/0kOU4A.
-
- if (current_memory_total == 0)
- return false;
-
- bool should_dump = false;
- ++polling_state_.num_polls_from_last_dump;
- if (polling_state_.last_dump_memory_total == 0) {
- // If it's first sample then trigger memory dump.
- should_dump = true;
- } else if (polling_state_.min_polls_between_dumps >
- polling_state_.num_polls_from_last_dump) {
- return false;
- }
-
- int64_t increase_from_last_dump =
- current_memory_total - polling_state_.last_dump_memory_total;
- should_dump |= increase_from_last_dump > kMemoryIncreaseThreshold;
- if (should_dump) {
- polling_state_.last_dump_memory_total = current_memory_total;
- polling_state_.num_polls_from_last_dump = 0;
- }
- return should_dump;
-}
-
-MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
- : is_configured(false),
- dump_count(0),
- min_timer_period_ms(std::numeric_limits<uint32_t>::max()),
- light_dumps_rate(0),
- heavy_dumps_rate(0),
- light_dump_period_ms(0),
- heavy_dump_period_ms(0) {}
-
-MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
- DCHECK(!timer.IsRunning());
-}
-
-MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
- : is_configured(false),
- is_polling_enabled(false),
- level_of_detail(MemoryDumpLevelOfDetail::FIRST),
- polling_task_runner(polling_task_runner),
- polling_interval_ms(g_polling_interval_ms_for_testing
- ? g_polling_interval_ms_for_testing
- : kMemoryTotalsPollingInterval),
- min_polls_between_dumps(0),
- num_polls_from_last_dump(0),
- last_dump_memory_total(0) {}
-
-MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
- DCHECK(!polling_task_runner);
-}
+MemoryDumpScheduler::Config::Config() {}
+MemoryDumpScheduler::Config::~Config() {}
+MemoryDumpScheduler::Config::Config(const MemoryDumpScheduler::Config&) =
+ default;
} // namespace trace_event
} // namespace base
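
StartInternal() reduces each trigger to a rate relative to the smallest configured period (hence the DCHECKs that every period is an integral multiple of the minimum), and Tick() then escalates to the most detailed level whose rate divides the tick count. A standalone sketch of that arithmetic, with hypothetical values:

#include <algorithm>
#include <cstdint>
#include <iostream>

enum class Level { BACKGROUND, LIGHT, DETAILED };

int main() {
  const uint32_t light_period_ms = 250;
  const uint32_t heavy_period_ms = 2000;  // Must be a multiple of 250.
  const uint32_t period_ms = std::min(light_period_ms, heavy_period_ms);
  const uint32_t light_rate = light_period_ms / period_ms;  // 1
  const uint32_t heavy_rate = heavy_period_ms / period_ms;  // 8

  for (uint32_t tick = 0; tick < 16; ++tick) {
    Level level = Level::BACKGROUND;
    if (light_rate > 0 && tick % light_rate == 0)
      level = Level::LIGHT;
    if (heavy_rate > 0 && tick % heavy_rate == 0)
      level = Level::DETAILED;  // DETAILED wins when both rates divide.
    std::cout << "tick " << tick << " -> level "
              << static_cast<int>(level) << "\n";
  }
}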
diff --git a/chromium/base/trace_event/memory_dump_scheduler.h b/chromium/base/trace_event/memory_dump_scheduler.h
index 2214e053b46..21334f0edd0 100644
--- a/chromium/base/trace_event/memory_dump_scheduler.h
+++ b/chromium/base/trace_event/memory_dump_scheduler.h
@@ -5,112 +5,67 @@
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
+#include <stdint.h>
+
+#include <vector>
+
#include "base/base_export.h"
-#include "base/gtest_prod_util.h"
+#include "base/callback.h"
#include "base/memory/ref_counted.h"
-#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_request_args.h"
namespace base {
-class SingleThreadTaskRunner;
+class SequencedTaskRunner;
namespace trace_event {
-class MemoryDumpManager;
-
-// Schedules global dump requests based on the triggers added.
+// Schedules global dump requests based on the triggers added. The methods of
+// this class are NOT thread safe; the client is responsible for serializing
+// all calls into it.
class BASE_EXPORT MemoryDumpScheduler {
public:
- MemoryDumpScheduler(
- MemoryDumpManager* mdm_,
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
- ~MemoryDumpScheduler();
-
- // Adds triggers for scheduling global dumps. Both periodic and peak triggers
- // cannot be added together. At the moment the periodic support is limited to
- // at most one periodic trigger per dump mode and peak triggers are limited to
- // at most one. All intervals should be an integeral multiple of the smallest
- // interval specified.
- void AddTrigger(MemoryDumpType trigger_type,
- MemoryDumpLevelOfDetail level_of_detail,
- uint32_t min_time_between_dumps_ms);
-
- // Starts periodic dumps.
- void NotifyPeriodicTriggerSupported();
-
- // Starts polling memory total.
- void NotifyPollingSupported();
-
- // Disables all triggers.
- void DisableAllTriggers();
-
- private:
- friend class MemoryDumpManagerTest;
- FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
-
- // Helper class to schdule periodic memory dumps.
- struct PeriodicTriggerState {
- PeriodicTriggerState();
- ~PeriodicTriggerState();
-
- bool is_configured;
+ using PeriodicCallback = RepeatingCallback<void(MemoryDumpLevelOfDetail)>;
- RepeatingTimer timer;
- uint32_t dump_count;
- uint32_t min_timer_period_ms;
- uint32_t light_dumps_rate;
- uint32_t heavy_dumps_rate;
+ // Passed to Start().
+ struct BASE_EXPORT Config {
+ struct Trigger {
+ MemoryDumpLevelOfDetail level_of_detail;
+ uint32_t period_ms;
+ };
- uint32_t light_dump_period_ms;
- uint32_t heavy_dump_period_ms;
+ Config();
+ Config(const Config&);
+ ~Config();
- DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
+ std::vector<Trigger> triggers;
+ PeriodicCallback callback;
};
- struct PollingTriggerState {
- explicit PollingTriggerState(
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
- ~PollingTriggerState();
+ static MemoryDumpScheduler* GetInstance();
- bool is_configured;
- bool is_polling_enabled;
- MemoryDumpLevelOfDetail level_of_detail;
+ void Start(Config, scoped_refptr<SequencedTaskRunner> task_runner);
+ void Stop();
+ bool is_enabled_for_testing() const { return bool(task_runner_); }
- scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
- uint32_t polling_interval_ms;
-
- // Minimum numer of polls after the last dump at which next dump can be
- // triggered.
- int min_polls_between_dumps;
- int num_polls_from_last_dump;
-
- uint64_t last_dump_memory_total;
-
- DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
- };
-
- // Helper to set polling disabled on the polling thread.
- void DisablePolling();
-
- // Periodically called by the timer.
- void RequestPeriodicGlobalDump();
-
- // Called for polling memory usage and trigger dumps if peak is detected.
- void PollMemoryOnPollingThread();
-
- // Returns true if peak memory value is detected.
- bool ShouldTriggerDump(uint64_t current_memory_total);
-
- // Must be set before enabling tracing.
- static void SetPollingIntervalForTesting(uint32_t interval);
+ private:
+ friend class MemoryDumpSchedulerTest;
+ MemoryDumpScheduler();
+ ~MemoryDumpScheduler();
- // True if periodic dumping is enabled.
- bool IsPeriodicTimerRunningForTesting();
+ void StartInternal(Config);
+ void StopInternal();
+ void Tick(uint32_t expected_generation);
- MemoryDumpManager* mdm_;
+ // Accessed only by the public methods (never from the task runner itself).
+ scoped_refptr<SequencedTaskRunner> task_runner_;
- PeriodicTriggerState periodic_state_;
- PollingTriggerState polling_state_;
+ // These fields instead are only accessed from within the task runner.
+ uint32_t period_ms_; // 0 == disabled.
+ uint32_t generation_; // Used to invalidate outstanding tasks after Stop().
+ uint32_t tick_count_;
+ uint32_t light_dump_rate_;
+ uint32_t heavy_dump_rate_;
+ PeriodicCallback callback_;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
};
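
The |generation_| field implements a simple cancellation idiom: every posted Tick() captures the generation at post time and early-outs when it no longer matches, so Stop() only needs to bump the counter. A minimal single-threaded sketch, with a hypothetical Scheduler type and a plain vector standing in for the task runner:

#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

struct Scheduler {
  uint32_t generation = 0;
  std::vector<std::function<void()>> queue;  // Stand-in for a task runner.

  void PostTick() {
    const uint32_t expected = generation;  // Captured at post time.
    queue.push_back([this, expected] {
      if (generation != expected) {
        std::cout << "stale tick dropped\n";
        return;
      }
      std::cout << "tick ran\n";
    });
  }
  void Stop() { ++generation; }  // Invalidates every outstanding tick.
};

int main() {
  Scheduler s;
  s.PostTick();
  s.Stop();      // The already-posted tick is now stale.
  s.PostTick();
  for (auto& task : s.queue)
    task();      // Prints "stale tick dropped", then "tick ran".
}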
diff --git a/chromium/base/trace_event/memory_dump_scheduler_unittest.cc b/chromium/base/trace_event/memory_dump_scheduler_unittest.cc
new file mode 100644
index 00000000000..17fb74361f5
--- /dev/null
+++ b/chromium/base/trace_event/memory_dump_scheduler_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Invoke;
+using ::testing::_;
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Wrapper to use gmock on a callback.
+struct CallbackWrapper {
+ MOCK_METHOD1(OnTick, void(MemoryDumpLevelOfDetail));
+};
+
+} // namespace
+
+class MemoryDumpSchedulerTest : public testing::Test {
+ public:
+ struct FriendDeleter {
+ void operator()(MemoryDumpScheduler* inst) { delete inst; }
+ };
+
+ MemoryDumpSchedulerTest() : testing::Test() {}
+
+ void SetUp() override {
+ bg_thread_.reset(new Thread("MemoryDumpSchedulerTest Thread"));
+ bg_thread_->Start();
+ scheduler_.reset(new MemoryDumpScheduler());
+ }
+
+ void TearDown() override {
+ bg_thread_.reset();
+ scheduler_.reset();
+ }
+
+ protected:
+ std::unique_ptr<MemoryDumpScheduler, FriendDeleter> scheduler_;
+ std::unique_ptr<Thread> bg_thread_;
+ CallbackWrapper on_tick_;
+};
+
+TEST_F(MemoryDumpSchedulerTest, SingleTrigger) {
+ const uint32_t kPeriodMs = 1;
+ const auto kLevelOfDetail = MemoryDumpLevelOfDetail::DETAILED;
+ const uint32_t kTicks = 5;
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ MemoryDumpScheduler::Config config;
+ config.triggers.push_back({kLevelOfDetail, kPeriodMs});
+ config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+ testing::InSequence sequence;
+ EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+ EXPECT_CALL(on_tick_, OnTick(_))
+ .WillRepeatedly(Invoke(
+ [&evt, kLevelOfDetail](MemoryDumpLevelOfDetail level_of_detail) {
+ EXPECT_EQ(kLevelOfDetail, level_of_detail);
+ evt.Signal();
+ }));
+
+ // Check that Stop() before Start() doesn't cause any error.
+ scheduler_->Stop();
+
+ const TimeTicks tstart = TimeTicks::Now();
+ scheduler_->Start(config, bg_thread_->task_runner());
+ evt.Wait();
+ const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+
+ // It takes N-1 ms to perform N ticks of 1ms each.
+ EXPECT_GE(time_ms, kPeriodMs * (kTicks - 1));
+
+ // Check that stopping twice doesn't cause any problems.
+ scheduler_->Stop();
+ scheduler_->Stop();
+}
+
+TEST_F(MemoryDumpSchedulerTest, MultipleTriggers) {
+ const uint32_t kPeriodLightMs = 3;
+ const uint32_t kPeriodDetailedMs = 9;
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ MemoryDumpScheduler::Config config;
+ const MemoryDumpLevelOfDetail kLight = MemoryDumpLevelOfDetail::LIGHT;
+ const MemoryDumpLevelOfDetail kDetailed = MemoryDumpLevelOfDetail::DETAILED;
+ config.triggers.push_back({kLight, kPeriodLightMs});
+ config.triggers.push_back({kDetailed, kPeriodDetailedMs});
+ config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+ TimeTicks t1, t2, t3;
+
+ testing::InSequence sequence;
+ EXPECT_CALL(on_tick_, OnTick(kDetailed))
+ .WillOnce(
+ Invoke([&t1](MemoryDumpLevelOfDetail) { t1 = TimeTicks::Now(); }));
+ EXPECT_CALL(on_tick_, OnTick(kLight)).Times(1);
+ EXPECT_CALL(on_tick_, OnTick(kLight)).Times(1);
+ EXPECT_CALL(on_tick_, OnTick(kDetailed))
+ .WillOnce(
+ Invoke([&t2](MemoryDumpLevelOfDetail) { t2 = TimeTicks::Now(); }));
+ EXPECT_CALL(on_tick_, OnTick(kLight))
+ .WillOnce(
+ Invoke([&t3](MemoryDumpLevelOfDetail) { t3 = TimeTicks::Now(); }));
+
+  // Rationale for WillRepeatedly and not just WillOnce: extra ticks might
+  // happen if Stop() takes time. Not an interesting case, but we need to
+  // keep gmock from complaining about it.
+ EXPECT_CALL(on_tick_, OnTick(_))
+ .WillRepeatedly(
+ Invoke([&evt](MemoryDumpLevelOfDetail) { evt.Signal(); }));
+
+ scheduler_->Start(config, bg_thread_->task_runner());
+ evt.Wait();
+ scheduler_->Stop();
+ EXPECT_GE((t2 - t1).InMillisecondsF(), kPeriodDetailedMs);
+ EXPECT_GE((t3 - t2).InMillisecondsF(), kPeriodLightMs);
+}
+
+TEST_F(MemoryDumpSchedulerTest, StartStopQuickly) {
+ const uint32_t kPeriodMs = 1;
+ const uint32_t kTicks = 10;
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ MemoryDumpScheduler::Config config;
+ config.triggers.push_back({MemoryDumpLevelOfDetail::DETAILED, kPeriodMs});
+ config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+ testing::InSequence sequence;
+ EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+ EXPECT_CALL(on_tick_, OnTick(_))
+ .WillRepeatedly(
+ Invoke([&evt](MemoryDumpLevelOfDetail) { evt.Signal(); }));
+
+ const TimeTicks tstart = TimeTicks::Now();
+ for (int i = 0; i < 5; i++) {
+ scheduler_->Stop();
+ scheduler_->Start(config, bg_thread_->task_runner());
+ }
+ evt.Wait();
+ const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+ scheduler_->Stop();
+
+ // It takes N-1 ms to perform N ticks of 1ms each.
+ EXPECT_GE(time_ms, kPeriodMs * (kTicks - 1));
+}
+
+TEST_F(MemoryDumpSchedulerTest, StopAndStartOnAnotherThread) {
+ const uint32_t kPeriodMs = 1;
+ const uint32_t kTicks = 3;
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ MemoryDumpScheduler::Config config;
+ config.triggers.push_back({MemoryDumpLevelOfDetail::DETAILED, kPeriodMs});
+ config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+ scoped_refptr<TaskRunner> expected_task_runner = bg_thread_->task_runner();
+ testing::InSequence sequence;
+ EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+ EXPECT_CALL(on_tick_, OnTick(_))
+ .WillRepeatedly(
+ Invoke([&evt, expected_task_runner](MemoryDumpLevelOfDetail) {
+ EXPECT_TRUE(expected_task_runner->RunsTasksOnCurrentThread());
+ evt.Signal();
+ }));
+
+ scheduler_->Start(config, bg_thread_->task_runner());
+ evt.Wait();
+ scheduler_->Stop();
+ bg_thread_->Stop();
+
+ bg_thread_.reset(new Thread("MemoryDumpSchedulerTest Thread 2"));
+ bg_thread_->Start();
+ evt.Reset();
+ expected_task_runner = bg_thread_->task_runner();
+ scheduler_->Start(config, bg_thread_->task_runner());
+ EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+ EXPECT_CALL(on_tick_, OnTick(_))
+ .WillRepeatedly(
+ Invoke([&evt, expected_task_runner](MemoryDumpLevelOfDetail) {
+ EXPECT_TRUE(expected_task_runner->RunsTasksOnCurrentThread());
+ evt.Signal();
+ }));
+ evt.Wait();
+ scheduler_->Stop();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index ae74322040f..746068a7b1e 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -69,10 +69,70 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"net/http_network_session_0x?/stream_factory",
"net/sdch_manager_0x?",
"net/ssl_session_cache",
- "net/url_request_context_0x?",
- "net/url_request_context_0x?/http_cache",
- "net/url_request_context_0x?/http_network_session",
- "net/url_request_context_0x?/sdch_manager",
+ "net/url_request_context",
+ "net/url_request_context/app_request",
+ "net/url_request_context/app_request/0x?",
+ "net/url_request_context/app_request/0x?/http_cache",
+ "net/url_request_context/app_request/0x?/http_cache/memory_backend",
+ "net/url_request_context/app_request/0x?/http_cache/simple_backend",
+ "net/url_request_context/app_request/0x?/http_network_session",
+ "net/url_request_context/app_request/0x?/sdch_manager",
+ "net/url_request_context/extensions",
+ "net/url_request_context/extensions/0x?",
+ "net/url_request_context/extensions/0x?/http_cache",
+ "net/url_request_context/extensions/0x?/http_cache/memory_backend",
+ "net/url_request_context/extensions/0x?/http_cache/simple_backend",
+ "net/url_request_context/extensions/0x?/http_network_session",
+ "net/url_request_context/extensions/0x?/sdch_manager",
+ "net/url_request_context/isolated_media",
+ "net/url_request_context/isolated_media/0x?",
+ "net/url_request_context/isolated_media/0x?/http_cache",
+ "net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
+ "net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
+ "net/url_request_context/isolated_media/0x?/http_network_session",
+ "net/url_request_context/isolated_media/0x?/sdch_manager",
+ "net/url_request_context/main",
+ "net/url_request_context/main/0x?",
+ "net/url_request_context/main/0x?/http_cache",
+ "net/url_request_context/main/0x?/http_cache/memory_backend",
+ "net/url_request_context/main/0x?/http_cache/simple_backend",
+ "net/url_request_context/main/0x?/http_network_session",
+ "net/url_request_context/main/0x?/sdch_manager",
+ "net/url_request_context/main_media",
+ "net/url_request_context/main_media/0x?",
+ "net/url_request_context/main_media/0x?/http_cache",
+ "net/url_request_context/main_media/0x?/http_cache/memory_backend",
+ "net/url_request_context/main_media/0x?/http_cache/simple_backend",
+ "net/url_request_context/main_media/0x?/http_network_session",
+ "net/url_request_context/main_media/0x?/sdch_manager",
+ "net/url_request_context/proxy",
+ "net/url_request_context/proxy/0x?",
+ "net/url_request_context/proxy/0x?/http_cache",
+ "net/url_request_context/proxy/0x?/http_cache/memory_backend",
+ "net/url_request_context/proxy/0x?/http_cache/simple_backend",
+ "net/url_request_context/proxy/0x?/http_network_session",
+ "net/url_request_context/proxy/0x?/sdch_manager",
+ "net/url_request_context/safe_browsing",
+ "net/url_request_context/safe_browsing/0x?",
+ "net/url_request_context/safe_browsing/0x?/http_cache",
+ "net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
+ "net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
+ "net/url_request_context/safe_browsing/0x?/http_network_session",
+ "net/url_request_context/safe_browsing/0x?/sdch_manager",
+ "net/url_request_context/system",
+ "net/url_request_context/system/0x?",
+ "net/url_request_context/system/0x?/http_cache",
+ "net/url_request_context/system/0x?/http_cache/memory_backend",
+ "net/url_request_context/system/0x?/http_cache/simple_backend",
+ "net/url_request_context/system/0x?/http_network_session",
+ "net/url_request_context/system/0x?/sdch_manager",
+ "net/url_request_context/unknown",
+ "net/url_request_context/unknown/0x?",
+ "net/url_request_context/unknown/0x?/http_cache",
+ "net/url_request_context/unknown/0x?/http_cache/memory_backend",
+ "net/url_request_context/unknown/0x?/http_cache/simple_backend",
+ "net/url_request_context/unknown/0x?/http_network_session",
+ "net/url_request_context/unknown/0x?/sdch_manager",
"web_cache/Image_resources",
"web_cache/CSS stylesheet_resources",
"web_cache/Script_resources",
diff --git a/chromium/base/trace_event/memory_peak_detector.cc b/chromium/base/trace_event/memory_peak_detector.cc
new file mode 100644
index 00000000000..541959406ca
--- /dev/null
+++ b/chromium/base/trace_event/memory_peak_detector.cc
@@ -0,0 +1,288 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_peak_detector.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/sys_info.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
+ static MemoryPeakDetector* instance = new MemoryPeakDetector();
+ return instance;
+}
+
+MemoryPeakDetector::MemoryPeakDetector()
+ : generation_(0),
+ state_(NOT_INITIALIZED),
+ poll_tasks_count_for_testing_(0) {}
+
+MemoryPeakDetector::~MemoryPeakDetector() {
+ // This is hit only in tests, in which case the test is expected to TearDown()
+ // cleanly and not leave the peak detector running.
+ DCHECK_EQ(NOT_INITIALIZED, state_);
+}
+
+void MemoryPeakDetector::Setup(
+ const GetDumpProvidersFunction& get_dump_providers_function,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ const OnPeakDetectedCallback& on_peak_detected_callback) {
+ DCHECK(!get_dump_providers_function.is_null());
+ DCHECK(task_runner);
+ DCHECK(!on_peak_detected_callback.is_null());
+ DCHECK(state_ == NOT_INITIALIZED || state_ == DISABLED);
+ DCHECK(dump_providers_.empty());
+ get_dump_providers_function_ = get_dump_providers_function;
+ task_runner_ = task_runner;
+ on_peak_detected_callback_ = on_peak_detected_callback;
+ state_ = DISABLED;
+ config_ = {};
+ ResetPollHistory();
+
+ static_threshold_bytes_ = 0;
+#if !defined(OS_NACL)
+ // Set threshold to 1% of total system memory.
+ static_threshold_bytes_ =
+ static_cast<uint64_t>(SysInfo::AmountOfPhysicalMemory()) / 100;
+#endif
+ // Fallback, mostly for test environments where AmountOfPhysicalMemory() is
+ // broken.
+ static_threshold_bytes_ =
+ std::max(static_threshold_bytes_, static_cast<uint64_t>(5 * 1024 * 1024));
+}
+
+void MemoryPeakDetector::TearDown() {
+ if (task_runner_) {
+ task_runner_->PostTask(
+ FROM_HERE,
+ BindOnce(&MemoryPeakDetector::TearDownInternal, Unretained(this)));
+ }
+ task_runner_ = nullptr;
+}
+
+void MemoryPeakDetector::Start(MemoryPeakDetector::Config config) {
+ if (!config.polling_interval_ms) {
+ NOTREACHED();
+ return;
+ }
+ task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryPeakDetector::StartInternal,
+ Unretained(this), config));
+}
+
+void MemoryPeakDetector::Stop() {
+ task_runner_->PostTask(
+ FROM_HERE, BindOnce(&MemoryPeakDetector::StopInternal, Unretained(this)));
+}
+
+void MemoryPeakDetector::Throttle() {
+ if (!task_runner_)
+ return; // Can be called before Setup().
+ task_runner_->PostTask(
+ FROM_HERE, BindOnce(&MemoryPeakDetector::ResetPollHistory,
+ Unretained(this), true /* keep_last_sample */));
+}
+
+void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
+ if (!task_runner_)
+ return; // Can be called before Setup().
+ task_runner_->PostTask(
+ FROM_HERE,
+ BindOnce(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
+ Unretained(this)));
+}
+
+void MemoryPeakDetector::StartInternal(MemoryPeakDetector::Config config) {
+ DCHECK_EQ(DISABLED, state_);
+ state_ = ENABLED;
+ config_ = config;
+ ResetPollHistory();
+
+ // If there are any dump providers available,
+ // NotifyMemoryDumpProvidersChanged will fetch them and start the polling.
+ // Otherwise this will remain in the ENABLED state and the actual polling
+ // will start on the next call to
+ // ReloadDumpProvidersAndStartPollingIfNeeded().
+ // Depending on the sandbox model, it is possible that no polling-capable
+  // dump providers will ever be available.
+ ReloadDumpProvidersAndStartPollingIfNeeded();
+}
+
+void MemoryPeakDetector::StopInternal() {
+ DCHECK_NE(NOT_INITIALIZED, state_);
+ state_ = DISABLED;
+ ++generation_;
+ for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info : dump_providers_)
+ mdp_info->dump_provider->SuspendFastMemoryPolling();
+ dump_providers_.clear();
+}
+
+void MemoryPeakDetector::TearDownInternal() {
+ StopInternal();
+ get_dump_providers_function_.Reset();
+ on_peak_detected_callback_.Reset();
+ state_ = NOT_INITIALIZED;
+}
+
+void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
+ if (state_ == DISABLED || state_ == NOT_INITIALIZED)
+ return; // Start() will re-fetch the MDP list later.
+
+ DCHECK((state_ == RUNNING && !dump_providers_.empty()) ||
+ (state_ == ENABLED && dump_providers_.empty()));
+
+ dump_providers_.clear();
+
+  // This is really MemoryDumpManager::GetDumpProvidersForPolling, modulo
+  // testing.
+ get_dump_providers_function_.Run(&dump_providers_);
+
+ if (state_ == ENABLED && !dump_providers_.empty()) {
+ // It's now time to start polling for realz.
+ state_ = RUNNING;
+ task_runner_->PostTask(
+ FROM_HERE, BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak,
+ Unretained(this), ++generation_));
+ } else if (state_ == RUNNING && dump_providers_.empty()) {
+ // Will cause the next PollMemoryAndDetectPeak() task to early return.
+ state_ = ENABLED;
+ ++generation_;
+ }
+}
+
+void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
+ if (state_ != RUNNING || generation_ != expected_generation)
+ return;
+
+ // We should never end up in a situation where state_ == RUNNING but all dump
+ // providers are gone.
+ DCHECK(!dump_providers_.empty());
+
+ poll_tasks_count_for_testing_++;
+ uint64_t polled_mem_bytes = 0;
+ for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
+ dump_providers_) {
+ DCHECK(mdp_info->options.is_fast_polling_supported);
+ uint64_t value = 0;
+ mdp_info->dump_provider->PollFastMemoryTotal(&value);
+ polled_mem_bytes += value;
+ }
+ if (config_.enable_verbose_poll_tracing) {
+ TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
+ polled_mem_bytes / 1024 / 1024);
+ }
+
+ // Peak detection logic. Design doc: https://goo.gl/0kOU4A .
+ bool is_peak = false;
+ if (skip_polls_ > 0) {
+ skip_polls_--;
+ } else if (last_dump_memory_total_ == 0) {
+ last_dump_memory_total_ = polled_mem_bytes;
+ } else if (polled_mem_bytes > 0) {
+ int64_t diff_from_last_dump = polled_mem_bytes - last_dump_memory_total_;
+
+ DCHECK_GT(static_threshold_bytes_, 0u);
+ is_peak =
+ diff_from_last_dump > static_cast<int64_t>(static_threshold_bytes_);
+
+ if (!is_peak)
+ is_peak = DetectPeakUsingSlidingWindowStddev(polled_mem_bytes);
+ }
+
+ DCHECK_GT(config_.polling_interval_ms, 0u);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
+ expected_generation),
+ TimeDelta::FromMilliseconds(config_.polling_interval_ms));
+
+ if (!is_peak)
+ return;
+ TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
+ "Peak memory detected", TRACE_EVENT_SCOPE_PROCESS,
+ "PolledMemoryMB", polled_mem_bytes / 1024 / 1024);
+ ResetPollHistory(true /* keep_last_sample */);
+ last_dump_memory_total_ = polled_mem_bytes;
+ on_peak_detected_callback_.Run();
+}
+
+bool MemoryPeakDetector::DetectPeakUsingSlidingWindowStddev(
+ uint64_t polled_mem_bytes) {
+ DCHECK(polled_mem_bytes);
+ samples_bytes_[samples_index_] = polled_mem_bytes;
+ samples_index_ = (samples_index_ + 1) % kSlidingWindowNumSamples;
+ float mean = 0;
+ for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
+ if (samples_bytes_[i] == 0)
+ return false; // Not enough samples to detect peaks.
+ mean += samples_bytes_[i];
+ }
+ mean /= kSlidingWindowNumSamples;
+ float variance = 0;
+ for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
+ const float deviation = samples_bytes_[i] - mean;
+ variance += deviation * deviation;
+ }
+ variance /= kSlidingWindowNumSamples;
+
+  // If stddev is less than 0.2% of the mean, we consider the process inactive.
+ if (variance < (mean / 500) * (mean / 500))
+ return false;
+
+ // (mean + 3.69 * stddev) corresponds to a value that is higher than current
+ // sample with 99.99% probability.
+ const float cur_sample_deviation = polled_mem_bytes - mean;
+ return cur_sample_deviation * cur_sample_deviation > (3.69 * 3.69 * variance);
+}
+
+void MemoryPeakDetector::ResetPollHistory(bool keep_last_sample) {
+ // TODO(primiano,ssid): this logic should probably be revisited. In the case
+ // of Android, the browser process sees the total of all processes memory in
+ // the same peak detector instance. Perhaps the best thing to do here is to
+ // keep the window of samples around and just bump the skip_polls_.
+ last_dump_memory_total_ = 0;
+ if (keep_last_sample) {
+ const uint32_t prev_index =
+ samples_index_ > 0 ? samples_index_ - 1 : kSlidingWindowNumSamples - 1;
+ last_dump_memory_total_ = samples_bytes_[prev_index];
+ }
+ memset(samples_bytes_, 0, sizeof(samples_bytes_));
+ samples_index_ = 0;
+ skip_polls_ = 0;
+ if (config_.polling_interval_ms > 0) {
+ skip_polls_ =
+ (config_.min_time_between_peaks_ms + config_.polling_interval_ms - 1) /
+ config_.polling_interval_ms;
+ }
+}
+
+void MemoryPeakDetector::SetStaticThresholdForTesting(
+ uint64_t static_threshold_bytes) {
+ DCHECK_EQ(DISABLED, state_);
+ static_threshold_bytes_ = static_threshold_bytes;
+}
+
+MemoryPeakDetector::MemoryPeakDetector::Config::Config()
+ : Config(0, 0, false) {}
+
+MemoryPeakDetector::MemoryPeakDetector::Config::Config(
+ uint32_t polling_interval_ms,
+ uint32_t min_time_between_peaks_ms,
+ bool enable_verbose_poll_tracing)
+ : polling_interval_ms(polling_interval_ms),
+ min_time_between_peaks_ms(min_time_between_peaks_ms),
+ enable_verbose_poll_tracing(enable_verbose_poll_tracing) {}
+
+} // namespace trace_event
+} // namespace base
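
DetectPeakUsingSlidingWindowStddev() above is a plain variance test over the last kSlidingWindowNumSamples polls. A standalone sketch of the same math (same constants; IsPeak is a hypothetical name): the window must be full, the process counts as idle when stddev is below 0.2% of the mean, and the newest sample is a peak when it deviates by more than 3.69 standard deviations (roughly a 0.01% one-sided tail under a normal distribution):

#include <cstdint>
#include <iostream>

constexpr uint32_t kNumSamples = 50;

bool IsPeak(const uint64_t samples[kNumSamples], uint64_t newest) {
  float mean = 0;
  for (uint32_t i = 0; i < kNumSamples; ++i) {
    if (samples[i] == 0)
      return false;  // Window not full yet: not enough history.
    mean += samples[i];
  }
  mean /= kNumSamples;

  float variance = 0;
  for (uint32_t i = 0; i < kNumSamples; ++i) {
    const float deviation = samples[i] - mean;
    variance += deviation * deviation;
  }
  variance /= kNumSamples;

  // stddev below 0.2% of the mean: the process is idle, never a peak.
  if (variance < (mean / 500) * (mean / 500))
    return false;

  // Peak if the newest sample deviates by more than 3.69 stddev.
  const float deviation = newest - mean;
  return deviation * deviation > 3.69f * 3.69f * variance;
}

int main() {
  uint64_t samples[kNumSamples];
  for (uint32_t i = 0; i < kNumSamples; ++i)
    samples[i] = 50000 + (i % 3) * 100;  // Quiet baseline around 50 MB...
  samples[kNumSamples - 1] = 80000;      // ...then a jump to 80 MB.
  std::cout << IsPeak(samples, samples[kNumSamples - 1]) << "\n";  // 1
}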
diff --git a/chromium/base/trace_event/memory_peak_detector.h b/chromium/base/trace_event/memory_peak_detector.h
new file mode 100644
index 00000000000..c08d91bd8a6
--- /dev/null
+++ b/chromium/base/trace_event/memory_peak_detector.h
@@ -0,0 +1,184 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
+#define BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+struct MemoryDumpProviderInfo;
+
+// Detects temporally local memory peaks. Peak detection is based on
+// continuously querying memory usage using MemoryDumpProvider(s) that support
+// fast polling (e.g., ProcessMetricsDumpProvider, which under the hood reads
+// /proc/PID/statm on Linux) and using a combination of:
+// - A static threshold (currently 1% of total system memory).
+// - Sliding window stddev analysis.
+// Design doc: https://goo.gl/0kOU4A .
+// This class is NOT thread-safe; the caller has to ensure linearization of
+// the calls to the public methods. In any case, the public methods do NOT have
+// to be called from the |task_runner| on which the polling tasks run.
+class BASE_EXPORT MemoryPeakDetector {
+ public:
+ using OnPeakDetectedCallback = RepeatingClosure;
+ using DumpProvidersList = std::vector<scoped_refptr<MemoryDumpProviderInfo>>;
+ using GetDumpProvidersFunction = RepeatingCallback<void(DumpProvidersList*)>;
+
+ enum State {
+ NOT_INITIALIZED = 0, // Before Setup()
+ DISABLED, // Before Start() or after Stop().
+ ENABLED, // After Start() but no dump_providers_ are available.
+ RUNNING // After Start(). The PollMemoryAndDetectPeak() task is scheduled.
+ };
+
+ // Peak detector configuration, passed to Start().
+ struct BASE_EXPORT Config {
+ Config();
+ Config(uint32_t polling_interval_ms,
+ uint32_t min_time_between_peaks_ms,
+ bool enable_verbose_poll_tracing);
+
+ // The rate at which memory will be polled. Polls will happen on the task
+ // runner passed to Setup().
+ uint32_t polling_interval_ms;
+
+ // Two consecutive peak detection callbacks will happen at least
+ // |min_time_between_peaks_ms| apart from each other.
+ uint32_t min_time_between_peaks_ms;
+
+  // When enabled, causes a TRACE_COUNTER event to be injected in the trace
+ // for each poll (if tracing is enabled).
+ bool enable_verbose_poll_tracing;
+ };
+
+ static MemoryPeakDetector* GetInstance();
+
+ // Configures the peak detector, binding the polling tasks on the given
+ // thread. Setup() can be called several times, provided that: (1) Stop()
+ // is called; (2a) the previous task_runner is flushed or (2b) the task_runner
+ // remains the same.
+ // GetDumpProvidersFunction: is the function that will be invoked to get
+ // an updated list of polling-capable dump providers. This is really just
+ // MemoryDumpManager::GetDumpProvidersForPolling, but this extra level of
+ // indirection allows easier testing.
+ // SequencedTaskRunner: the task runner where PollMemoryAndDetectPeak() will
+ // be periodically called.
+ // OnPeakDetectedCallback: a callback that will be invoked on the
+ // given task runner when a memory peak is detected.
+ void Setup(const GetDumpProvidersFunction&,
+ const scoped_refptr<SequencedTaskRunner>&,
+ const OnPeakDetectedCallback&);
+
+ // Releases the |task_runner_| and the bound callbacks.
+ void TearDown();
+
+ // This posts a task onto the passed task runner which refreshes the list of
+ // dump providers via the GetDumpProvidersFunction. If at least one dump
+  // provider is available, this immediately starts polling on the task runner.
+ // If not, the detector remains in the ENABLED state and will start polling
+ // automatically (i.e. without requiring another call to Start()) on the
+ // next call to NotifyMemoryDumpProvidersChanged().
+ void Start(Config);
+
+  // Stops the polling on the task runner (if it was active at all). This
+  // doesn't wait for the task runner to drain pending tasks, so it is possible
+  // that a poll will happen concurrently (or in the immediate future) with the
+  // Stop() call. It is the responsibility of the caller to drain or
+  // synchronize with the task runner.
+ void Stop();
+
+  // If Start()-ed, prevents a peak callback from being triggered before the
+  // next |min_time_between_peaks_ms|. No-op if the peak detector is not enabled.
+ void Throttle();
+
+ // Used by MemoryDumpManager to notify that the list of polling-capable dump
+ // providers has changed. The peak detector will reload the list on the next
+  // polling task. This function can be called before Setup(), in which
+  // case it is just a no-op.
+ void NotifyMemoryDumpProvidersChanged();
+
+ void SetStaticThresholdForTesting(uint64_t static_threshold_bytes);
+
+ private:
+ friend class MemoryPeakDetectorTest;
+
+ static constexpr uint32_t kSlidingWindowNumSamples = 50;
+
+ MemoryPeakDetector();
+ ~MemoryPeakDetector();
+
+ // All these methods are always called on the |task_runner_|.
+ void StartInternal(Config);
+ void StopInternal();
+ void TearDownInternal();
+ void ReloadDumpProvidersAndStartPollingIfNeeded();
+ void PollMemoryAndDetectPeak(uint32_t expected_generation);
+ bool DetectPeakUsingSlidingWindowStddev(uint64_t last_sample_bytes);
+ void ResetPollHistory(bool keep_last_sample = false);
+
+ // It is safe to call these testing methods only on the |task_runner_|.
+ State state_for_testing() const { return state_; }
+ uint32_t poll_tasks_count_for_testing() const {
+ return poll_tasks_count_for_testing_;
+ }
+
+  // The task runner onto which all the internal calls are posted. This field
+  // must NOT be accessed by the tasks posted on the |task_runner_| because
+ // there might still be outstanding tasks on the |task_runner_| while this
+ // refptr is reset. This can only be safely accessed by the public methods
+ // above, which the client of this class is supposed to call sequentially.
+ scoped_refptr<SequencedTaskRunner> task_runner_;
+
+ // After the Setup() call, the fields below, must be accessed only from
+ // the |task_runner_|.
+
+ // Bound function to get an updated list of polling-capable dump providers.
+ GetDumpProvidersFunction get_dump_providers_function_;
+
+ // The callback to invoke when peaks are detected.
+ OnPeakDetectedCallback on_peak_detected_callback_;
+
+ // List of polling-aware dump providers to invoke upon each poll.
+ DumpProvidersList dump_providers_;
+
+ // The generation is incremented every time the |state_| is changed and causes
+ // PollMemoryAndDetectPeak() to early out if the posted task doesn't match the
+  // most recent |generation_|. This allows outstanding PostDelayedTask calls
+  // that refer to an old sequence, later Stop()-ed or disabled because of
+  // NotifyMemoryDumpProvidersChanged(), to be dropped on the floor.
+ uint32_t generation_;
+
+ State state_;
+
+ // Config passed to Start(), only valid when |state_| = {ENABLED, RUNNING}.
+ Config config_;
+
+ uint64_t static_threshold_bytes_;
+ uint32_t skip_polls_;
+ uint64_t last_dump_memory_total_;
+ uint64_t samples_bytes_[kSlidingWindowNumSamples];
+ uint32_t samples_index_;
+ uint32_t poll_tasks_count_for_testing_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryPeakDetector);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
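
The |min_time_between_peaks_ms| spacing documented in Config above is enforced by skipping polls; ResetPollHistory() in the .cc computes the count as a ceiling division over the polling interval. A one-function sketch (SkipPolls is a hypothetical name):

#include <cstdint>
#include <iostream>

// Ceiling division: the number of polls to skip so that at least
// |min_time_between_peaks_ms| elapses between two peak callbacks.
uint32_t SkipPolls(uint32_t min_time_between_peaks_ms,
                   uint32_t polling_interval_ms) {
  return (min_time_between_peaks_ms + polling_interval_ms - 1) /
         polling_interval_ms;
}

int main() {
  std::cout << SkipPolls(60000, 25) << "\n";  // 2400 polls between peaks.
}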
diff --git a/chromium/base/trace_event/memory_peak_detector_unittest.cc b/chromium/base/trace_event/memory_peak_detector_unittest.cc
new file mode 100644
index 00000000000..c158466d6ea
--- /dev/null
+++ b/chromium/base/trace_event/memory_peak_detector_unittest.cc
@@ -0,0 +1,558 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_peak_detector.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const TimeDelta kMs = TimeDelta::FromMilliseconds(1);
+const MemoryPeakDetector::Config kConfigNoCallbacks(
+ 1 /* polling_interval_ms */,
+ 60000 /* min_time_between_peaks_ms */,
+ false /* enable_verbose_poll_tracing */
+ );
+
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+ bool OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump*) override {
+ NOTREACHED();
+ return true;
+ }
+
+ MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t*));
+};
+
+// Wrapper to use gmock on a callback.
+struct OnPeakDetectedWrapper {
+ MOCK_METHOD0(OnPeak, void());
+};
+
+} // namespace
+
+class MemoryPeakDetectorTest : public testing::Test {
+ public:
+ struct FriendDeleter {
+ void operator()(MemoryPeakDetector* inst) { delete inst; }
+ };
+
+ MemoryPeakDetectorTest() : testing::Test() {}
+ static const uint64_t kSlidingWindowNumSamples =
+ MemoryPeakDetector::kSlidingWindowNumSamples;
+
+ std::unique_ptr<MemoryPeakDetector, FriendDeleter> NewInstance() {
+ return std::unique_ptr<MemoryPeakDetector, FriendDeleter>(
+ new MemoryPeakDetector());
+ }
+
+ void RestartThreadAndReinitializePeakDetector() {
+ bg_thread_.reset(new Thread("Peak Detector Test Thread"));
+ bg_thread_->Start();
+ peak_detector_ = NewInstance();
+ peak_detector_->Setup(
+ Bind(&MemoryPeakDetectorTest::MockGetDumpProviders, Unretained(this)),
+ bg_thread_->task_runner(),
+ Bind(&OnPeakDetectedWrapper::OnPeak, Unretained(&on_peak_callback_)));
+ }
+
+ void SetUp() override {
+ get_mdp_call_count_ = 0;
+ RestartThreadAndReinitializePeakDetector();
+ }
+
+ void TearDown() override {
+ peak_detector_->TearDown();
+ bg_thread_->FlushForTesting();
+ EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
+ bg_thread_.reset();
+ dump_providers_.clear();
+ }
+
+ // Calls MemoryPeakDetector::state_for_testing() on the bg thread and returns
+ // the result on the current thread.
+ MemoryPeakDetector::State GetPeakDetectorState() {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ MemoryPeakDetector::State res = MemoryPeakDetector::NOT_INITIALIZED;
+ auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
+ MemoryPeakDetector::State* res) {
+ *res = peak_detector->state_for_testing();
+ evt->Signal();
+ };
+ bg_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(get_fn, Unretained(&*peak_detector_), Unretained(&evt),
+ Unretained(&res)));
+ evt.Wait();
+ return res;
+ }
+
+ // Calls MemoryPeakDetector::poll_tasks_count_for_testing() on the bg thread
+ // and returns the result on the current thread.
+ uint32_t GetNumPollingTasksRan() {
+ uint32_t res = 0;
+ auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
+ uint32_t* res) {
+ *res = peak_detector->poll_tasks_count_for_testing();
+ evt->Signal();
+ };
+
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ bg_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(get_fn, Unretained(&*peak_detector_), Unretained(&evt),
+ Unretained(&res)));
+ evt.Wait();
+ return res;
+ }
+
+ // Runs the peak detector with a mock MDP with the given
+ // |config|. The mock MDP will invoke the |poll_function| on any call to
+ // PollFastMemoryTotal(), until |num_samples| have been polled.
+ // It returns the number of peaks detected.
+ uint32_t RunWithCustomPollFunction(
+ MemoryPeakDetector::Config config,
+ uint32_t num_samples,
+ RepeatingCallback<uint64_t(uint32_t)> poll_function) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ dump_providers_.push_back(mdp);
+ uint32_t cur_sample_idx = 0;
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke(
+ [&cur_sample_idx, &evt, poll_function, num_samples](uint64_t* mem) {
+ if (cur_sample_idx >= num_samples) {
+ *mem = 1;
+ evt.Signal();
+ } else {
+ *mem = poll_function.Run(cur_sample_idx++);
+ }
+ }));
+
+ uint32_t num_peaks = 0;
+ EXPECT_CALL(on_peak_callback_, OnPeak())
+ .WillRepeatedly(Invoke([&num_peaks] { num_peaks++; }));
+ peak_detector_->Start(config);
+ evt.Wait(); // Wait for |num_samples| invocations of PollFastMemoryTotal().
+ peak_detector_->Stop();
+ EXPECT_EQ(num_samples, cur_sample_idx);
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ return num_peaks;
+ }
+
+ // Called on the |bg_thread_|.
+ void MockGetDumpProviders(MemoryPeakDetector::DumpProvidersList* mdps) {
+ get_mdp_call_count_++;
+ *mdps = dump_providers_;
+ }
+
+ uint32_t GetNumGetDumpProvidersCalls() {
+ bg_thread_->FlushForTesting();
+ return get_mdp_call_count_;
+ }
+
+ scoped_refptr<MemoryDumpProviderInfo> CreateMockDumpProvider() {
+ std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
+ MemoryDumpProvider::Options opt;
+ opt.is_fast_polling_supported = true;
+ scoped_refptr<MemoryDumpProviderInfo> mdp_info(
+ new MemoryDumpProviderInfo(mdp.get(), "Mock MDP", nullptr, opt, false));
+
+ // The |mdp| instance will be destroyed together with the |mdp_info|.
+ mdp_info->owned_dump_provider = std::move(mdp);
+ return mdp_info;
+ }
+
+ static MockMemoryDumpProvider& GetMockMDP(
+ const scoped_refptr<MemoryDumpProviderInfo>& mdp_info) {
+ return *static_cast<MockMemoryDumpProvider*>(mdp_info->dump_provider);
+ }
+
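+  // Synthesizes a memory trace: a ~50 MB baseline with two sustained
+  // increases (~80 MB and ~120 MB) that the sliding-window stddev logic is
+  // expected to report as exactly two peaks (see the StdDev test below).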
+ static uint64_t PollFunctionThatCausesPeakViaStdDev(uint32_t sample_idx) {
+ // Start with a baseline of 50 MB.
+ if (sample_idx < kSlidingWindowNumSamples)
+ return 50000 + (sample_idx % 3) * 100;
+
+    // Then 10 samples around 80 MB.
+ if (sample_idx < 10 + kSlidingWindowNumSamples)
+ return 80000 + (sample_idx % 3) * 200;
+
+    // Then back to 60 MB.
+ if (sample_idx < 2 * kSlidingWindowNumSamples)
+ return 60000 + (sample_idx % 3) * 100;
+
+ // Then 20 samples around 120 MB.
+ if (sample_idx < 20 + 2 * kSlidingWindowNumSamples)
+ return 120000 + (sample_idx % 3) * 200;
+
+    // Then back to an idle level of around 50 MB until the end.
+ return 50000 + (sample_idx % 3) * 100;
+ }
+
+ protected:
+ MemoryPeakDetector::DumpProvidersList dump_providers_;
+ uint32_t get_mdp_call_count_;
+ std::unique_ptr<MemoryPeakDetector, FriendDeleter> peak_detector_;
+ std::unique_ptr<Thread> bg_thread_;
+ OnPeakDetectedWrapper on_peak_callback_;
+};
+
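+// Out-of-line definition of the static class constant; required (pre-C++17)
+// because the constant is ODR-used by the tests below.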
+const uint64_t MemoryPeakDetectorTest::kSlidingWindowNumSamples;
+
+TEST_F(MemoryPeakDetectorTest, GetDumpProvidersFunctionCalled) {
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(0u, GetNumPollingTasksRan());
+}
+
+TEST_F(MemoryPeakDetectorTest, ThrottleAndNotifyBeforeInitialize) {
+ peak_detector_->TearDown();
+
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+ peak_detector_->Throttle();
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
+ RestartThreadAndReinitializePeakDetector();
+
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ evt.Wait(); // Wait for a PollFastMemoryTotal() call.
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_GE(GetNumPollingTasksRan(), 1u);
+}
+
+TEST_F(MemoryPeakDetectorTest, DoubleStop) {
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_EQ(0u, GetNumPollingTasksRan());
+}
+
+TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredBeforeStart) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+
+ peak_detector_->Start(kConfigNoCallbacks);
+ evt.Wait(); // Signaled when PollFastMemoryTotal() is called on the MockMDP.
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+ EXPECT_GT(GetNumPollingTasksRan(), 0u);
+}
+
+TEST_F(MemoryPeakDetectorTest, ReInitializeAndRebindToNewThread) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+
+ for (int i = 0; i < 5; ++i) {
+ evt.Reset();
+ peak_detector_->Start(kConfigNoCallbacks);
+ evt.Wait(); // Wait for a PollFastMemoryTotal() call.
+ // Check that calling TearDown implicitly does a Stop().
+ peak_detector_->TearDown();
+
+ // Reinitialize and re-bind to a new task runner.
+ RestartThreadAndReinitializePeakDetector();
+ }
+}
+
+TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredOutOfBand) {
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+ EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+
+ // Check that no poll tasks are posted before any dump provider is registered.
+ PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
+ EXPECT_EQ(0u, GetNumPollingTasksRan());
+
+  // Register the MDP after Start() has been issued and expect that the
+  // PeakDetector transitions ENABLED -> RUNNING on the next
+  // NotifyMemoryDumpProvidersChanged() call.
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+ dump_providers_.push_back(mdp);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+
+ evt.Wait(); // Signaled when PollFastMemoryTotal() is called on the MockMDP.
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ EXPECT_EQ(2u, GetNumGetDumpProvidersCalls());
+
+  // Now simulate the unregistration and expect that the PeakDetector
+  // transitions back to ENABLED.
+ dump_providers_.clear();
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+ EXPECT_EQ(3u, GetNumGetDumpProvidersCalls());
+ uint32_t num_poll_tasks = GetNumPollingTasksRan();
+ EXPECT_GT(num_poll_tasks, 0u);
+
+ // At this point, no more polling tasks should be posted.
+ PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
+}
+
+// Tests that a sequence of back-to-back Start()/Stop() calls doesn't end up
+// creating several outstanding timer tasks and instead respects the
+// polling_interval_ms.
+TEST_F(MemoryPeakDetectorTest, StartStopQuickly) {
+ WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+ dump_providers_.push_back(mdp);
+ const uint32_t kNumPolls = 20;
+ uint32_t polls_done = 0;
+ EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&polls_done, &evt, kNumPolls](uint64_t*) {
+ if (++polls_done == kNumPolls)
+ evt.Signal();
+ }));
+
+ const TimeTicks tstart = TimeTicks::Now();
+ for (int i = 0; i < 5; i++) {
+ peak_detector_->Start(kConfigNoCallbacks);
+ peak_detector_->Stop();
+ }
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ evt.Wait(); // Wait for kNumPolls.
+ const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+
+ EXPECT_GE(time_ms, kNumPolls * kConfigNoCallbacks.polling_interval_ms);
+ peak_detector_->Stop();
+}
+
+TEST_F(MemoryPeakDetectorTest, RegisterAndUnregisterTwoDumpProviders) {
+ WaitableEvent evt1(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent evt2(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scoped_refptr<MemoryDumpProviderInfo> mdp1 = CreateMockDumpProvider();
+ scoped_refptr<MemoryDumpProviderInfo> mdp2 = CreateMockDumpProvider();
+ EXPECT_CALL(GetMockMDP(mdp1), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt1](uint64_t*) { evt1.Signal(); }));
+ EXPECT_CALL(GetMockMDP(mdp2), PollFastMemoryTotal(_))
+ .WillRepeatedly(Invoke([&evt2](uint64_t*) { evt2.Signal(); }));
+
+ // Register only one MDP and start the detector.
+ dump_providers_.push_back(mdp1);
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+  // Wait for one poll task, then register the second MDP as well.
+ evt1.Wait();
+ dump_providers_.push_back(mdp2);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ evt2.Wait();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ // Now unregister the first MDP and check that everything is still running.
+ dump_providers_.erase(dump_providers_.begin());
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+ // Now unregister both and check that the detector goes to idle.
+ dump_providers_.clear();
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+  // Now re-register both and check that the detector resumes posting new
+  // polling tasks.
+ uint32_t num_poll_tasks = GetNumPollingTasksRan();
+ evt1.Reset();
+ evt2.Reset();
+ dump_providers_.push_back(mdp1);
+ dump_providers_.push_back(mdp2);
+ peak_detector_->NotifyMemoryDumpProvidersChanged();
+ evt1.Wait();
+ evt2.Wait();
+ EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+ EXPECT_GT(GetNumPollingTasksRan(), num_poll_tasks);
+
+  // Stop everything, tear down the MDPs, restart the detector and check that
+  // the detector doesn't accidentally try to re-access them.
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ dump_providers_.clear();
+ mdp1 = nullptr;
+ mdp2 = nullptr;
+
+ num_poll_tasks = GetNumPollingTasksRan();
+ peak_detector_->Start(kConfigNoCallbacks);
+ EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+ PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
+
+ peak_detector_->Stop();
+ EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+ EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
+
+ EXPECT_EQ(6u, GetNumGetDumpProvidersCalls());
+}
+
+// Tests the behavior of the static threshold detector, which is supposed to
+// report a peak whenever an increase >= threshold is observed.
+TEST_F(MemoryPeakDetectorTest, StaticThreshold) {
+ const uint32_t kNumSamples = 2 * kSlidingWindowNumSamples;
+ constexpr uint32_t kNumSamplesPerStep = 10;
+ constexpr uint64_t kThreshold = 1000000;
+ peak_detector_->SetStaticThresholdForTesting(kThreshold);
+ const MemoryPeakDetector::Config kConfig(
+ 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+ false /* enable_verbose_poll_tracing */
+ );
+
+  // The mocked PollFastMemoryTotal() returns a step function, e.g.
+  // (1, 1, 1, 5, 5, 5, ...), where each step increase is 2x the threshold, in
+  // order to trigger only the static threshold logic.
+ auto poll_fn = Bind(
+ [](const uint32_t kNumSamplesPerStep, const uint64_t kThreshold,
+ uint32_t sample_idx) -> uint64_t {
+ return (1 + sample_idx / kNumSamplesPerStep) * 2 * kThreshold;
+ },
+ kNumSamplesPerStep, kThreshold);
+ uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
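+  // Each upward step of 2x the threshold yields one peak; the initial level
+  // is not an increase, hence the -1.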
+ EXPECT_EQ(kNumSamples / kNumSamplesPerStep - 1, num_peaks);
+}
+
+// Checks the throttling logic of Config's |min_time_between_peaks_ms|.
+TEST_F(MemoryPeakDetectorTest, PeakCallbackThrottling) {
+ const size_t kNumSamples = 2 * kSlidingWindowNumSamples;
+ constexpr uint64_t kThreshold = 1000000;
+ peak_detector_->SetStaticThresholdForTesting(kThreshold);
+ const MemoryPeakDetector::Config kConfig(
+ 1 /* polling_interval_ms */, 4 /* min_time_between_peaks_ms */,
+ false /* enable_verbose_poll_tracing */
+ );
+
+  // Each mock value returned is N * 2 * threshold, so every sample would be
+  // eligible to be a peak if throttling weren't enabled.
+ auto poll_fn = Bind(
+ [](uint64_t kThreshold, uint32_t sample_idx) -> uint64_t {
+ return (sample_idx + 1) * 2 * kThreshold;
+ },
+ kThreshold);
+ uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+ const uint32_t kExpectedThrottlingRate =
+ kConfig.min_time_between_peaks_ms / kConfig.polling_interval_ms;
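+  // With one poll per ms and at most one notification every 4 ms, only about
+  // one in every kExpectedThrottlingRate samples may surface as a peak.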
+ EXPECT_LT(num_peaks, kNumSamples / kExpectedThrottlingRate);
+}
+
+TEST_F(MemoryPeakDetectorTest, StdDev) {
+ // Set the threshold to some arbitrarily high value, so that the static
+ // threshold logic is not hit in this test.
+ constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
+ peak_detector_->SetStaticThresholdForTesting(kThreshold);
+ const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
+ const MemoryPeakDetector::Config kConfig(
+ 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+ false /* enable_verbose_poll_tracing */
+ );
+
+ auto poll_fn = Bind(&PollFunctionThatCausesPeakViaStdDev);
+ uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+ EXPECT_EQ(2u, num_peaks); // 80 MB, 120 MB.
+}
+
+// Tests that Throttle() actually holds back peak notifications.
+TEST_F(MemoryPeakDetectorTest, Throttle) {
+ constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
+ const uint32_t kNumSamples = 3 * kSlidingWindowNumSamples;
+ peak_detector_->SetStaticThresholdForTesting(kThreshold);
+ const MemoryPeakDetector::Config kConfig(
+ 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+ false /* enable_verbose_poll_tracing */
+ );
+
+ auto poll_fn = Bind(
+ [](MemoryPeakDetector* peak_detector, uint32_t sample_idx) -> uint64_t {
+ if (sample_idx % 20 == 20 - 1)
+ peak_detector->Throttle();
+ return PollFunctionThatCausesPeakViaStdDev(sample_idx);
+ },
+ Unretained(&*peak_detector_));
+ uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
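+  // Throttle() was invoked every 20 samples, so no peak should have been
+  // notified at all.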
+ EXPECT_EQ(0u, num_peaks);
+}
+
+// Tests that the sliding-window stddev state is not carried over through
+// Stop() -> Start() sequences.
+TEST_F(MemoryPeakDetectorTest, RestartClearsState) {
+ constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
+ peak_detector_->SetStaticThresholdForTesting(kThreshold);
+ const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
+ const MemoryPeakDetector::Config kConfig(
+ 1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+ false /* enable_verbose_poll_tracing */
+ );
+ auto poll_fn = Bind(
+ [](MemoryPeakDetector* peak_detector,
+ const uint32_t kSlidingWindowNumSamples,
+ MemoryPeakDetector::Config kConfig, uint32_t sample_idx) -> uint64_t {
+ if (sample_idx % kSlidingWindowNumSamples ==
+ kSlidingWindowNumSamples - 1) {
+ peak_detector->Stop();
+ peak_detector->Start(kConfig);
+ }
+ return PollFunctionThatCausesPeakViaStdDev(sample_idx);
+ },
+ Unretained(&*peak_detector_), kSlidingWindowNumSamples, kConfig);
+ uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
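+  // The detector is restarted once per sliding window, so the stddev state
+  // never accumulates enough history to flag the increases as peaks.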
+ EXPECT_EQ(0u, num_peaks);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/memory_usage_estimator.h b/chromium/base/trace_event/memory_usage_estimator.h
index 6f02bb93bbb..174429fa975 100644
--- a/chromium/base/trace_event/memory_usage_estimator.h
+++ b/chromium/base/trace_event/memory_usage_estimator.h
@@ -24,7 +24,6 @@
#include "base/base_export.h"
#include "base/containers/linked_list.h"
#include "base/strings/string16.h"
-#include "base/template_util.h"
// Composable memory usage estimators.
//
@@ -207,7 +206,7 @@ template <class T>
struct EMUCaller<
T,
typename std::enable_if<!HasEMU<T>::value &&
- is_trivially_destructible<T>::value>::type> {
+ std::is_trivially_destructible<T>::value>::type> {
static size_t Call(const T& value) { return 0; }
};
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index 6f8d1672733..45a97396423 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -167,6 +167,7 @@ class BASE_EXPORT ProcessMemoryDump {
void AsValueInto(TracedValue* value) const;
ProcessMemoryTotals* process_totals() { return &process_totals_; }
+ const ProcessMemoryTotals* process_totals() const { return &process_totals_; }
bool has_process_totals() const { return has_process_totals_; }
void set_has_process_totals() { has_process_totals_ = true; }
diff --git a/chromium/base/trace_event/trace_category_unittest.cc b/chromium/base/trace_event/trace_category_unittest.cc
index a33924f4ffe..25f37ca7398 100644
--- a/chromium/base/trace_event/trace_category_unittest.cc
+++ b/chromium/base/trace_event/trace_category_unittest.cc
@@ -130,7 +130,7 @@ TEST_F(TraceCategoryTest, ThreadRaces) {
WaitableEvent::InitialState::NOT_SIGNALED);
for (int i = 0; i < kNumThreads; i++) {
threads[i]->task_runner()->PostTask(
- FROM_HERE, Bind(&TestRaceThreadMain, Unretained(&sync_event)));
+ FROM_HERE, BindOnce(&TestRaceThreadMain, Unretained(&sync_event)));
}
sync_event.Signal();
for (int i = 0; i < kNumThreads; i++)
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 36de107bf8b..7ee9a4a101b 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -11,11 +11,7 @@
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
-#include "base/strings/pattern.h"
#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event.h"
@@ -37,11 +33,6 @@ const char kEnableArgumentFilter[] = "enable-argument-filter";
const char kRecordModeParam[] = "record_mode";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
-const char kExcludedCategoriesParam[] = "excluded_categories";
-const char kSyntheticDelaysParam[] = "synthetic_delays";
-
-const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
// String parameters that are used to parse the memory dump config in the
// trace config string.
@@ -148,27 +139,36 @@ TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
return *this;
predicate_name_ = rhs.predicate_name_;
- included_categories_ = rhs.included_categories_;
- excluded_categories_ = rhs.excluded_categories_;
+ category_filter_ = rhs.category_filter_;
+
if (rhs.args_)
args_ = rhs.args_->CreateDeepCopy();
return *this;
}
-void TraceConfig::EventFilterConfig::AddIncludedCategory(
- const std::string& category) {
- included_categories_.push_back(category);
+void TraceConfig::EventFilterConfig::InitializeFromConfigDict(
+ const base::DictionaryValue* event_filter) {
+ category_filter_.InitializeFromConfigDict(*event_filter);
+
+ const base::DictionaryValue* args_dict = nullptr;
+ if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+ args_ = args_dict->CreateDeepCopy();
}
-void TraceConfig::EventFilterConfig::AddExcludedCategory(
- const std::string& category) {
- excluded_categories_.push_back(category);
+void TraceConfig::EventFilterConfig::SetCategoryFilter(
+ const TraceConfigCategoryFilter& category_filter) {
+ category_filter_ = category_filter;
}
-void TraceConfig::EventFilterConfig::SetArgs(
- std::unique_ptr<base::DictionaryValue> args) {
- args_ = std::move(args);
+void TraceConfig::EventFilterConfig::ToDict(
+ DictionaryValue* filter_dict) const {
+ filter_dict->SetString(kFilterPredicateParam, predicate_name());
+
+ category_filter_.ToDict(filter_dict);
+
+ if (args_)
+ filter_dict->Set(kFilterArgsParam, args_->CreateDeepCopy());
}
bool TraceConfig::EventFilterConfig::GetArgAsSet(
@@ -186,27 +186,8 @@ bool TraceConfig::EventFilterConfig::GetArgAsSet(
}
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
- CStringTokenizer category_group_tokens(
- category_group_name, category_group_name + strlen(category_group_name),
- ",");
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
-
- for (const auto& excluded_category : excluded_categories_) {
- if (base::MatchPattern(category_group_token, excluded_category)) {
- return false;
- }
- }
-
- for (const auto& included_category : included_categories_) {
- if (base::MatchPattern(category_group_token, included_category)) {
- return true;
- }
- }
- }
-
- return false;
+ const StringPiece& category_group_name) const {
+ return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
TraceConfig::TraceConfig() {
@@ -255,11 +236,8 @@ TraceConfig::TraceConfig(const TraceConfig& tc)
: record_mode_(tc.record_mode_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
+ category_filter_(tc.category_filter_),
memory_dump_config_(tc.memory_dump_config_),
- included_categories_(tc.included_categories_),
- disabled_categories_(tc.disabled_categories_),
- excluded_categories_(tc.excluded_categories_),
- synthetic_delays_(tc.synthetic_delays_),
event_filters_(tc.event_filters_) {}
TraceConfig::~TraceConfig() {
@@ -272,17 +250,14 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
record_mode_ = rhs.record_mode_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
+ category_filter_ = rhs.category_filter_;
memory_dump_config_ = rhs.memory_dump_config_;
- included_categories_ = rhs.included_categories_;
- disabled_categories_ = rhs.disabled_categories_;
- excluded_categories_ = rhs.excluded_categories_;
- synthetic_delays_ = rhs.synthetic_delays_;
event_filters_ = rhs.event_filters_;
return *this;
}
const TraceConfig::StringList& TraceConfig::GetSyntheticDelayValues() const {
- return synthetic_delays_;
+ return category_filter_.synthetic_delays();
}
std::string TraceConfig::ToString() const {
@@ -298,69 +273,14 @@ TraceConfig::AsConvertableToTraceFormat() const {
}
std::string TraceConfig::ToCategoryFilterString() const {
- std::string filter_string;
- WriteCategoryFilterString(included_categories_, &filter_string, true);
- WriteCategoryFilterString(disabled_categories_, &filter_string, true);
- WriteCategoryFilterString(excluded_categories_, &filter_string, false);
- WriteCategoryFilterString(synthetic_delays_, &filter_string);
- return filter_string;
+ return category_filter_.ToFilterString();
}
bool TraceConfig::IsCategoryGroupEnabled(
- const char* category_group_name) const {
+ const StringPiece& category_group_name) const {
// TraceLog should call this method only as part of enabling/disabling
// categories.
-
- bool had_enabled_by_default = false;
- DCHECK(category_group_name);
- std::string category_group_name_str = category_group_name;
- StringTokenizer category_group_tokens(category_group_name_str, ",");
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- // Don't allow empty tokens, nor tokens with leading or trailing space.
- DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- category_group_token))
- << "Disallowed category string";
- if (IsCategoryEnabled(category_group_token.c_str()))
- return true;
-
- if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
- had_enabled_by_default = true;
- }
- // Do a second pass to check for explicitly disabled categories
- // (those explicitly enabled have priority due to first pass).
- category_group_tokens.Reset();
- bool category_group_disabled = false;
- while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- for (const std::string& category : excluded_categories_) {
- if (MatchPattern(category_group_token, category)) {
- // Current token of category_group_name is present in excluded_list.
- // Flag the exclusion and proceed further to check if any of the
- // remaining categories of category_group_name is not present in the
- // excluded_ list.
- category_group_disabled = true;
- break;
- }
- // One of the category of category_group_name is not present in
- // excluded_ list. So, if it's not a disabled-by-default category,
- // it has to be included_ list. Enable the category_group_name
- // for recording.
- if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
- category_group_disabled = false;
- }
- }
- // One of the categories present in category_group_name is not present in
- // excluded_ list. Implies this category_group_name group can be enabled
- // for recording, since one of its groups is enabled for recording.
- if (!category_group_disabled)
- break;
- }
- // If the category group is not excluded, and there are no included patterns
- // we consider this category group enabled, as long as it had categories
- // other than disabled-by-default.
- return !category_group_disabled && had_enabled_by_default &&
- included_categories_.empty();
+ return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
void TraceConfig::Merge(const TraceConfig& config) {
@@ -371,28 +291,10 @@ void TraceConfig::Merge(const TraceConfig& config) {
<< "set of options.";
}
- // Keep included patterns only if both filters have an included entry.
- // Otherwise, one of the filter was specifying "*" and we want to honor the
- // broadest filter.
- if (HasIncludedPatterns() && config.HasIncludedPatterns()) {
- included_categories_.insert(included_categories_.end(),
- config.included_categories_.begin(),
- config.included_categories_.end());
- } else {
- included_categories_.clear();
- }
+ category_filter_.Merge(config.category_filter_);
memory_dump_config_.Merge(config.memory_dump_config_);
- disabled_categories_.insert(disabled_categories_.end(),
- config.disabled_categories_.begin(),
- config.disabled_categories_.end());
- excluded_categories_.insert(excluded_categories_.end(),
- config.excluded_categories_.begin(),
- config.excluded_categories_.end());
- synthetic_delays_.insert(synthetic_delays_.end(),
- config.synthetic_delays_.begin(),
- config.synthetic_delays_.end());
event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
config.event_filters().end());
}
@@ -401,10 +303,7 @@ void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
enable_systrace_ = false;
enable_argument_filter_ = false;
- included_categories_.clear();
- disabled_categories_.clear();
- excluded_categories_.clear();
- synthetic_delays_.clear();
+ category_filter_.Clear();
memory_dump_config_.Clear();
event_filters_.clear();
}
@@ -435,19 +334,13 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
enable_argument_filter_ =
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
- const ListValue* category_list = nullptr;
- if (dict.GetList(kIncludedCategoriesParam, &category_list))
- SetCategoriesFromIncludedList(*category_list);
- if (dict.GetList(kExcludedCategoriesParam, &category_list))
- SetCategoriesFromExcludedList(*category_list);
- if (dict.GetList(kSyntheticDelaysParam, &category_list))
- SetSyntheticDelaysFromList(*category_list);
+ category_filter_.InitializeFromConfigDict(dict);
const base::ListValue* category_event_filters = nullptr;
if (dict.GetList(kEventFiltersParam, &category_event_filters))
SetEventFiltersFromConfigList(*category_event_filters);
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
// If dump triggers not set, the client is using the legacy with just
// category enabled. So, use the default periodic dump config.
const DictionaryValue* memory_dump_config = nullptr;
@@ -468,37 +361,8 @@ void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string) {
- if (!category_filter_string.empty()) {
- std::vector<std::string> split = SplitString(
- category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
- for (const std::string& category : split) {
- // Ignore empty categories.
- if (category.empty())
- continue;
- // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
- if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
- CompareCase::SENSITIVE) &&
- category.back() == ')') {
- std::string synthetic_category = category.substr(
- strlen(kSyntheticDelayCategoryFilterPrefix),
- category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
- size_t name_length = synthetic_category.find(';');
- if (name_length != std::string::npos && name_length > 0 &&
- name_length != synthetic_category.size() - 1) {
- synthetic_delays_.push_back(synthetic_category);
- }
- } else if (category.front() == '-') {
- // Excluded categories start with '-'.
- // Remove '-' from category string.
- excluded_categories_.push_back(category.substr(1));
- } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
- } else {
- included_categories_.push_back(category);
- }
- }
- }
+ if (!category_filter_string.empty())
+ category_filter_.InitializeFromString(category_filter_string);
record_mode_ = RECORD_UNTIL_FULL;
enable_systrace_ = false;
@@ -523,64 +387,11 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
}
}
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
SetDefaultMemoryDumpConfig();
}
}
-void TraceConfig::SetCategoriesFromIncludedList(
- const ListValue& included_list) {
- included_categories_.clear();
- for (size_t i = 0; i < included_list.GetSize(); ++i) {
- std::string category;
- if (!included_list.GetString(i, &category))
- continue;
- if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
- TRACE_DISABLED_BY_DEFAULT("")) == 0) {
- disabled_categories_.push_back(category);
- } else {
- included_categories_.push_back(category);
- }
- }
-}
-
-void TraceConfig::SetCategoriesFromExcludedList(
- const ListValue& excluded_list) {
- excluded_categories_.clear();
- for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
- std::string category;
- if (excluded_list.GetString(i, &category))
- excluded_categories_.push_back(category);
- }
-}
-
-void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
- synthetic_delays_.clear();
- for (size_t i = 0; i < list.GetSize(); ++i) {
- std::string delay;
- if (!list.GetString(i, &delay))
- continue;
- // Synthetic delays are of the form "delay;option;option;...".
- size_t name_length = delay.find(';');
- if (name_length != std::string::npos && name_length > 0 &&
- name_length != delay.size() - 1) {
- synthetic_delays_.push_back(delay);
- }
- }
-}
-
-void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
- const char* param,
- const StringList& categories) const {
- if (categories.empty())
- return;
-
- auto list = MakeUnique<ListValue>();
- for (const std::string& category : categories)
- list->AppendString(category);
- dict->Set(param, std::move(list));
-}
-
void TraceConfig::SetMemoryDumpConfigFromConfigDict(
const DictionaryValue& memory_dump_config) {
// Set allowed dump modes.
@@ -673,29 +484,7 @@ void TraceConfig::SetEventFiltersFromConfigList(
<< "Invalid predicate name in category event filter.";
EventFilterConfig new_config(predicate_name);
- const base::ListValue* included_list = nullptr;
- CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
- << "Missing included_categories in category event filter.";
-
- for (size_t i = 0; i < included_list->GetSize(); ++i) {
- std::string category;
- if (included_list->GetString(i, &category))
- new_config.AddIncludedCategory(category);
- }
-
- const base::ListValue* excluded_list = nullptr;
- if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
- for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
- std::string category;
- if (excluded_list->GetString(i, &category))
- new_config.AddExcludedCategory(category);
- }
- }
-
- const base::DictionaryValue* args_dict = nullptr;
- if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
- new_config.SetArgs(args_dict->CreateDeepCopy());
-
+ new_config.InitializeFromConfigDict(event_filter);
event_filters_.push_back(new_config);
}
}
@@ -722,50 +511,20 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
- StringList categories(included_categories_);
- categories.insert(categories.end(),
- disabled_categories_.begin(),
- disabled_categories_.end());
- AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
- AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
- AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
+ category_filter_.ToDict(dict.get());
if (!event_filters_.empty()) {
std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
for (const EventFilterConfig& filter : event_filters_) {
std::unique_ptr<base::DictionaryValue> filter_dict(
new base::DictionaryValue());
- filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
-
- std::unique_ptr<base::ListValue> included_categories_list(
- new base::ListValue());
- for (const std::string& included_category : filter.included_categories())
- included_categories_list->AppendString(included_category);
-
- filter_dict->Set(kIncludedCategoriesParam,
- std::move(included_categories_list));
-
- if (!filter.excluded_categories().empty()) {
- std::unique_ptr<base::ListValue> excluded_categories_list(
- new base::ListValue());
- for (const std::string& excluded_category :
- filter.excluded_categories())
- excluded_categories_list->AppendString(excluded_category);
-
- filter_dict->Set(kExcludedCategoriesParam,
- std::move(excluded_categories_list));
- }
-
- if (filter.filter_args())
- filter_dict->Set(kFilterArgsParam,
- filter.filter_args()->CreateDeepCopy());
-
+ filter.ToDict(filter_dict.get());
filter_list->Append(std::move(filter_dict));
}
dict->Set(kEventFiltersParam, std::move(filter_list));
}
- if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
auto allowed_modes = MakeUnique<ListValue>();
for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
@@ -829,59 +588,5 @@ std::string TraceConfig::ToTraceOptionsString() const {
return ret;
}
-void TraceConfig::WriteCategoryFilterString(const StringList& values,
- std::string* out,
- bool included) const {
- bool prepend_comma = !out->empty();
- int token_cnt = 0;
- for (const std::string& category : values) {
- if (token_cnt > 0 || prepend_comma)
- StringAppendF(out, ",");
- StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
- ++token_cnt;
- }
-}
-
-void TraceConfig::WriteCategoryFilterString(const StringList& delays,
- std::string* out) const {
- bool prepend_comma = !out->empty();
- int token_cnt = 0;
- for (const std::string& category : delays) {
- if (token_cnt > 0 || prepend_comma)
- StringAppendF(out, ",");
- StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
- category.c_str());
- ++token_cnt;
- }
-}
-
-bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
- // Check the disabled- filters and the disabled-* wildcard first so that a
- // "*" filter does not include the disabled.
- for (const std::string& category : disabled_categories_) {
- if (MatchPattern(category_name, category))
- return true;
- }
-
- if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
- return false;
-
- for (const std::string& category : included_categories_) {
- if (MatchPattern(category_name, category))
- return true;
- }
-
- return false;
-}
-
-bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- StringPiece str) {
- return str.empty() || str.front() == ' ' || str.back() == ' ';
-}
-
-bool TraceConfig::HasIncludedPatterns() const {
- return !included_categories_.empty();
-}
-
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index 717c2613169..13b2f5f0ee7 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -17,6 +17,7 @@
#include "base/gtest_prod_util.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_config_category_filter.h"
#include "base/values.h"
namespace base {
@@ -94,26 +95,25 @@ class BASE_EXPORT TraceConfig {
EventFilterConfig& operator=(const EventFilterConfig& rhs);
- void AddIncludedCategory(const std::string& category);
- void AddExcludedCategory(const std::string& category);
- void SetArgs(std::unique_ptr<base::DictionaryValue> args);
+ void InitializeFromConfigDict(const base::DictionaryValue* event_filter);
+
+ void SetCategoryFilter(const TraceConfigCategoryFilter& category_filter);
+
+ void ToDict(DictionaryValue* filter_dict) const;
+
bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
const std::string& predicate_name() const { return predicate_name_; }
base::DictionaryValue* filter_args() const { return args_.get(); }
- const StringList& included_categories() const {
- return included_categories_;
- }
- const StringList& excluded_categories() const {
- return excluded_categories_;
+ const TraceConfigCategoryFilter& category_filter() const {
+ return category_filter_;
}
private:
std::string predicate_name_;
- StringList included_categories_;
- StringList excluded_categories_;
+ TraceConfigCategoryFilter category_filter_;
std::unique_ptr<base::DictionaryValue> args_;
};
typedef std::vector<EventFilterConfig> EventFilters;
@@ -231,7 +231,7 @@ class BASE_EXPORT TraceConfig {
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
- bool IsCategoryGroupEnabled(const char* category_group_name) const;
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
@@ -241,6 +241,10 @@ class BASE_EXPORT TraceConfig {
// Clears and resets the memory dump config.
void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+ const TraceConfigCategoryFilter& category_filter() const {
+ return category_filter_;
+ }
+
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
@@ -254,15 +258,6 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
- IsEmptyOrContainsLeadingOrTrailingWhitespace);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
- FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
- EmptyAndAsteriskCategoryFilterString);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -279,13 +274,6 @@ class BASE_EXPORT TraceConfig {
void InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string);
- void SetCategoriesFromIncludedList(const ListValue& included_list);
- void SetCategoriesFromExcludedList(const ListValue& excluded_list);
- void SetSyntheticDelaysFromList(const ListValue& list);
- void AddCategoryToDict(DictionaryValue* dict,
- const char* param,
- const StringList& categories) const;
-
void SetMemoryDumpConfigFromConfigDict(
const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
@@ -295,32 +283,14 @@ class BASE_EXPORT TraceConfig {
std::string ToTraceOptionsString() const;
- void WriteCategoryFilterString(const StringList& values,
- std::string* out,
- bool included) const;
- void WriteCategoryFilterString(const StringList& delays,
- std::string* out) const;
-
- // Returns true if the category is enabled according to this trace config.
- // This tells whether a category is enabled from the TraceConfig's
- // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
- // category is enabled from the tracing runtime's perspective.
- bool IsCategoryEnabled(const char* category_name) const;
-
- static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
-
- bool HasIncludedPatterns() const;
-
TraceRecordMode record_mode_;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
+ TraceConfigCategoryFilter category_filter_;
+
MemoryDumpConfig memory_dump_config_;
- StringList included_categories_;
- StringList disabled_categories_;
- StringList excluded_categories_;
- StringList synthetic_delays_;
EventFilters event_filters_;
};
diff --git a/chromium/base/trace_event/trace_config_category_filter.cc b/chromium/base/trace_event/trace_config_category_filter.cc
new file mode 100644
index 00000000000..234db18c5cd
--- /dev/null
+++ b/chromium/base/trace_event/trace_config_category_filter.cc
@@ -0,0 +1,297 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_config_category_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+const char kSyntheticDelaysParam[] = "synthetic_delays";
+
+const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
+}  // namespace
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter() {}
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter(
+ const TraceConfigCategoryFilter& other)
+ : included_categories_(other.included_categories_),
+ disabled_categories_(other.disabled_categories_),
+ excluded_categories_(other.excluded_categories_),
+ synthetic_delays_(other.synthetic_delays_) {}
+
+TraceConfigCategoryFilter::~TraceConfigCategoryFilter() {}
+
+TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
+ const TraceConfigCategoryFilter& rhs) {
+ included_categories_ = rhs.included_categories_;
+ disabled_categories_ = rhs.disabled_categories_;
+ excluded_categories_ = rhs.excluded_categories_;
+ synthetic_delays_ = rhs.synthetic_delays_;
+ return *this;
+}
+
+void TraceConfigCategoryFilter::InitializeFromString(
+ const StringPiece& category_filter_string) {
+ std::vector<StringPiece> split = SplitStringPiece(
+ category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ for (const StringPiece& category : split) {
+ // Ignore empty categories.
+ if (category.empty())
+ continue;
+ // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
+ if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+ CompareCase::SENSITIVE) &&
+ category.back() == ')') {
+ StringPiece synthetic_category = category.substr(
+ strlen(kSyntheticDelayCategoryFilterPrefix),
+ category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
+ size_t name_length = synthetic_category.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != synthetic_category.size() - 1) {
+ synthetic_delays_.push_back(synthetic_category.as_string());
+ }
+ } else if (category.front() == '-') {
+ // Excluded categories start with '-'.
+ // Remove '-' from category string.
+ excluded_categories_.push_back(category.substr(1).as_string());
+ } else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
+ disabled_categories_.push_back(category.as_string());
+ } else {
+ included_categories_.push_back(category.as_string());
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::InitializeFromConfigDict(
+ const DictionaryValue& dict) {
+ const ListValue* category_list = nullptr;
+ if (dict.GetList(kIncludedCategoriesParam, &category_list))
+ SetCategoriesFromIncludedList(*category_list);
+ if (dict.GetList(kExcludedCategoriesParam, &category_list))
+ SetCategoriesFromExcludedList(*category_list);
+ if (dict.GetList(kSyntheticDelaysParam, &category_list))
+ SetSyntheticDelaysFromList(*category_list);
+}
+
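+// A category group is enabled if any of its comma-separated tokens is
+// explicitly included (first pass). Otherwise, when no explicit includes are
+// configured, it is enabled if no token is excluded and at least one token is
+// not disabled-by-default (second pass).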
+bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
+ const StringPiece& category_group_name) const {
+ bool had_enabled_by_default = false;
+ DCHECK(!category_group_name.empty());
+ CStringTokenizer category_group_tokens(category_group_name.begin(),
+ category_group_name.end(), ",");
+ while (category_group_tokens.GetNext()) {
+ StringPiece category_group_token = category_group_tokens.token_piece();
+ // Don't allow empty tokens, nor tokens with leading or trailing space.
+ DCHECK(IsCategoryNameAllowed(category_group_token))
+ << "Disallowed category string";
+ if (IsCategoryEnabled(category_group_token))
+ return true;
+
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+ had_enabled_by_default = true;
+ }
+ // Do a second pass to check for explicitly disabled categories
+ // (those explicitly enabled have priority due to first pass).
+ category_group_tokens.Reset();
+ bool category_group_disabled = false;
+ while (category_group_tokens.GetNext()) {
+ StringPiece category_group_token = category_group_tokens.token_piece();
+ for (const std::string& category : excluded_categories_) {
+ if (MatchPattern(category_group_token, category)) {
+        // The current token of category_group_name is present in the
+        // excluded_ list. Flag the exclusion and proceed to check whether any
+        // of the remaining categories of category_group_name are absent from
+        // the excluded_ list.
+ category_group_disabled = true;
+ break;
+ }
+      // One of the categories of category_group_name is not present in the
+      // excluded_ list. So, if it's not a disabled-by-default category, it
+      // has to be in the included_ list. Enable the category_group_name for
+      // recording.
+ if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+ category_group_disabled = false;
+ }
+    // One of the categories present in category_group_name is not in the
+    // excluded_ list. This implies the category_group_name group can be
+    // enabled for recording, since at least one of its categories is enabled.
+ if (!category_group_disabled)
+ break;
+ }
+  // If the category group is not excluded and there are no included patterns,
+  // we consider this category group enabled, as long as it had categories
+  // other than disabled-by-default ones.
+ return !category_group_disabled && had_enabled_by_default &&
+ included_categories_.empty();
+}
+
+bool TraceConfigCategoryFilter::IsCategoryEnabled(
+ const StringPiece& category_name) const {
+  // Check the disabled- filters and the disabled-* wildcard first so that a
+  // "*" filter does not include disabled-by-default categories.
+ for (const std::string& category : disabled_categories_) {
+ if (MatchPattern(category_name, category))
+ return true;
+ }
+
+ if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+ return false;
+
+ for (const std::string& category : included_categories_) {
+ if (MatchPattern(category_name, category))
+ return true;
+ }
+
+ return false;
+}
+
+void TraceConfigCategoryFilter::Merge(const TraceConfigCategoryFilter& config) {
+  // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filters was specifying "*" and we want to honor the
+  // broadest filter.
+ if (!included_categories_.empty() && !config.included_categories_.empty()) {
+ included_categories_.insert(included_categories_.end(),
+ config.included_categories_.begin(),
+ config.included_categories_.end());
+ } else {
+ included_categories_.clear();
+ }
+
+ disabled_categories_.insert(disabled_categories_.end(),
+ config.disabled_categories_.begin(),
+ config.disabled_categories_.end());
+ excluded_categories_.insert(excluded_categories_.end(),
+ config.excluded_categories_.begin(),
+ config.excluded_categories_.end());
+ synthetic_delays_.insert(synthetic_delays_.end(),
+ config.synthetic_delays_.begin(),
+ config.synthetic_delays_.end());
+}
+
+void TraceConfigCategoryFilter::Clear() {
+ included_categories_.clear();
+ disabled_categories_.clear();
+ excluded_categories_.clear();
+ synthetic_delays_.clear();
+}
+
+void TraceConfigCategoryFilter::ToDict(DictionaryValue* dict) const {
+ StringList categories(included_categories_);
+ categories.insert(categories.end(), disabled_categories_.begin(),
+ disabled_categories_.end());
+ AddCategoriesToDict(categories, kIncludedCategoriesParam, dict);
+ AddCategoriesToDict(excluded_categories_, kExcludedCategoriesParam, dict);
+ AddCategoriesToDict(synthetic_delays_, kSyntheticDelaysParam, dict);
+}
+
+std::string TraceConfigCategoryFilter::ToFilterString() const {
+ std::string filter_string;
+ WriteCategoryFilterString(included_categories_, &filter_string, true);
+ WriteCategoryFilterString(disabled_categories_, &filter_string, true);
+ WriteCategoryFilterString(excluded_categories_, &filter_string, false);
+ WriteCategoryFilterString(synthetic_delays_, &filter_string);
+ return filter_string;
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromIncludedList(
+ const ListValue& included_list) {
+ included_categories_.clear();
+ for (size_t i = 0; i < included_list.GetSize(); ++i) {
+ std::string category;
+ if (!included_list.GetString(i, &category))
+ continue;
+ if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+ TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+ disabled_categories_.push_back(category);
+ } else {
+ included_categories_.push_back(category);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromExcludedList(
+ const ListValue& excluded_list) {
+ excluded_categories_.clear();
+ for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
+ std::string category;
+ if (excluded_list.GetString(i, &category))
+ excluded_categories_.push_back(category);
+ }
+}
+
+void TraceConfigCategoryFilter::SetSyntheticDelaysFromList(
+ const ListValue& list) {
+ for (size_t i = 0; i < list.GetSize(); ++i) {
+ std::string delay;
+ if (!list.GetString(i, &delay))
+ continue;
+ // Synthetic delays are of the form "delay;option;option;...".
+ size_t name_length = delay.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != delay.size() - 1) {
+ synthetic_delays_.push_back(delay);
+ }
+ }
+}
+
+void TraceConfigCategoryFilter::AddCategoriesToDict(
+ const StringList& categories,
+ const char* param,
+ DictionaryValue* dict) const {
+ if (categories.empty())
+ return;
+
+ auto list = MakeUnique<ListValue>();
+ for (const std::string& category : categories)
+ list->AppendString(category);
+ dict->Set(param, std::move(list));
+}
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+ const StringList& values,
+ std::string* out,
+ bool included) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (const std::string& category : values) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
+ ++token_cnt;
+ }
+}
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+ const StringList& delays,
+ std::string* out) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (const std::string& category : delays) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
+ category.c_str());
+ ++token_cnt;
+ }
+}
+
+// static
+bool TraceConfigCategoryFilter::IsCategoryNameAllowed(StringPiece str) {
+ return !str.empty() && str.front() != ' ' && str.back() != ' ';
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_config_category_filter.h b/chromium/base/trace_event/trace_config_category_filter.h
new file mode 100644
index 00000000000..0d7dba0374e
--- /dev/null
+++ b/chromium/base/trace_event/trace_config_category_filter.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+// Configuration of categories enabled and disabled in TraceConfig.
+class BASE_EXPORT TraceConfigCategoryFilter {
+ public:
+ using StringList = std::vector<std::string>;
+
+ TraceConfigCategoryFilter();
+ TraceConfigCategoryFilter(const TraceConfigCategoryFilter& other);
+ ~TraceConfigCategoryFilter();
+
+ TraceConfigCategoryFilter& operator=(const TraceConfigCategoryFilter& rhs);
+
+  // Initializes from a category filter string. See the TraceConfig
+  // constructor for a description of how to write a category filter string.
+ void InitializeFromString(const StringPiece& category_filter_string);
+
+ // Initializes TraceConfigCategoryFilter object from the config dictionary.
+ void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Merges this filter with the given category filter config.
+ void Merge(const TraceConfigCategoryFilter& config);
+ void Clear();
+
+ // Returns true if at least one category in the list is enabled by this
+ // trace config. This is used to determine if the category filters are
+ // enabled in the TRACE_* macros.
+ bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+
+ // Returns true if the category is enabled according to this trace config.
+ // This tells whether a category is enabled from the TraceConfig's
+ // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+ // category is enabled from the tracing runtime's perspective.
+ bool IsCategoryEnabled(const StringPiece& category_name) const;
+
+ void ToDict(DictionaryValue* dict) const;
+
+ std::string ToFilterString() const;
+
+  // Returns true if the category name is a valid string: non-empty, with no
+  // leading or trailing whitespace.
+ static bool IsCategoryNameAllowed(StringPiece str);
+
+ const StringList& included_categories() const { return included_categories_; }
+ const StringList& excluded_categories() const { return excluded_categories_; }
+ const StringList& synthetic_delays() const { return synthetic_delays_; }
+
+ private:
+ void SetCategoriesFromIncludedList(const ListValue& included_list);
+ void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+ void SetSyntheticDelaysFromList(const ListValue& list);
+
+ void AddCategoriesToDict(const StringList& categories,
+ const char* param,
+ DictionaryValue* dict) const;
+
+ void WriteCategoryFilterString(const StringList& values,
+ std::string* out,
+ bool included) const;
+ void WriteCategoryFilterString(const StringList& delays,
+ std::string* out) const;
+
+ StringList included_categories_;
+ StringList disabled_categories_;
+ StringList excluded_categories_;
+ StringList synthetic_delays_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index 74aa7bdc63a..a856c271927 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -304,10 +304,12 @@ TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
CheckDefaultTraceConfigBehavior(tc_asterisk);
// They differ only for internal checking.
- EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
- EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
- EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
- EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc_empty.category_filter().IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(
+ tc_empty.category_filter().IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.category_filter().IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(
+ tc_asterisk.category_filter().IsCategoryEnabled("not-excluded-category"));
}
TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
@@ -402,13 +404,15 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
"-exc_pattern*,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
tc.ToCategoryFilterString().c_str());
- EXPECT_TRUE(tc.IsCategoryEnabled("included"));
- EXPECT_TRUE(tc.IsCategoryEnabled("inc_pattern_category"));
- EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-cc"));
- EXPECT_FALSE(tc.IsCategoryEnabled("excluded"));
- EXPECT_FALSE(tc.IsCategoryEnabled("exc_pattern_category"));
- EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-others"));
- EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-nor-included"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("included"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("inc_pattern_category"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("disabled-by-default-cc"));
+ EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("excluded"));
+ EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("exc_pattern_category"));
+ EXPECT_FALSE(
+ tc.category_filter().IsCategoryEnabled("disabled-by-default-others"));
+ EXPECT_FALSE(
+ tc.category_filter().IsCategoryEnabled("not-excluded-nor-included"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("inc_pattern_category"));
@@ -431,10 +435,12 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
EXPECT_STREQ("event_whitelist_predicate",
event_filter.predicate_name().c_str());
- EXPECT_EQ(1u, event_filter.included_categories().size());
- EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
- EXPECT_EQ(1u, event_filter.excluded_categories().size());
- EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.category_filter().included_categories().size());
+ EXPECT_STREQ("*",
+ event_filter.category_filter().included_categories()[0].c_str());
+ EXPECT_EQ(1u, event_filter.category_filter().excluded_categories().size());
+ EXPECT_STREQ("unfiltered_cat",
+ event_filter.category_filter().excluded_categories()[0].c_str());
EXPECT_TRUE(event_filter.filter_args());
std::string json_out;
@@ -449,8 +455,10 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
TraceConfig tc2(config_string_2);
- EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
- EXPECT_FALSE(tc2.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc2.category_filter().IsCategoryEnabled(
+ "non-disabled-by-default-pattern"));
+ EXPECT_FALSE(
+ tc2.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
EXPECT_TRUE(tc2.IsCategoryGroupEnabled("non-disabled-by-default-pattern"));
EXPECT_FALSE(tc2.IsCategoryGroupEnabled("disabled-by-default-pattern"));
@@ -538,8 +546,9 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
"\"excluded_categories\":[\"category\",\"disabled-by-default-pattern\"]"
"}";
tc = TraceConfig(invalid_config_string_2);
- EXPECT_TRUE(tc.IsCategoryEnabled("category"));
- EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("category"));
+ EXPECT_TRUE(
+ tc.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("category"));
EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-pattern"));
}
@@ -591,27 +600,25 @@ TEST(TraceConfigTest, IsCategoryGroupEnabled) {
EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,disabled-by-default-cc"));
}
-TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
- // Test that IsEmptyOrContainsLeadingOrTrailingWhitespace actually catches
- // categories that are explicitly forbidden.
- // This method is called in a DCHECK to assert that we don't have these types
- // of strings as categories.
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category"));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category"));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- " bad_category "));
- EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- ""));
- EXPECT_FALSE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
- "good_category"));
+TEST(TraceConfigTest, IsCategoryNameAllowed) {
+ // Test that IsCategoryNameAllowed actually catches categories that are
+ // explicitly forbidden. This method is called in a DCHECK to assert that we
+ // don't have these types of strings as categories.
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+ EXPECT_FALSE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+ EXPECT_FALSE(TraceConfigCategoryFilter::IsCategoryNameAllowed(""));
+ EXPECT_TRUE(
+ TraceConfigCategoryFilter::IsCategoryNameAllowed("good_category"));
}
TEST(TraceConfigTest, SetTraceOptionValues) {
@@ -637,20 +644,20 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
EXPECT_EQ(tc_str1, tc2.ToString());
EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
+ ASSERT_EQ(2u, tc1.memory_dump_config().triggers.size());
EXPECT_EQ(200u,
- tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ tc1.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
- tc1.memory_dump_config_.triggers[0].level_of_detail);
+ tc1.memory_dump_config().triggers[0].level_of_detail);
EXPECT_EQ(2000u,
- tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
+ tc1.memory_dump_config().triggers[1].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc1.memory_dump_config_.triggers[1].level_of_detail);
+ tc1.memory_dump_config().triggers[1].level_of_detail);
EXPECT_EQ(
2048u,
- tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+ tc1.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
std::string tc_str3 =
TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
@@ -658,20 +665,20 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
TraceConfig tc3(tc_str3);
EXPECT_EQ(tc_str3, tc3.ToString());
EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ ASSERT_EQ(1u, tc3.memory_dump_config().triggers.size());
+ EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
- tc3.memory_dump_config_.triggers[0].level_of_detail);
+ tc3.memory_dump_config().triggers[0].level_of_detail);
std::string tc_str4 =
TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
1 /*heavy_period */);
TraceConfig tc4(tc_str4);
EXPECT_EQ(tc_str4, tc4.ToString());
- ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
+ EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
- tc4.memory_dump_config_.triggers[0].level_of_detail);
+ tc4.memory_dump_config().triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
@@ -679,22 +686,22 @@ TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
tc.ToString());
- EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
- EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
- ::kDefaultBreakdownThresholdBytes,
- tc.memory_dump_config_.heap_profiler_options
- .breakdown_threshold_bytes);
+ EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
+  EXPECT_EQ(
+      TraceConfig::MemoryDumpConfig::HeapProfiler::
+          kDefaultBreakdownThresholdBytes,
+      tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
- EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
- EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
- ::kDefaultBreakdownThresholdBytes,
- tc.memory_dump_config_.heap_profiler_options
- .breakdown_threshold_bytes);
+ EXPECT_EQ(2u, tc.memory_dump_config().triggers.size());
+  EXPECT_EQ(
+      TraceConfig::MemoryDumpConfig::HeapProfiler::
+          kDefaultBreakdownThresholdBytes,
+      tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
} // namespace trace_event
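The tests above now reach TraceConfig internals only through read-only accessors rather than private members. A sketch of the assumed accessor surface (category_filter() and memory_dump_config() appear in the hunks above; the exact return types are inferred):

    const base::trace_event::TraceConfig tc("included_cat", "");
    // Category queries go through the embedded TraceConfigCategoryFilter.
    bool enabled = tc.category_filter().IsCategoryEnabled("included_cat");
    // Memory-dump settings are likewise exposed read-only.
    size_t triggers = tc.memory_dump_config().triggers.size();
    (void)enabled; (void)triggers;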
diff --git a/chromium/base/trace_event/trace_event_argument.cc b/chromium/base/trace_event/trace_event_argument.cc
index da33c6da004..646b1f21687 100644
--- a/chromium/base/trace_event/trace_event_argument.cc
+++ b/chromium/base/trace_event/trace_event_argument.cc
@@ -268,7 +268,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
} break;
case base::Value::Type::STRING: {
- const StringValue* string_value;
+ const Value* string_value;
value.GetAsString(&string_value);
SetStringWithCopiedName(name, string_value->GetString());
} break;
@@ -289,7 +289,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
for (const auto& base_value : *list_value)
- AppendBaseValue(*base_value);
+ AppendBaseValue(base_value);
EndArray();
} break;
}
@@ -322,7 +322,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
} break;
case base::Value::Type::STRING: {
- const StringValue* string_value;
+ const Value* string_value;
value.GetAsString(&string_value);
AppendString(string_value->GetString());
} break;
@@ -343,7 +343,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
value.GetAsList(&list_value);
BeginArray();
for (const auto& base_value : *list_value)
- AppendBaseValue(*base_value);
+ AppendBaseValue(base_value);
EndArray();
} break;
}
@@ -369,9 +369,11 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
cur_dict = new_dict;
} else {
cur_list->Append(WrapUnique(new_dict));
+ // |new_dict| is invalidated at this point, so |cur_dict| needs to be
+ // reset.
+ cur_list->GetDictionary(cur_list->GetSize() - 1, &cur_dict);
stack.push_back(cur_list);
cur_list = nullptr;
- cur_dict = new_dict;
}
} break;
@@ -396,7 +398,8 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
} else {
cur_list->Append(WrapUnique(new_list));
stack.push_back(cur_list);
- cur_list = new_list;
+      // |new_list| is invalidated at this point, so |cur_list| needs to be
+      // reset to point at the stored copy.
+ cur_list->GetList(cur_list->GetSize() - 1, &cur_list);
}
} break;
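The two resets above are needed because ListValue now stores its elements by value (see the values.h change later in this patch): Append() moves the pointee into the vector, so the raw pointer handed in no longer refers to the stored element. The hazard in miniature, as a sketch rather than patch code:

    #include <utility>
    #include <vector>

    #include "base/memory/ptr_util.h"
    #include "base/values.h"

    void InvalidationSketch() {
      std::vector<base::Value> storage;
      auto new_dict =
          base::MakeUnique<base::Value>(base::Value::Type::DICTIONARY);
      base::Value* raw = new_dict.get();
      storage.push_back(std::move(*new_dict));  // payload moved into the vector
      new_dict.reset();                         // |raw| now dangles
      base::Value* stored = &storage.back();    // the element actually kept
      (void)raw; (void)stored;
    }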
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.cc b/chromium/base/trace_event/trace_event_etw_export_win.cc
index 06a6b9574a8..e97ab56be6e 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.cc
+++ b/chromium/base/trace_event/trace_event_etw_export_win.cc
@@ -348,8 +348,8 @@ void TraceEventETWExport::AddCompleteEndEvent(const char* name) {
// static
bool TraceEventETWExport::IsCategoryGroupEnabled(
- const char* category_group_name) {
- DCHECK(category_group_name);
+ StringPiece category_group_name) {
+ DCHECK(!category_group_name.empty());
auto* instance = GetInstance();
if (instance == nullptr)
return false;
@@ -357,12 +357,11 @@ bool TraceEventETWExport::IsCategoryGroupEnabled(
if (!instance->IsETWExportEnabled())
return false;
- CStringTokenizer category_group_tokens(
- category_group_name, category_group_name + strlen(category_group_name),
- ",");
+ CStringTokenizer category_group_tokens(category_group_name.begin(),
+ category_group_name.end(), ",");
while (category_group_tokens.GetNext()) {
- std::string category_group_token = category_group_tokens.token();
- if (instance->IsCategoryEnabled(category_group_token.c_str())) {
+ StringPiece category_group_token = category_group_tokens.token_piece();
+ if (instance->IsCategoryEnabled(category_group_token)) {
return true;
}
}
@@ -406,7 +405,7 @@ bool TraceEventETWExport::UpdateEnabledCategories() {
return true;
}
-bool TraceEventETWExport::IsCategoryEnabled(const char* category_name) const {
+bool TraceEventETWExport::IsCategoryEnabled(StringPiece category_name) const {
DCHECK_EQ(kNumberOfCategories, categories_status_.size());
// Try to find the category and return its status if found
auto it = categories_status_.find(category_name);
@@ -415,7 +414,7 @@ bool TraceEventETWExport::IsCategoryEnabled(const char* category_name) const {
// Otherwise return the corresponding default status by first checking if the
// category is disabled by default.
- if (StringPiece(category_name).starts_with("disabled-by-default")) {
+ if (category_name.starts_with("disabled-by-default")) {
DCHECK(categories_status_.find(kDisabledOtherEventsGroupName) !=
categories_status_.end());
return categories_status_.find(kDisabledOtherEventsGroupName)->second;
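The migration from const char* to StringPiece lets the tokenizer walk the category group without a strlen() call or per-token std::string copies. The same pattern in isolation (the helper name is hypothetical):

    #include "base/strings/string_piece.h"
    #include "base/strings/string_tokenizer.h"

    bool GroupContains(base::StringPiece group, base::StringPiece wanted) {
      base::CStringTokenizer tokens(group.begin(), group.end(), ",");
      while (tokens.GetNext()) {
        if (tokens.token_piece() == wanted)  // token_piece() avoids a copy
          return true;
      }
      return false;
    }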
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.h b/chromium/base/trace_event/trace_event_etw_export_win.h
index 8ee2d634169..3a6b6612ca3 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.h
+++ b/chromium/base/trace_event/trace_event_etw_export_win.h
@@ -56,7 +56,7 @@ class BASE_EXPORT TraceEventETWExport {
static void AddCompleteEndEvent(const char* name);
// Returns true if any category in the group is enabled.
- static bool IsCategoryGroupEnabled(const char* category_group_name);
+ static bool IsCategoryGroupEnabled(StringPiece category_group_name);
private:
// Ensure only the provider can construct us.
@@ -70,7 +70,7 @@ class BASE_EXPORT TraceEventETWExport {
bool UpdateEnabledCategories();
// Returns true if the category is enabled.
- bool IsCategoryEnabled(const char* category_name) const;
+ bool IsCategoryEnabled(StringPiece category_name) const;
// Called back by the update thread to check for potential changes to the
// keyword.
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index 48a0d29f446..99f0240a8be 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -77,16 +77,16 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
break;
case Value::Type::STRING: {
- const StringValue* string_value = nullptr;
+ const Value* string_value = nullptr;
value.GetAsString(&string_value);
- Add("StringValue", sizeof(StringValue));
+ Add("StringValue", sizeof(Value));
AddString(string_value->GetString());
} break;
case Value::Type::BINARY: {
- const BinaryValue* binary_value = nullptr;
+ const Value* binary_value = nullptr;
value.GetAsBinary(&binary_value);
- Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
+ Add("BinaryValue", sizeof(Value) + binary_value->GetSize());
} break;
case Value::Type::DICTIONARY: {
@@ -105,7 +105,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
for (const auto& v : *list_value)
- AddValue(*v);
+ AddValue(v);
} break;
default:
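These edits are safe drop-in replacements: StringValue and BinaryValue were type aliases of Value (the aliases themselves are deleted in the values.h hunk near the end of this patch), so sizeof(Value) and the GetAs*() out-parameters keep their meaning. For instance, a sketch:

    #include "base/values.h"

    size_t StringPayloadBytes(const base::Value& value) {
      const base::Value* string_value = nullptr;
      if (value.GetAsString(&string_value))  // same overload the alias hit
        return string_value->GetString().size();
      return 0;
    }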
diff --git a/chromium/base/trace_event/trace_event_system_stats_monitor.cc b/chromium/base/trace_event/trace_event_system_stats_monitor.cc
index 03ef5a67fda..52e1cdcc3d9 100644
--- a/chromium/base/trace_event/trace_event_system_stats_monitor.cc
+++ b/chromium/base/trace_event/trace_event_system_stats_monitor.cc
@@ -79,16 +79,14 @@ void TraceEventSystemStatsMonitor::OnTraceLogEnabled() {
if (!enabled)
return;
task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&TraceEventSystemStatsMonitor::StartProfiling,
- weak_factory_.GetWeakPtr()));
+ FROM_HERE, base::BindOnce(&TraceEventSystemStatsMonitor::StartProfiling,
+ weak_factory_.GetWeakPtr()));
}
void TraceEventSystemStatsMonitor::OnTraceLogDisabled() {
task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&TraceEventSystemStatsMonitor::StopProfiling,
- weak_factory_.GetWeakPtr()));
+ FROM_HERE, base::BindOnce(&TraceEventSystemStatsMonitor::StopProfiling,
+ weak_factory_.GetWeakPtr()));
}
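This file, like several below, migrates one-shot task callbacks from base::Bind to base::BindOnce. The distinction, in a sketch (DoWork is a stand-in):

    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"

    void DoWork(int n) { /* ... */ }

    void BindSketch() {
      // BindOnce produces a OnceCallback: movable, runnable at most once.
      base::OnceCallback<void()> once = base::BindOnce(&DoWork, 1);
      std::move(once).Run();
      // Bind produces a (repeating) Callback: copyable, runnable many times.
      base::Callback<void()> repeating = base::Bind(&DoWork, 2);
      repeating.Run();
      repeating.Run();
    }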
void TraceEventSystemStatsMonitor::StartProfiling() {
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index 29be3feec83..5b68c4ef427 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -124,8 +124,9 @@ class TraceEventTestFixture : public testing::Test {
Thread flush_thread("flush");
flush_thread.Start();
flush_thread.task_runner()->PostTask(
- FROM_HERE, base::Bind(&TraceEventTestFixture::EndTraceAndFlushAsync,
- base::Unretained(this), &flush_complete_event));
+ FROM_HERE,
+ base::BindOnce(&TraceEventTestFixture::EndTraceAndFlushAsync,
+ base::Unretained(this), &flush_complete_event));
flush_complete_event.Wait();
}
@@ -1737,7 +1738,8 @@ TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
thread.Start();
thread.task_runner()->PostTask(
- FROM_HERE, base::Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ FROM_HERE,
+ base::BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
thread.Stop();
@@ -1760,8 +1762,8 @@ TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
WaitableEvent::InitialState::NOT_SIGNALED);
threads[i]->Start();
threads[i]->task_runner()->PostTask(
- FROM_HERE, base::Bind(&TraceManyInstantEvents, i, num_events,
- task_complete_events[i]));
+ FROM_HERE, base::BindOnce(&TraceManyInstantEvents, i, num_events,
+ task_complete_events[i]));
}
for (int i = 0; i < num_threads; i++) {
@@ -1810,8 +1812,8 @@ TEST_F(TraceEventTestFixture, ThreadNames) {
threads[i]->Start();
thread_ids[i] = threads[i]->GetThreadId();
threads[i]->task_runner()->PostTask(
- FROM_HERE, base::Bind(&TraceManyInstantEvents, i, kNumEvents,
- task_complete_events[i]));
+ FROM_HERE, base::BindOnce(&TraceManyInstantEvents, i, kNumEvents,
+ task_complete_events[i]));
}
for (int i = 0; i < kNumThreads; i++) {
task_complete_events[i]->Wait();
@@ -2720,11 +2722,11 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
WaitableEvent::InitialState::NOT_SIGNALED);
thread.Start();
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
- Unretained(TraceLog::GetInstance())));
+ FROM_HERE, BindOnce(&TraceLog::SetCurrentThreadBlocksMessageLoop,
+ Unretained(TraceLog::GetInstance())));
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
@@ -2732,7 +2734,8 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+ FROM_HERE,
+ BindOnce(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
EndTraceAndFlush();
@@ -2776,16 +2779,16 @@ TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
thread.Start();
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
- thread.task_runner()->PostTask(
- FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped, &task_start_event,
- &task_stop_event));
+ thread.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&SetBlockingFlagAndBlockUntilStopped,
+ &task_start_event, &task_stop_event));
task_start_event.Wait();
EndTraceAndFlush();
@@ -2804,7 +2807,7 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
thread.Start();
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
task_complete_event.Reset();
@@ -2813,7 +2816,8 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+ FROM_HERE,
+ BindOnce(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
// The thread will timeout in this flush.
@@ -2828,7 +2832,8 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
task_start_event.Reset();
task_stop_event.Reset();
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+ FROM_HERE,
+ BindOnce(&BlockUntilStopped, &task_start_event, &task_stop_event));
task_start_event.Wait();
task_stop_event.Signal();
Clear();
@@ -2837,7 +2842,7 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
// local buffer for the thread without any error.
BeginTrace();
thread.task_runner()->PostTask(
- FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
task_complete_event.Wait();
task_complete_event.Reset();
EndTraceAndFlushInThreadWithMessageLoop();
@@ -3087,11 +3092,15 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"testing_predicate\", "
- " \"included_categories\": [\"filtered_cat\"]"
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
" }"
" "
" ]"
@@ -3110,12 +3119,15 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
TRACE_EVENT0("filtered_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a horse");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
+
// This is scoped so we can test the end event being filtered.
{ TRACE_EVENT0("filtered_cat", "another cat whoa"); }
EndTraceAndFlush();
- EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(4u, filter_hits_counter.filter_trace_event_hit_count);
EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
}
@@ -3124,12 +3136,14 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"%s\", "
- " \"included_categories\": [\"*\"], "
- " \"excluded_categories\": [\"unfiltered_cat\"], "
+ " \"included_categories\": ["
+ " \"filtered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("*") "\"], "
" \"filter_args\": {"
" \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
" }"
@@ -3147,12 +3161,16 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
TRACE_EVENT0("filtered_cat", "a snake");
TRACE_EVENT0("filtered_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a cat");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a pony");
EndTraceAndFlush();
EXPECT_TRUE(FindMatchingValue("name", "a snake"));
EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+ EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+ EXPECT_FALSE(FindMatchingValue("name", "a pony"));
}
TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
@@ -3160,12 +3178,16 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
"{"
" \"included_categories\": ["
" \"filtered_cat\","
- " \"unfiltered_cat\"],"
+ " \"unfiltered_cat\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
" \"excluded_categories\": [\"excluded_cat\"],"
" \"event_filters\": ["
" {"
" \"filter_predicate\": \"%s\", "
- " \"included_categories\": [\"*\"]"
+ " \"included_categories\": ["
+ " \"*\","
+ " \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
" }"
" ]"
"}",
@@ -3179,6 +3201,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
TRACE_EVENT0("filtered_cat", "a snake");
TRACE_EVENT0("excluded_cat", "a mushroom");
TRACE_EVENT0("unfiltered_cat", "a cat");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
EndTraceAndFlush();
@@ -3186,6 +3210,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
EXPECT_TRUE(FindMatchingValue("name", "a snake"));
EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+ EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+ EXPECT_TRUE(FindMatchingValue("name", "a pony"));
}
TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
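For readers tracing the category strings above: TRACE_DISABLED_BY_DEFAULT is a plain string-literal prefix macro, so the new config entries expand as follows:

    // From trace_event_common.h:
    //   #define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
    // Hence:
    //   TRACE_DISABLED_BY_DEFAULT("filtered_cat")
    //       == "disabled-by-default-filtered_cat"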
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index 6dddb94018f..3e9375db53e 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -19,8 +19,10 @@
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
+#include "base/process/process_info.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
+#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
@@ -645,8 +647,8 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config,
observer->OnTraceLogEnabled();
for (const auto& it : observer_map) {
it.second.task_runner->PostTask(
- FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
- it.second.observer));
+ FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
+ it.second.observer));
}
{
@@ -746,8 +748,8 @@ void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
observer->OnTraceLogDisabled();
for (const auto& it : observer_map) {
it.second.task_runner->PostTask(
- FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
- it.second.observer));
+ FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
+ it.second.observer));
}
}
dispatching_to_observer_list_ = false;
@@ -891,12 +893,13 @@ void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
if (!thread_message_loop_task_runners.empty()) {
for (auto& task_runner : thread_message_loop_task_runners) {
task_runner->PostTask(
- FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
- gen, discard_events));
+ FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
+ gen, discard_events));
}
flush_task_runner_->PostDelayedTask(
- FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), gen,
- discard_events),
+ FROM_HERE,
+ BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
+ discard_events),
TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
return;
}
@@ -967,14 +970,15 @@ void TraceLog::FinishFlush(int generation, bool discard_events) {
if (use_worker_thread_) {
base::PostTaskWithTraits(
- FROM_HERE, base::TaskTraits()
- .MayBlock()
- .WithPriority(base::TaskPriority::BACKGROUND)
- .WithShutdownBehavior(
- base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
- Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
- Passed(&previous_logged_events), flush_output_callback,
- argument_filter_predicate));
+ FROM_HERE,
+ base::TaskTraits()
+ .MayBlock()
+ .WithPriority(base::TaskPriority::BACKGROUND)
+ .WithShutdownBehavior(
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
+ BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
+ Passed(&previous_logged_events), flush_output_callback,
+ argument_filter_predicate));
return;
}
@@ -1002,8 +1006,8 @@ void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
return;
flush_task_runner_->PostTask(
- FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
- discard_events));
+ FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
+ discard_events));
}
void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
@@ -1500,8 +1504,18 @@ void TraceLog::AddMetadataEventsWhileLocked() {
process_name_);
}
+#if !defined(OS_NACL) && !defined(OS_IOS)
+ Time process_creation_time = CurrentProcessInfo::CreationTime();
+ if (!process_creation_time.is_null()) {
+ TimeDelta process_uptime = Time::Now() - process_creation_time;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_uptime_seconds",
+ "uptime", process_uptime.InSeconds());
+ }
+#endif // !defined(OS_NACL) && !defined(OS_IOS)
+
if (!process_labels_.empty()) {
- std::vector<std::string> labels;
+ std::vector<base::StringPiece> labels;
for (const auto& it : process_labels_)
labels.push_back(it.second);
InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
diff --git a/chromium/base/tuple.h b/chromium/base/tuple.h
index 34fd789976f..e13afaf31fc 100644
--- a/chromium/base/tuple.h
+++ b/chromium/base/tuple.h
@@ -51,20 +51,6 @@ template <size_t N, size_t... Ns>
struct MakeIndexSequenceImpl<N, Ns...>
: MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
-// std::get() in <=libstdc++-4.6 returns an lvalue-reference for
-// rvalue-reference of a tuple, where an rvalue-reference is expected.
-template <size_t I, typename... Ts>
-typename std::tuple_element<I, std::tuple<Ts...>>::type&& get(
- std::tuple<Ts...>&& t) {
- using ElemType = typename std::tuple_element<I, std::tuple<Ts...>>::type;
- return std::forward<ElemType>(std::get<I>(t));
-}
-
-template <size_t I, typename T>
-auto get(T& t) -> decltype(std::get<I>(t)) {
- return std::get<I>(t);
-}
-
template <size_t N>
using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
@@ -88,7 +74,7 @@ inline void DispatchToMethodImpl(const ObjT& obj,
Method method,
Tuple&& args,
IndexSequence<Ns...>) {
- (obj->*method)(base::get<Ns>(std::forward<Tuple>(args))...);
+ (obj->*method)(std::get<Ns>(std::forward<Tuple>(args))...);
}
template <typename ObjT, typename Method, typename Tuple>
@@ -105,7 +91,7 @@ template <typename Function, typename Tuple, size_t... Ns>
inline void DispatchToFunctionImpl(Function function,
Tuple&& args,
IndexSequence<Ns...>) {
- (*function)(base::get<Ns>(std::forward<Tuple>(args))...);
+ (*function)(std::get<Ns>(std::forward<Tuple>(args))...);
}
template <typename Function, typename Tuple>
@@ -128,7 +114,7 @@ inline void DispatchToMethodImpl(const ObjT& obj,
OutTuple* out,
IndexSequence<InNs...>,
IndexSequence<OutNs...>) {
- (obj->*method)(base::get<InNs>(std::forward<InTuple>(in))...,
+ (obj->*method)(std::get<InNs>(std::forward<InTuple>(in))...,
&std::get<OutNs>(*out)...);
}
diff --git a/chromium/base/tuple_unittest.cc b/chromium/base/tuple_unittest.cc
index 6f90c292206..815b43bb1b3 100644
--- a/chromium/base/tuple_unittest.cc
+++ b/chromium/base/tuple_unittest.cc
@@ -114,30 +114,4 @@ TEST(TupleTest, Copying) {
EXPECT_EQ(2, CopyLogger::TimesCopied);
}
-TEST(TupleTest, Get) {
- int i = 1;
- int j = 2;
- std::tuple<int, int&, int&&> t(3, i, std::move(j));
- EXPECT_TRUE((std::is_same<int&, decltype(base::get<0>(t))>::value));
- EXPECT_EQ(3, base::get<0>(t));
-
- EXPECT_TRUE((std::is_same<int&, decltype(base::get<1>(t))>::value));
- EXPECT_EQ(1, base::get<1>(t));
-
- EXPECT_TRUE((std::is_same<int&, decltype(base::get<2>(t))>::value));
- EXPECT_EQ(2, base::get<2>(t));
-
- EXPECT_TRUE((std::is_same<int&&,
- decltype(base::get<0>(std::move(t)))>::value));
- EXPECT_EQ(3, base::get<0>(std::move(t)));
-
- EXPECT_TRUE((std::is_same<int&,
- decltype(base::get<1>(std::move(t)))>::value));
- EXPECT_EQ(1, base::get<1>(std::move(t)));
-
- EXPECT_TRUE((std::is_same<int&&,
- decltype(base::get<2>(std::move(t)))>::value));
- EXPECT_EQ(2, base::get<2>(std::move(t)));
-}
-
} // namespace base
diff --git a/chromium/base/value_conversions.cc b/chromium/base/value_conversions.cc
index a461e2c31a9..7f220004e14 100644
--- a/chromium/base/value_conversions.cc
+++ b/chromium/base/value_conversions.cc
@@ -9,6 +9,7 @@
#include <string>
#include "base/files/file_path.h"
+#include "base/memory/ptr_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/time/time.h"
#include "base/values.h"
@@ -17,8 +18,8 @@ namespace base {
// |Value| internally stores strings in UTF-8, so we have to convert from the
// system native code to UTF-8 and back.
-StringValue* CreateFilePathValue(const FilePath& in_value) {
- return new StringValue(in_value.AsUTF8Unsafe());
+std::unique_ptr<Value> CreateFilePathValue(const FilePath& in_value) {
+ return base::MakeUnique<Value>(in_value.AsUTF8Unsafe());
}
bool GetValueAsFilePath(const Value& value, FilePath* file_path) {
@@ -32,9 +33,9 @@ bool GetValueAsFilePath(const Value& value, FilePath* file_path) {
// |Value| does not support 64-bit integers, and doubles do not have enough
// precision, so we store the 64-bit time value as a string instead.
-StringValue* CreateTimeDeltaValue(const TimeDelta& time) {
+std::unique_ptr<Value> CreateTimeDeltaValue(const TimeDelta& time) {
std::string string_value = base::Int64ToString(time.ToInternalValue());
- return new StringValue(string_value);
+ return base::MakeUnique<Value>(string_value);
}
bool GetValueAsTimeDelta(const Value& value, TimeDelta* time) {
diff --git a/chromium/base/value_conversions.h b/chromium/base/value_conversions.h
index 6ce1bff27e4..bfdec70a91a 100644
--- a/chromium/base/value_conversions.h
+++ b/chromium/base/value_conversions.h
@@ -7,21 +7,21 @@
// This file contains methods to convert things to a |Value| and back.
+#include <memory>
#include "base/base_export.h"
-
namespace base {
class FilePath;
class TimeDelta;
class Value;
-using StringValue = Value;
// The caller takes ownership of the returned value.
-BASE_EXPORT StringValue* CreateFilePathValue(const FilePath& in_value);
+BASE_EXPORT std::unique_ptr<Value> CreateFilePathValue(
+ const FilePath& in_value);
BASE_EXPORT bool GetValueAsFilePath(const Value& value, FilePath* file_path);
-BASE_EXPORT StringValue* CreateTimeDeltaValue(const TimeDelta& time);
+BASE_EXPORT std::unique_ptr<Value> CreateTimeDeltaValue(const TimeDelta& time);
BASE_EXPORT bool GetValueAsTimeDelta(const Value& value, TimeDelta* time);
} // namespace base
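Call sites of these conversion helpers change from owning raw pointers to std::unique_ptr, making ownership explicit in the type. A sketch of the new shape:

    #include <memory>

    #include "base/files/file_path.h"
    #include "base/value_conversions.h"
    #include "base/values.h"

    std::unique_ptr<base::Value> MakePathValue(const base::FilePath& path) {
      // Previously callers received a raw StringValue* they had to wrap or
      // delete themselves; now the return type carries the ownership.
      return base::CreateFilePathValue(path);
    }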
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 9aa04f33c64..0dc62b3d399 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -32,17 +32,14 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
// Make a deep copy of |node|, but don't include empty lists or dictionaries
// in the copy. This function may return NULL; it expects |node| to always
// be non-NULL.
-std::unique_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
- std::unique_ptr<ListValue> copy;
- for (const auto& entry : list) {
- std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(*entry);
- if (child_copy) {
- if (!copy)
- copy.reset(new ListValue);
- copy->Append(std::move(child_copy));
- }
+std::unique_ptr<Value> CopyListWithoutEmptyChildren(const Value& list) {
+ Value copy(Value::Type::LIST);
+ for (const auto& entry : list.GetList()) {
+ std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(entry);
+ if (child_copy)
+ copy.GetList().push_back(std::move(*child_copy));
}
- return copy;
+ return copy.GetList().empty() ? nullptr : MakeUnique<Value>(std::move(copy));
}
std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
@@ -69,33 +66,27 @@ std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
static_cast<const DictionaryValue&>(node));
default:
- return node.CreateDeepCopy();
+ return MakeUnique<Value>(node);
}
}
} // namespace
// static
-std::unique_ptr<Value> Value::CreateNullValue() {
- return WrapUnique(new Value(Type::NONE));
-}
-
-// static
-std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
- const char* buffer,
- size_t size) {
- return MakeUnique<BinaryValue>(std::vector<char>(buffer, buffer + size));
+std::unique_ptr<Value> Value::CreateWithCopiedBuffer(const char* buffer,
+ size_t size) {
+ return MakeUnique<Value>(std::vector<char>(buffer, buffer + size));
}
Value::Value(const Value& that) {
InternalCopyConstructFrom(that);
}
-Value::Value(Value&& that) {
+Value::Value(Value&& that) noexcept {
InternalMoveConstructFrom(std::move(that));
}
-Value::Value() : type_(Type::NONE) {}
+Value::Value() noexcept : type_(Type::NONE) {}
Value::Value(Type type) : type_(type) {
// Initialize with the default value.
@@ -149,7 +140,7 @@ Value::Value(const std::string& in_string) : type_(Type::STRING) {
DCHECK(IsStringUTF8(*string_value_));
}
-Value::Value(std::string&& in_string) : type_(Type::STRING) {
+Value::Value(std::string&& in_string) noexcept : type_(Type::STRING) {
string_value_.Init(std::move(in_string));
DCHECK(IsStringUTF8(*string_value_));
}
@@ -168,32 +159,37 @@ Value::Value(const std::vector<char>& in_blob) : type_(Type::BINARY) {
binary_value_.Init(in_blob);
}
-Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
+Value::Value(std::vector<char>&& in_blob) noexcept : type_(Type::BINARY) {
binary_value_.Init(std::move(in_blob));
}
+Value::Value(DictStorage&& in_dict) noexcept : type_(Type::DICTIONARY) {
+ dict_ptr_.Init(MakeUnique<DictStorage>(std::move(in_dict)));
+}
+
+Value::Value(const ListStorage& in_list) : type_(Type::LIST) {
+ list_.Init(in_list);
+}
+
+Value::Value(ListStorage&& in_list) noexcept : type_(Type::LIST) {
+ list_.Init(std::move(in_list));
+}
+
Value& Value::operator=(const Value& that) {
- if (this != &that) {
- if (type_ == that.type_) {
- InternalCopyAssignFrom(that);
- } else {
- InternalCleanup();
- InternalCopyConstructFrom(that);
- }
+ if (type_ == that.type_) {
+ InternalCopyAssignFromSameType(that);
+ } else {
+    // This cannot be a self-assignment because the type_ doesn't match.
+ InternalCleanup();
+ InternalCopyConstructFrom(that);
}
return *this;
}
-Value& Value::operator=(Value&& that) {
- if (this != &that) {
- if (type_ == that.type_) {
- InternalMoveAssignFrom(std::move(that));
- } else {
- InternalCleanup();
- InternalMoveConstructFrom(std::move(that));
- }
- }
+Value& Value::operator=(Value&& that) noexcept {
+ InternalCleanup();
+ InternalMoveConstructFrom(std::move(that));
return *this;
}
@@ -238,6 +234,16 @@ const std::vector<char>& Value::GetBlob() const {
return *binary_value_;
}
+Value::ListStorage& Value::GetList() {
+ CHECK(is_list());
+ return *list_;
+}
+
+const Value::ListStorage& Value::GetList() const {
+ CHECK(is_list());
+ return *list_;
+}
+
size_t Value::GetSize() const {
return GetBlob().size();
}
@@ -290,9 +296,9 @@ bool Value::GetAsString(string16* out_value) const {
return is_string();
}
-bool Value::GetAsString(const StringValue** out_value) const {
+bool Value::GetAsString(const Value** out_value) const {
if (out_value && is_string()) {
- *out_value = static_cast<const StringValue*>(this);
+ *out_value = static_cast<const Value*>(this);
return true;
}
return is_string();
@@ -306,7 +312,7 @@ bool Value::GetAsString(StringPiece* out_value) const {
return is_string();
}
-bool Value::GetAsBinary(const BinaryValue** out_value) const {
+bool Value::GetAsBinary(const Value** out_value) const {
if (out_value && is_blob()) {
*out_value = this;
return true;
@@ -347,114 +353,113 @@ bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
}
Value* Value::DeepCopy() const {
- // This method should only be getting called for null Values--all subclasses
- // need to provide their own implementation;.
- switch (type()) {
- case Type::NONE:
- return CreateNullValue().release();
-
- case Type::BOOLEAN:
- return new Value(bool_value_);
- case Type::INTEGER:
- return new Value(int_value_);
- case Type::DOUBLE:
- return new Value(double_value_);
- // For now, make StringValues for backward-compatibility. Convert to
- // Value when that code is deleted.
- case Type::STRING:
- return new StringValue(*string_value_);
- // For now, make BinaryValues for backward-compatibility. Convert to
- // Value when that code is deleted.
- case Type::BINARY:
- return new BinaryValue(*binary_value_);
-
- // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
- // are completely inlined.
- case Type::DICTIONARY: {
- DictionaryValue* result = new DictionaryValue;
-
- for (const auto& current_entry : **dict_ptr_) {
- result->SetWithoutPathExpansion(current_entry.first,
- current_entry.second->CreateDeepCopy());
- }
-
- return result;
- }
-
- case Type::LIST: {
- ListValue* result = new ListValue;
-
- for (const auto& entry : *list_)
- result->Append(entry->CreateDeepCopy());
-
- return result;
- }
-
- default:
- NOTREACHED();
- return nullptr;
- }
+ return new Value(*this);
}
std::unique_ptr<Value> Value::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<Value>(*this);
}
-bool Value::Equals(const Value* other) const {
- if (other->type() != type())
+bool operator==(const Value& lhs, const Value& rhs) {
+ if (lhs.type_ != rhs.type_)
return false;
- switch (type()) {
- case Type::NONE:
+ switch (lhs.type_) {
+ case Value::Type::NONE:
return true;
- case Type::BOOLEAN:
- return bool_value_ == other->bool_value_;
- case Type::INTEGER:
- return int_value_ == other->int_value_;
- case Type::DOUBLE:
- return double_value_ == other->double_value_;
- case Type::STRING:
- return *string_value_ == *(other->string_value_);
- case Type::BINARY:
- return *binary_value_ == *(other->binary_value_);
+ case Value::Type::BOOLEAN:
+ return lhs.bool_value_ == rhs.bool_value_;
+ case Value::Type::INTEGER:
+ return lhs.int_value_ == rhs.int_value_;
+ case Value::Type::DOUBLE:
+ return lhs.double_value_ == rhs.double_value_;
+ case Value::Type::STRING:
+ return *lhs.string_value_ == *rhs.string_value_;
+ case Value::Type::BINARY:
+ return *lhs.binary_value_ == *rhs.binary_value_;
// TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
// are completely inlined.
- case Type::DICTIONARY: {
- if ((*dict_ptr_)->size() != (*other->dict_ptr_)->size())
+ case Value::Type::DICTIONARY:
+ if ((*lhs.dict_ptr_)->size() != (*rhs.dict_ptr_)->size())
return false;
+ return std::equal(std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
+ std::begin(**rhs.dict_ptr_),
+ [](const Value::DictStorage::value_type& u,
+ const Value::DictStorage::value_type& v) {
+ return std::tie(u.first, *u.second) ==
+ std::tie(v.first, *v.second);
+ });
+ case Value::Type::LIST:
+ return *lhs.list_ == *rhs.list_;
+ }
- return std::equal(std::begin(**dict_ptr_), std::end(**dict_ptr_),
- std::begin(**(other->dict_ptr_)),
- [](const DictStorage::value_type& lhs,
- const DictStorage::value_type& rhs) {
- if (lhs.first != rhs.first)
- return false;
+ NOTREACHED();
+ return false;
+}
- return lhs.second->Equals(rhs.second.get());
- });
- }
- case Type::LIST: {
- if (list_->size() != other->list_->size())
- return false;
+bool operator!=(const Value& lhs, const Value& rhs) {
+ return !(lhs == rhs);
+}
- return std::equal(std::begin(*list_), std::end(*list_),
- std::begin(*(other->list_)),
- [](const ListStorage::value_type& lhs,
- const ListStorage::value_type& rhs) {
- return lhs->Equals(rhs.get());
- });
- }
+bool operator<(const Value& lhs, const Value& rhs) {
+ if (lhs.type_ != rhs.type_)
+ return lhs.type_ < rhs.type_;
+
+ switch (lhs.type_) {
+ case Value::Type::NONE:
+ return false;
+ case Value::Type::BOOLEAN:
+ return lhs.bool_value_ < rhs.bool_value_;
+ case Value::Type::INTEGER:
+ return lhs.int_value_ < rhs.int_value_;
+ case Value::Type::DOUBLE:
+ return lhs.double_value_ < rhs.double_value_;
+ case Value::Type::STRING:
+ return *lhs.string_value_ < *rhs.string_value_;
+ case Value::Type::BINARY:
+ return *lhs.binary_value_ < *rhs.binary_value_;
+ // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+ // are completely inlined.
+ case Value::Type::DICTIONARY:
+ return std::lexicographical_compare(
+ std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
+ std::begin(**rhs.dict_ptr_), std::end(**rhs.dict_ptr_),
+ [](const Value::DictStorage::value_type& u,
+ const Value::DictStorage::value_type& v) {
+ return std::tie(u.first, *u.second) < std::tie(v.first, *v.second);
+ });
+ case Value::Type::LIST:
+ return *lhs.list_ < *rhs.list_;
}
NOTREACHED();
return false;
}
+bool operator>(const Value& lhs, const Value& rhs) {
+ return rhs < lhs;
+}
+
+bool operator<=(const Value& lhs, const Value& rhs) {
+ return !(rhs < lhs);
+}
+
+bool operator>=(const Value& lhs, const Value& rhs) {
+ return !(lhs < rhs);
+}
+
+bool Value::Equals(const Value* other) const {
+ DCHECK(other);
+ return *this == *other;
+}
+
// static
bool Value::Equals(const Value* a, const Value* b) {
- if ((a == NULL) && (b == NULL)) return true;
- if ((a == NULL) ^ (b == NULL)) return false;
- return a->Equals(b);
+ if ((a == NULL) && (b == NULL))
+ return true;
+ if ((a == NULL) ^ (b == NULL))
+ return false;
+ return *a == *b;
}
void Value::InternalCopyFundamentalValue(const Value& that) {
@@ -495,15 +500,20 @@ void Value::InternalCopyConstructFrom(const Value& that) {
case Type::BINARY:
binary_value_.Init(*that.binary_value_);
return;
- // DictStorage and ListStorage are move-only types due to the presence of
- // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
- // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
- // can be copied directly.
+ // DictStorage is a move-only type due to the presence of unique_ptrs. This
+ // is why the explicit copy of every element is necessary here.
+ // TODO(crbug.com/646113): Clean this up when DictStorage can be copied
+ // directly.
case Type::DICTIONARY:
- dict_ptr_.Init(std::move(*that.CreateDeepCopy()->dict_ptr_));
+ dict_ptr_.Init(MakeUnique<DictStorage>());
+ for (const auto& it : **that.dict_ptr_) {
+ (*dict_ptr_)
+ ->emplace_hint((*dict_ptr_)->end(), it.first,
+ MakeUnique<Value>(*it.second));
+ }
return;
case Type::LIST:
- list_.Init(std::move(*that.CreateDeepCopy()->list_));
+ list_.Init(*that.list_);
return;
}
}
@@ -534,8 +544,10 @@ void Value::InternalMoveConstructFrom(Value&& that) {
}
}
-void Value::InternalCopyAssignFrom(const Value& that) {
- type_ = that.type_;
+void Value::InternalCopyAssignFromSameType(const Value& that) {
+ // TODO(crbug.com/646113): make this a DCHECK once base::Value does not have
+ // subclasses.
+ CHECK_EQ(type_, that.type_);
switch (type_) {
case Type::NONE:
@@ -551,41 +563,17 @@ void Value::InternalCopyAssignFrom(const Value& that) {
case Type::BINARY:
*binary_value_ = *that.binary_value_;
return;
- // DictStorage and ListStorage are move-only types due to the presence of
- // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
- // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
- // can be copied directly.
- case Type::DICTIONARY:
- *dict_ptr_ = std::move(*that.CreateDeepCopy()->dict_ptr_);
- return;
- case Type::LIST:
- *list_ = std::move(*that.CreateDeepCopy()->list_);
- return;
- }
-}
-
-void Value::InternalMoveAssignFrom(Value&& that) {
- type_ = that.type_;
-
- switch (type_) {
- case Type::NONE:
- case Type::BOOLEAN:
- case Type::INTEGER:
- case Type::DOUBLE:
- InternalCopyFundamentalValue(that);
- return;
-
- case Type::STRING:
- *string_value_ = std::move(*that.string_value_);
- return;
- case Type::BINARY:
- *binary_value_ = std::move(*that.binary_value_);
- return;
- case Type::DICTIONARY:
- *dict_ptr_ = std::move(*that.dict_ptr_);
+ // DictStorage is a move-only type due to the presence of unique_ptrs. This
+ // is why the explicit call to the copy constructor is necessary here.
+ // TODO(crbug.com/646113): Clean this up when DictStorage can be copied
+ // directly.
+ case Type::DICTIONARY: {
+ Value copy = that;
+ *dict_ptr_ = std::move(*copy.dict_ptr_);
return;
+ }
case Type::LIST:
- *list_ = std::move(*that.list_);
+ *list_ = *that.list_;
return;
}
}
@@ -683,11 +671,11 @@ void DictionaryValue::SetDouble(StringPiece path, double in_value) {
}
void DictionaryValue::SetString(StringPiece path, StringPiece in_value) {
- Set(path, new StringValue(in_value));
+ Set(path, new Value(in_value));
}
void DictionaryValue::SetString(StringPiece path, const string16& in_value) {
- Set(path, new StringValue(in_value));
+ Set(path, new Value(in_value));
}
void DictionaryValue::SetWithoutPathExpansion(StringPiece key,
@@ -717,12 +705,12 @@ void DictionaryValue::SetDoubleWithoutPathExpansion(StringPiece path,
void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
StringPiece in_value) {
- SetWithoutPathExpansion(path, base::MakeUnique<base::StringValue>(in_value));
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
const string16& in_value) {
- SetWithoutPathExpansion(path, base::MakeUnique<base::StringValue>(in_value));
+ SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
}
bool DictionaryValue::Get(StringPiece path,
@@ -809,7 +797,7 @@ bool DictionaryValue::GetStringASCII(StringPiece path,
}
bool DictionaryValue::GetBinary(StringPiece path,
- const BinaryValue** out_value) const {
+ const Value** out_value) const {
const Value* value;
bool result = Get(path, &value);
if (!result || !value->IsType(Type::BINARY))
@@ -821,10 +809,9 @@ bool DictionaryValue::GetBinary(StringPiece path,
return true;
}
-bool DictionaryValue::GetBinary(StringPiece path, BinaryValue** out_value) {
+bool DictionaryValue::GetBinary(StringPiece path, Value** out_value) {
return static_cast<const DictionaryValue&>(*this).GetBinary(
- path,
- const_cast<const BinaryValue**>(out_value));
+ path, const_cast<const Value**>(out_value));
}
bool DictionaryValue::GetDictionary(StringPiece path,
@@ -1038,6 +1025,7 @@ std::unique_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
}
void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
+ CHECK(dictionary->is_dict());
for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
const Value* merge_value = &it.value();
// Check whether we have to merge dictionaries.
@@ -1050,12 +1038,12 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
}
}
// All other cases: Make a copy and hook it up.
- SetWithoutPathExpansion(it.key(),
- base::WrapUnique(merge_value->DeepCopy()));
+ SetWithoutPathExpansion(it.key(), MakeUnique<Value>(*merge_value));
}
}
void DictionaryValue::Swap(DictionaryValue* other) {
+ CHECK(other->is_dict());
dict_ptr_->swap(*(other->dict_ptr_));
}
@@ -1067,11 +1055,11 @@ DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
DictionaryValue::Iterator::~Iterator() {}
DictionaryValue* DictionaryValue::DeepCopy() const {
- return static_cast<DictionaryValue*>(Value::DeepCopy());
+ return new DictionaryValue(*this);
}
std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<DictionaryValue>(*this);
}
///////////////////// ListValue ////////////////////
@@ -1092,6 +1080,10 @@ void ListValue::Clear() {
list_->clear();
}
+void ListValue::Reserve(size_t n) {
+ list_->reserve(n);
+}
+
bool ListValue::Set(size_t index, Value* in_value) {
return Set(index, WrapUnique(in_value));
}
@@ -1103,12 +1095,10 @@ bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
if (index >= list_->size()) {
// Pad out any intermediate indexes with null settings
while (index > list_->size())
- Append(CreateNullValue());
+ Append(MakeUnique<Value>());
Append(std::move(in_value));
} else {
- // TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
- DCHECK((*list_)[index] != in_value);
- (*list_)[index] = std::move(in_value);
+ (*list_)[index] = std::move(*in_value);
}
return true;
}
@@ -1118,7 +1108,7 @@ bool ListValue::Get(size_t index, const Value** out_value) const {
return false;
if (out_value)
- *out_value = (*list_)[index].get();
+ *out_value = &(*list_)[index];
return true;
}
@@ -1169,7 +1159,7 @@ bool ListValue::GetString(size_t index, string16* out_value) const {
return value->GetAsString(out_value);
}
-bool ListValue::GetBinary(size_t index, const BinaryValue** out_value) const {
+bool ListValue::GetBinary(size_t index, const Value** out_value) const {
const Value* value;
bool result = Get(index, &value);
if (!result || !value->IsType(Type::BINARY))
@@ -1181,10 +1171,9 @@ bool ListValue::GetBinary(size_t index, const BinaryValue** out_value) const {
return true;
}
-bool ListValue::GetBinary(size_t index, BinaryValue** out_value) {
+bool ListValue::GetBinary(size_t index, Value** out_value) {
return static_cast<const ListValue&>(*this).GetBinary(
- index,
- const_cast<const BinaryValue**>(out_value));
+ index, const_cast<const Value**>(out_value));
}
bool ListValue::GetDictionary(size_t index,
@@ -1229,39 +1218,38 @@ bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
return false;
if (out_value)
- *out_value = std::move((*list_)[index]);
+ *out_value = MakeUnique<Value>(std::move((*list_)[index]));
list_->erase(list_->begin() + index);
return true;
}
bool ListValue::Remove(const Value& value, size_t* index) {
- for (auto it = list_->begin(); it != list_->end(); ++it) {
- if ((*it)->Equals(&value)) {
- size_t previous_index = it - list_->begin();
- list_->erase(it);
+ auto it = std::find(list_->begin(), list_->end(), value);
- if (index)
- *index = previous_index;
- return true;
- }
- }
- return false;
+ if (it == list_->end())
+ return false;
+
+ if (index)
+ *index = std::distance(list_->begin(), it);
+
+ list_->erase(it);
+ return true;
}
ListValue::iterator ListValue::Erase(iterator iter,
std::unique_ptr<Value>* out_value) {
if (out_value)
- *out_value = std::move(*ListStorage::iterator(iter));
+ *out_value = MakeUnique<Value>(std::move(*iter));
return list_->erase(iter);
}
void ListValue::Append(std::unique_ptr<Value> in_value) {
- list_->push_back(std::move(in_value));
+ list_->push_back(std::move(*in_value));
}
-#if !defined(OS_LINUX)
+#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
void ListValue::Append(Value* in_value) {
DCHECK(in_value);
Append(WrapUnique(in_value));
@@ -1281,11 +1269,11 @@ void ListValue::AppendDouble(double in_value) {
}
void ListValue::AppendString(StringPiece in_value) {
- Append(MakeUnique<StringValue>(in_value));
+ Append(MakeUnique<Value>(in_value));
}
void ListValue::AppendString(const string16& in_value) {
- Append(MakeUnique<StringValue>(in_value));
+ Append(MakeUnique<Value>(in_value));
}
void ListValue::AppendStrings(const std::vector<std::string>& in_values) {
@@ -1304,12 +1292,10 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
DCHECK(in_value);
- for (const auto& entry : *list_) {
- if (entry->Equals(in_value.get())) {
- return false;
- }
- }
- list_->push_back(std::move(in_value));
+ if (std::find(list_->begin(), list_->end(), *in_value) != list_->end())
+ return false;
+
+ list_->push_back(std::move(*in_value));
return true;
}
@@ -1318,27 +1304,25 @@ bool ListValue::Insert(size_t index, std::unique_ptr<Value> in_value) {
if (index > list_->size())
return false;
- list_->insert(list_->begin() + index, std::move(in_value));
+ list_->insert(list_->begin() + index, std::move(*in_value));
return true;
}
ListValue::const_iterator ListValue::Find(const Value& value) const {
- return std::find_if(list_->begin(), list_->end(),
- [&value](const std::unique_ptr<Value>& entry) {
- return entry->Equals(&value);
- });
+ return std::find(list_->begin(), list_->end(), value);
}
void ListValue::Swap(ListValue* other) {
+ CHECK(other->is_list());
list_->swap(*(other->list_));
}
ListValue* ListValue::DeepCopy() const {
- return static_cast<ListValue*>(Value::DeepCopy());
+ return new ListValue(*this);
}
std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
- return WrapUnique(DeepCopy());
+ return MakeUnique<ListValue>(*this);
}
ValueSerializer::~ValueSerializer() {
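The hunks above move ListValue from pointer-based storage to value semantics: the backing store becomes std::vector&lt;Value&gt;, lookup goes through Value's operator== instead of pointer-wise Equals(), and Append()/Insert() move the pointee out of the incoming unique_ptr. A minimal sketch of the resulting call pattern, assuming the post-patch base::Value from this baseline (not part of the diff):

// Sketch: value-semantics list operations after this change.
base::ListValue list;
list.AppendInteger(1);
list.AppendString("two");
base::Value needle(1);
// Find() and Remove() now compare by value via operator==.
auto it = list.Find(needle);
DCHECK(it != list.end());
size_t index = 0;
list.Remove(needle, &index);  // erases the element equal to |needle|
DCHECK_EQ(0u, index);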
diff --git a/chromium/base/values.h b/chromium/base/values.h
index a95786878b2..275bee0dea5 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -29,6 +29,7 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/containers/flat_map.h"
#include "base/macros.h"
#include "base/memory/manual_constructor.h"
#include "base/strings/string16.h"
@@ -39,8 +40,6 @@ namespace base {
class DictionaryValue;
class ListValue;
class Value;
-using StringValue = Value;
-using BinaryValue = Value;
// The Value class is the base class for Values. A Value can be instantiated
// via the Create*Value() factory methods, or by directly creating instances of
@@ -49,8 +48,8 @@ using BinaryValue = Value;
// See the file-level comment above for more information.
class BASE_EXPORT Value {
public:
- using DictStorage = std::map<std::string, std::unique_ptr<Value>>;
- using ListStorage = std::vector<std::unique_ptr<Value>>;
+ using DictStorage = base::flat_map<std::string, std::unique_ptr<Value>>;
+ using ListStorage = std::vector<Value>;
enum class Type {
NONE = 0,
@@ -64,19 +63,17 @@ class BASE_EXPORT Value {
// Note: Do not add more types. See the file-level comment above for why.
};
- static std::unique_ptr<Value> CreateNullValue();
-
// For situations where you want to keep ownership of your buffer, this
// factory method creates a new BinaryValue by copying the contents of the
// buffer that's passed in.
// DEPRECATED, use MakeUnique<Value>(const std::vector<char>&) instead.
// TODO(crbug.com/646113): Delete this and migrate callsites.
- static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
- size_t size);
+ static std::unique_ptr<Value> CreateWithCopiedBuffer(const char* buffer,
+ size_t size);
Value(const Value& that);
- Value(Value&& that);
- Value(); // A null value.
+ Value(Value&& that) noexcept;
+ Value() noexcept; // A null value.
explicit Value(Type type);
explicit Value(bool in_bool);
explicit Value(int in_int);
@@ -90,16 +87,21 @@ class BASE_EXPORT Value {
// arguments.
explicit Value(const char* in_string);
explicit Value(const std::string& in_string);
- explicit Value(std::string&& in_string);
+ explicit Value(std::string&& in_string) noexcept;
explicit Value(const char16* in_string);
explicit Value(const string16& in_string);
explicit Value(StringPiece in_string);
explicit Value(const std::vector<char>& in_blob);
- explicit Value(std::vector<char>&& in_blob);
+ explicit Value(std::vector<char>&& in_blob) noexcept;
+
+ explicit Value(DictStorage&& in_dict) noexcept;
+
+ explicit Value(const ListStorage& in_list);
+ explicit Value(ListStorage&& in_list) noexcept;
Value& operator=(const Value& that);
- Value& operator=(Value&& that);
+ Value& operator=(Value&& that) noexcept;
~Value();
@@ -131,6 +133,9 @@ class BASE_EXPORT Value {
const std::string& GetString() const;
const std::vector<char>& GetBlob() const;
+ ListStorage& GetList();
+ const ListStorage& GetList() const;
+
size_t GetSize() const; // DEPRECATED, use GetBlob().size() instead.
const char* GetBuffer() const; // DEPRECATED, use GetBlob().data() instead.
@@ -143,9 +148,9 @@ class BASE_EXPORT Value {
bool GetAsDouble(double* out_value) const;
bool GetAsString(std::string* out_value) const;
bool GetAsString(string16* out_value) const;
- bool GetAsString(const StringValue** out_value) const;
+ bool GetAsString(const Value** out_value) const;
bool GetAsString(StringPiece* out_value) const;
- bool GetAsBinary(const BinaryValue** out_value) const;
+ bool GetAsBinary(const Value** out_value) const;
// ListValue::From is the equivalent for std::unique_ptr conversions.
bool GetAsList(ListValue** out_value);
bool GetAsList(const ListValue** out_value) const;
@@ -158,15 +163,30 @@ class BASE_EXPORT Value {
// to the copy. The caller gets ownership of the copy, of course.
// Subclasses return their own type directly in their overrides;
// this works because C++ supports covariant return types.
+ // DEPRECATED, use Value's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
Value* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<Value> CreateDeepCopy() const;
+ // Comparison operators so that Values can easily be used with standard
+ // library algorithms and associative containers.
+ BASE_EXPORT friend bool operator==(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator!=(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator<(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator>(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator<=(const Value& lhs, const Value& rhs);
+ BASE_EXPORT friend bool operator>=(const Value& lhs, const Value& rhs);
+
// Compares if two Value objects have equal contents.
+ // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
bool Equals(const Value* other) const;
// Compares if two Value objects have equal contents. Can handle NULLs.
- // NULLs are considered equal but different from Value::CreateNullValue().
+ // NULLs are considered equal but different from Value(Value::Type::NONE).
+ // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
static bool Equals(const Value* a, const Value* b);
protected:
@@ -191,8 +211,7 @@ class BASE_EXPORT Value {
void InternalCopyFundamentalValue(const Value& that);
void InternalCopyConstructFrom(const Value& that);
void InternalMoveConstructFrom(Value&& that);
- void InternalCopyAssignFrom(const Value& that);
- void InternalMoveAssignFrom(Value&& that);
+ void InternalCopyAssignFromSameType(const Value& that);
void InternalCleanup();
};
@@ -275,8 +294,8 @@ class BASE_EXPORT DictionaryValue : public Value {
bool GetString(StringPiece path, std::string* out_value) const;
bool GetString(StringPiece path, string16* out_value) const;
bool GetStringASCII(StringPiece path, std::string* out_value) const;
- bool GetBinary(StringPiece path, const BinaryValue** out_value) const;
- bool GetBinary(StringPiece path, BinaryValue** out_value);
+ bool GetBinary(StringPiece path, const Value** out_value) const;
+ bool GetBinary(StringPiece path, Value** out_value);
bool GetDictionary(StringPiece path,
const DictionaryValue** out_value) const;
bool GetDictionary(StringPiece path, DictionaryValue** out_value);
@@ -353,6 +372,8 @@ class BASE_EXPORT DictionaryValue : public Value {
DictStorage::const_iterator it_;
};
+ // DEPRECATED, use DictionaryValue's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
DictionaryValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove the above.
std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
@@ -375,9 +396,15 @@ class BASE_EXPORT ListValue : public Value {
// Returns the number of Values in this list.
size_t GetSize() const { return list_->size(); }
+ // Returns the capacity of storage for Values in this list.
+ size_t capacity() const { return list_->capacity(); }
+
// Returns whether the list is empty.
bool empty() const { return list_->empty(); }
+ // Reserves storage for at least |n| values.
+ void Reserve(size_t n);
+
// Sets the list item at the given index to be the Value specified by
// the value given. If the index is beyond the current end of the list, null
// Values will be used to pad out the list.
@@ -405,8 +432,8 @@ class BASE_EXPORT ListValue : public Value {
bool GetDouble(size_t index, double* out_value) const;
bool GetString(size_t index, std::string* out_value) const;
bool GetString(size_t index, string16* out_value) const;
- bool GetBinary(size_t index, const BinaryValue** out_value) const;
- bool GetBinary(size_t index, BinaryValue** out_value);
+ bool GetBinary(size_t index, const Value** out_value) const;
+ bool GetBinary(size_t index, Value** out_value);
bool GetDictionary(size_t index, const DictionaryValue** out_value) const;
bool GetDictionary(size_t index, DictionaryValue** out_value);
bool GetList(size_t index, const ListValue** out_value) const;
@@ -432,7 +459,7 @@ class BASE_EXPORT ListValue : public Value {
// Appends a Value to the end of the list.
void Append(std::unique_ptr<Value> in_value);
-#if !defined(OS_LINUX)
+#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
// Deprecated version of the above. TODO(estade): remove.
void Append(Value* in_value);
#endif
@@ -469,6 +496,8 @@ class BASE_EXPORT ListValue : public Value {
const_iterator begin() const { return list_->begin(); }
const_iterator end() const { return list_->end(); }
+ // DEPRECATED, use ListValue's copy constructor instead.
+ // TODO(crbug.com/646113): Delete this and migrate callsites.
ListValue* DeepCopy() const;
// Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
std::unique_ptr<ListValue> CreateDeepCopy() const;
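The header now exposes direct construction from ListStorage, GetList() accessors, and a full set of comparison operators. A hedged sketch of the new surface, again assuming this baseline's base::Value:

// Sketch: constructing and comparing Values with the new API.
base::Value::ListStorage storage;
storage.emplace_back(42);
storage.emplace_back("foo");
base::Value list(std::move(storage));  // Value(ListStorage&&) is noexcept
base::Value copy(list);                // deep copy via the copy constructor
DCHECK(copy == list);                  // contents compare equal
DCHECK_EQ(42, copy.GetList().front().GetInt());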
diff --git a/chromium/base/values_unittest.cc b/chromium/base/values_unittest.cc
index b44f80ce233..fe1d84dd69a 100644
--- a/chromium/base/values_unittest.cc
+++ b/chromium/base/values_unittest.cc
@@ -8,6 +8,8 @@
#include <limits>
#include <memory>
+#include <string>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -18,6 +20,23 @@
namespace base {
+TEST(ValuesTest, TestNothrow) {
+ static_assert(std::is_nothrow_move_constructible<Value>::value,
+ "IsNothrowMoveConstructible");
+ static_assert(std::is_nothrow_default_constructible<Value>::value,
+ "IsNothrowDefaultConstructible");
+ static_assert(std::is_nothrow_constructible<Value, std::string&&>::value,
+ "IsNothrowMoveConstructibleFromString");
+ static_assert(
+ std::is_nothrow_constructible<Value, std::vector<char>&&>::value,
+ "IsNothrowMoveConstructibleFromBlob");
+ static_assert(
+ std::is_nothrow_constructible<Value, Value::ListStorage&&>::value,
+ "IsNothrowMoveConstructibleFromList");
+ static_assert(std::is_nothrow_move_assignable<Value>::value,
+ "IsNothrowMoveAssignable");
+}
+
// Group of tests for the value constructors.
TEST(ValuesTest, ConstructBool) {
Value true_value(true);
@@ -43,48 +62,48 @@ TEST(ValuesTest, ConstructDouble) {
TEST(ValuesTest, ConstructStringFromConstCharPtr) {
const char* str = "foobar";
- StringValue value(str);
+ Value value(str);
EXPECT_EQ(Value::Type::STRING, value.type());
EXPECT_EQ("foobar", value.GetString());
}
TEST(ValuesTest, ConstructStringFromStdStringConstRef) {
std::string str = "foobar";
- StringValue value(str);
+ Value value(str);
EXPECT_EQ(Value::Type::STRING, value.type());
EXPECT_EQ("foobar", value.GetString());
}
TEST(ValuesTest, ConstructStringFromStdStringRefRef) {
std::string str = "foobar";
- StringValue value(std::move(str));
+ Value value(std::move(str));
EXPECT_EQ(Value::Type::STRING, value.type());
EXPECT_EQ("foobar", value.GetString());
}
TEST(ValuesTest, ConstructStringFromConstChar16Ptr) {
string16 str = ASCIIToUTF16("foobar");
- StringValue value(str.c_str());
+ Value value(str.c_str());
EXPECT_EQ(Value::Type::STRING, value.type());
EXPECT_EQ("foobar", value.GetString());
}
TEST(ValuesTest, ConstructStringFromString16) {
string16 str = ASCIIToUTF16("foobar");
- StringValue value(str);
+ Value value(str);
EXPECT_EQ(Value::Type::STRING, value.type());
EXPECT_EQ("foobar", value.GetString());
}
TEST(ValuesTest, ConstructStringFromStringPiece) {
StringPiece str = "foobar";
- StringValue value(str);
+ Value value(str);
EXPECT_EQ(Value::Type::STRING, value.type());
EXPECT_EQ("foobar", value.GetString());
}
TEST(ValuesTest, ConstructBinary) {
- BinaryValue value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
+ Value value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
EXPECT_EQ(Value::Type::BINARY, value.type());
EXPECT_EQ(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}), value.GetBlob());
}
@@ -151,8 +170,8 @@ TEST(ValuesTest, CopyDouble) {
}
TEST(ValuesTest, CopyString) {
- StringValue value("foobar");
- StringValue copied_value(value);
+ Value value("foobar");
+ Value copied_value(value);
EXPECT_EQ(value.type(), copied_value.type());
EXPECT_EQ(value.GetString(), copied_value.GetString());
@@ -164,8 +183,8 @@ TEST(ValuesTest, CopyString) {
}
TEST(ValuesTest, CopyBinary) {
- BinaryValue value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
- BinaryValue copied_value(value);
+ Value value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
+ Value copied_value(value);
EXPECT_EQ(value.type(), copied_value.type());
EXPECT_EQ(value.GetBlob(), copied_value.GetBlob());
@@ -199,25 +218,14 @@ TEST(ValuesTest, CopyDictionary) {
}
TEST(ValuesTest, CopyList) {
- // TODO(crbug.com/646113): Clean this up once ListValue switched to
- // value semantics.
- int copy;
- ListValue value;
- value.AppendInteger(123);
-
- ListValue copied_value(value);
- copied_value.GetInteger(0, &copy);
-
- EXPECT_EQ(value.type(), copied_value.type());
- EXPECT_EQ(123, copy);
- auto blank = MakeUnique<Value>();
-
- *blank = value;
- EXPECT_EQ(Value::Type::LIST, blank->type());
- static_cast<ListValue*>(blank.get())->GetInteger(0, &copy);
- EXPECT_EQ(123, copy);
+ Value value(Value::ListStorage{Value(123)});
+ Value copied_value(value);
+ EXPECT_EQ(value, copied_value);
+ Value blank;
+ blank = value;
+ EXPECT_EQ(value, blank);
}
// Group of tests for the move constructors and move-assignment.
@@ -270,28 +278,28 @@ TEST(ValuesTest, MoveDouble) {
}
TEST(ValuesTest, MoveString) {
- StringValue value("foobar");
- StringValue moved_value(std::move(value));
+ Value value("foobar");
+ Value moved_value(std::move(value));
EXPECT_EQ(Value::Type::STRING, moved_value.type());
EXPECT_EQ("foobar", moved_value.GetString());
Value blank;
- blank = StringValue("foobar");
+ blank = Value("foobar");
EXPECT_EQ(Value::Type::STRING, blank.type());
EXPECT_EQ("foobar", blank.GetString());
}
TEST(ValuesTest, MoveBinary) {
const std::vector<char> buffer = {0xF, 0x0, 0x0, 0xB, 0xA, 0x2};
- BinaryValue value(buffer);
- BinaryValue moved_value(std::move(value));
+ Value value(buffer);
+ Value moved_value(std::move(value));
EXPECT_EQ(Value::Type::BINARY, moved_value.type());
EXPECT_EQ(buffer, moved_value.GetBlob());
Value blank;
- blank = BinaryValue(buffer);
+ blank = Value(buffer);
EXPECT_EQ(Value::Type::BINARY, blank.type());
EXPECT_EQ(buffer, blank.GetBlob());
}
@@ -316,22 +324,17 @@ TEST(ValuesTest, MoveDictionary) {
}
TEST(ValuesTest, MoveList) {
- // TODO(crbug.com/646113): Clean this up once ListValue switched to
- // value semantics.
- int move;
- ListValue value;
- value.AppendInteger(123);
-
- ListValue moved_value(std::move(value));
- moved_value.GetInteger(0, &move);
-
+ const Value::ListStorage list = {Value(123)};
+ Value value(list);
+ Value moved_value(std::move(value));
EXPECT_EQ(Value::Type::LIST, moved_value.type());
- EXPECT_EQ(123, move);
+ EXPECT_EQ(123, moved_value.GetList().back().GetInt());
Value blank;
- blank = ListValue();
+ blank = Value(list);
EXPECT_EQ(Value::Type::LIST, blank.type());
+ EXPECT_EQ(123, blank.GetList().back().GetInt());
}
TEST(ValuesTest, Basic) {
@@ -382,7 +385,7 @@ TEST(ValuesTest, List) {
mixed_list->Set(0, MakeUnique<Value>(true));
mixed_list->Set(1, MakeUnique<Value>(42));
mixed_list->Set(2, MakeUnique<Value>(88.8));
- mixed_list->Set(3, MakeUnique<StringValue>("foo"));
+ mixed_list->Set(3, MakeUnique<Value>("foo"));
ASSERT_EQ(4u, mixed_list->GetSize());
Value *value = NULL;
@@ -421,7 +424,7 @@ TEST(ValuesTest, List) {
base::Value not_found_value(false);
ASSERT_NE(mixed_list->end(), mixed_list->Find(sought_value));
- ASSERT_TRUE((*mixed_list->Find(sought_value))->GetAsInteger(&int_value));
+ ASSERT_TRUE((*mixed_list->Find(sought_value)).GetAsInteger(&int_value));
ASSERT_EQ(42, int_value);
ASSERT_EQ(mixed_list->end(), mixed_list->Find(not_found_value));
}
@@ -435,7 +438,7 @@ TEST(ValuesTest, BinaryValue) {
// Test the common case of a non-empty buffer
std::vector<char> buffer(15);
char* original_buffer = buffer.data();
- binary.reset(new BinaryValue(std::move(buffer)));
+ binary.reset(new Value(std::move(buffer)));
ASSERT_TRUE(binary.get());
ASSERT_TRUE(binary->GetBuffer());
ASSERT_EQ(original_buffer, binary->GetBuffer());
@@ -443,7 +446,7 @@ TEST(ValuesTest, BinaryValue) {
char stack_buffer[42];
memset(stack_buffer, '!', 42);
- binary = BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42);
+ binary = Value::CreateWithCopiedBuffer(stack_buffer, 42);
ASSERT_TRUE(binary.get());
ASSERT_TRUE(binary->GetBuffer());
ASSERT_NE(stack_buffer, binary->GetBuffer());
@@ -452,24 +455,24 @@ TEST(ValuesTest, BinaryValue) {
// Test overloaded GetAsBinary.
Value* narrow_value = binary.get();
- const BinaryValue* narrow_binary = NULL;
+ const Value* narrow_binary = NULL;
ASSERT_TRUE(narrow_value->GetAsBinary(&narrow_binary));
EXPECT_EQ(binary.get(), narrow_binary);
}
TEST(ValuesTest, StringValue) {
// Test overloaded StringValue constructor.
- std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
+ std::unique_ptr<Value> narrow_value(new Value("narrow"));
ASSERT_TRUE(narrow_value.get());
ASSERT_TRUE(narrow_value->IsType(Value::Type::STRING));
- std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
+ std::unique_ptr<Value> utf16_value(new Value(ASCIIToUTF16("utf16")));
ASSERT_TRUE(utf16_value.get());
ASSERT_TRUE(utf16_value->IsType(Value::Type::STRING));
// Test overloaded GetAsString.
std::string narrow = "http://google.com";
string16 utf16 = ASCIIToUTF16("http://google.com");
- const StringValue* string_value = NULL;
+ const Value* string_value = NULL;
ASSERT_TRUE(narrow_value->GetAsString(&narrow));
ASSERT_TRUE(narrow_value->GetAsString(&utf16));
ASSERT_TRUE(narrow_value->GetAsString(&string_value));
@@ -487,8 +490,7 @@ TEST(ValuesTest, StringValue) {
// Don't choke on NULL values.
ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(NULL)));
ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(NULL)));
- ASSERT_TRUE(narrow_value->GetAsString(
- static_cast<const StringValue**>(NULL)));
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<const Value**>(NULL)));
}
TEST(ValuesTest, ListDeletion) {
@@ -525,10 +527,10 @@ TEST(ValuesTest, ListRemoval) {
{
ListValue list;
auto value = MakeUnique<Value>();
- Value* original_value = value.get();
+ Value original_value = *value;
list.Append(std::move(value));
size_t index = 0;
- list.Remove(*original_value, &index);
+ list.Remove(original_value, &index);
EXPECT_EQ(0U, index);
EXPECT_EQ(0U, list.GetSize());
}
@@ -568,8 +570,8 @@ TEST(ValuesTest, DictionaryRemoval) {
TEST(ValuesTest, DictionaryWithoutPathExpansion) {
DictionaryValue dict;
- dict.Set("this.is.expanded", Value::CreateNullValue());
- dict.SetWithoutPathExpansion("this.isnt.expanded", Value::CreateNullValue());
+ dict.Set("this.is.expanded", MakeUnique<Value>());
+ dict.SetWithoutPathExpansion("this.isnt.expanded", MakeUnique<Value>());
EXPECT_FALSE(dict.HasKey("this.is.expanded"));
EXPECT_TRUE(dict.HasKey("this"));
@@ -592,8 +594,8 @@ TEST(ValuesTest, DictionaryWithoutPathExpansion) {
// TODO(estade): remove.
TEST(ValuesTest, DictionaryWithoutPathExpansionDeprecated) {
DictionaryValue dict;
- dict.Set("this.is.expanded", Value::CreateNullValue());
- dict.SetWithoutPathExpansion("this.isnt.expanded", Value::CreateNullValue());
+ dict.Set("this.is.expanded", MakeUnique<Value>());
+ dict.SetWithoutPathExpansion("this.isnt.expanded", MakeUnique<Value>());
EXPECT_FALSE(dict.HasKey("this.is.expanded"));
EXPECT_TRUE(dict.HasKey("this"));
@@ -639,7 +641,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
TEST(ValuesTest, DeepCopy) {
DictionaryValue original_dict;
- std::unique_ptr<Value> scoped_null = Value::CreateNullValue();
+ auto scoped_null = MakeUnique<Value>();
Value* original_null = scoped_null.get();
original_dict.Set("null", std::move(scoped_null));
std::unique_ptr<Value> scoped_bool(new Value(true));
@@ -651,18 +653,16 @@ TEST(ValuesTest, DeepCopy) {
std::unique_ptr<Value> scoped_double(new Value(3.14));
Value* original_double = scoped_double.get();
original_dict.Set("double", std::move(scoped_double));
- std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
- StringValue* original_string = scoped_string.get();
+ std::unique_ptr<Value> scoped_string(new Value("hello"));
+ Value* original_string = scoped_string.get();
original_dict.Set("string", std::move(scoped_string));
- std::unique_ptr<StringValue> scoped_string16(
- new StringValue(ASCIIToUTF16("hello16")));
- StringValue* original_string16 = scoped_string16.get();
+ std::unique_ptr<Value> scoped_string16(new Value(ASCIIToUTF16("hello16")));
+ Value* original_string16 = scoped_string16.get();
original_dict.Set("string16", std::move(scoped_string16));
std::vector<char> original_buffer(42, '!');
- std::unique_ptr<BinaryValue> scoped_binary(
- new BinaryValue(std::move(original_buffer)));
- BinaryValue* original_binary = scoped_binary.get();
+ std::unique_ptr<Value> scoped_binary(new Value(std::move(original_buffer)));
+ Value* original_binary = scoped_binary.get();
original_dict.Set("binary", std::move(scoped_binary));
std::unique_ptr<ListValue> scoped_list(new ListValue());
@@ -681,7 +681,7 @@ TEST(ValuesTest, DeepCopy) {
scoped_nested_dictionary->SetString("key", "value");
original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
- std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
+ auto copy_dict = MakeUnique<DictionaryValue>(original_dict);
ASSERT_TRUE(copy_dict.get());
ASSERT_NE(copy_dict.get(), &original_dict);
@@ -788,13 +788,13 @@ TEST(ValuesTest, DeepCopy) {
}
TEST(ValuesTest, Equals) {
- std::unique_ptr<Value> null1(Value::CreateNullValue());
- std::unique_ptr<Value> null2(Value::CreateNullValue());
+ auto null1 = MakeUnique<Value>();
+ auto null2 = MakeUnique<Value>();
EXPECT_NE(null1.get(), null2.get());
- EXPECT_TRUE(null1->Equals(null2.get()));
+ EXPECT_EQ(*null1, *null2);
Value boolean(false);
- EXPECT_FALSE(null1->Equals(&boolean));
+ EXPECT_NE(*null1, boolean);
DictionaryValue dv;
dv.SetBoolean("a", false);
@@ -802,36 +802,36 @@ TEST(ValuesTest, Equals) {
dv.SetDouble("c", 2.5);
dv.SetString("d1", "string");
dv.SetString("d2", ASCIIToUTF16("http://google.com"));
- dv.Set("e", Value::CreateNullValue());
+ dv.Set("e", MakeUnique<Value>());
- std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
- EXPECT_TRUE(dv.Equals(copy.get()));
+ auto copy = MakeUnique<DictionaryValue>(dv);
+ EXPECT_EQ(dv, *copy);
std::unique_ptr<ListValue> list(new ListValue);
ListValue* original_list = list.get();
- list->Append(Value::CreateNullValue());
+ list->Append(MakeUnique<Value>());
list->Append(WrapUnique(new DictionaryValue));
- std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
+ auto list_copy = MakeUnique<Value>(*list);
dv.Set("f", std::move(list));
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
copy->Set("f", std::move(list_copy));
- EXPECT_TRUE(dv.Equals(copy.get()));
+ EXPECT_EQ(dv, *copy);
original_list->Append(MakeUnique<Value>(true));
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
// Check if Equals detects differences in only the keys.
- copy = dv.CreateDeepCopy();
- EXPECT_TRUE(dv.Equals(copy.get()));
+ copy = MakeUnique<DictionaryValue>(dv);
+ EXPECT_EQ(dv, *copy);
copy->Remove("a", NULL);
copy->SetBoolean("aa", false);
- EXPECT_FALSE(dv.Equals(copy.get()));
+ EXPECT_NE(dv, *copy);
}
TEST(ValuesTest, StaticEquals) {
- std::unique_ptr<Value> null1(Value::CreateNullValue());
- std::unique_ptr<Value> null2(Value::CreateNullValue());
+ auto null1 = MakeUnique<Value>();
+ auto null2 = MakeUnique<Value>();
EXPECT_TRUE(Value::Equals(null1.get(), null2.get()));
EXPECT_TRUE(Value::Equals(NULL, NULL));
@@ -845,16 +845,136 @@ TEST(ValuesTest, StaticEquals) {
EXPECT_FALSE(Value::Equals(i42.get(), NULL));
EXPECT_FALSE(Value::Equals(NULL, i42.get()));
- // NULL and Value::CreateNullValue() are intentionally different: We need
+ // NULL and MakeUnique<Value>() are intentionally different: We need
// support for NULL as a return value for "undefined" without caring for
// ownership of the pointer.
EXPECT_FALSE(Value::Equals(null1.get(), NULL));
EXPECT_FALSE(Value::Equals(NULL, null1.get()));
}
+TEST(ValuesTest, Comparisons) {
+ // Test None Values.
+ Value null1;
+ Value null2;
+ EXPECT_EQ(null1, null2);
+ EXPECT_FALSE(null1 != null2);
+ EXPECT_FALSE(null1 < null2);
+ EXPECT_FALSE(null1 > null2);
+ EXPECT_LE(null1, null2);
+ EXPECT_GE(null1, null2);
+
+ // Test Bool Values.
+ Value bool1(false);
+ Value bool2(true);
+ EXPECT_FALSE(bool1 == bool2);
+ EXPECT_NE(bool1, bool2);
+ EXPECT_LT(bool1, bool2);
+ EXPECT_FALSE(bool1 > bool2);
+ EXPECT_LE(bool1, bool2);
+ EXPECT_FALSE(bool1 >= bool2);
+
+ // Test Int Values.
+ Value int1(1);
+ Value int2(2);
+ EXPECT_FALSE(int1 == int2);
+ EXPECT_NE(int1, int2);
+ EXPECT_LT(int1, int2);
+ EXPECT_FALSE(int1 > int2);
+ EXPECT_LE(int1, int2);
+ EXPECT_FALSE(int1 >= int2);
+
+ // Test Double Values.
+ Value double1(1.0);
+ Value double2(2.0);
+ EXPECT_FALSE(double1 == double2);
+ EXPECT_NE(double1, double2);
+ EXPECT_LT(double1, double2);
+ EXPECT_FALSE(double1 > double2);
+ EXPECT_LE(double1, double2);
+ EXPECT_FALSE(double1 >= double2);
+
+ // Test String Values.
+ Value string1("1");
+ Value string2("2");
+ EXPECT_FALSE(string1 == string2);
+ EXPECT_NE(string1, string2);
+ EXPECT_LT(string1, string2);
+ EXPECT_FALSE(string1 > string2);
+ EXPECT_LE(string1, string2);
+ EXPECT_FALSE(string1 >= string2);
+
+ // Test Binary Values.
+ Value binary1(std::vector<char>{0x01});
+ Value binary2(std::vector<char>{0x02});
+ EXPECT_FALSE(binary1 == binary2);
+ EXPECT_NE(binary1, binary2);
+ EXPECT_LT(binary1, binary2);
+ EXPECT_FALSE(binary1 > binary2);
+ EXPECT_LE(binary1, binary2);
+ EXPECT_FALSE(binary1 >= binary2);
+
+ // Test Empty List Values.
+ ListValue null_list1;
+ ListValue null_list2;
+ EXPECT_EQ(null_list1, null_list2);
+ EXPECT_FALSE(null_list1 != null_list2);
+ EXPECT_FALSE(null_list1 < null_list2);
+ EXPECT_FALSE(null_list1 > null_list2);
+ EXPECT_LE(null_list1, null_list2);
+ EXPECT_GE(null_list1, null_list2);
+
+ // Test Non Empty List Values.
+ ListValue int_list1;
+ ListValue int_list2;
+ int_list1.AppendInteger(1);
+ int_list2.AppendInteger(2);
+ EXPECT_FALSE(int_list1 == int_list2);
+ EXPECT_NE(int_list1, int_list2);
+ EXPECT_LT(int_list1, int_list2);
+ EXPECT_FALSE(int_list1 > int_list2);
+ EXPECT_LE(int_list1, int_list2);
+ EXPECT_FALSE(int_list1 >= int_list2);
+
+ // Test Empty Dict Values.
+ DictionaryValue null_dict1;
+ DictionaryValue null_dict2;
+ EXPECT_EQ(null_dict1, null_dict2);
+ EXPECT_FALSE(null_dict1 != null_dict2);
+ EXPECT_FALSE(null_dict1 < null_dict2);
+ EXPECT_FALSE(null_dict1 > null_dict2);
+ EXPECT_LE(null_dict1, null_dict2);
+ EXPECT_GE(null_dict1, null_dict2);
+
+ // Test Non Empty Dict Values.
+ DictionaryValue int_dict1;
+ DictionaryValue int_dict2;
+ int_dict1.SetInteger("key", 1);
+ int_dict2.SetInteger("key", 2);
+ EXPECT_FALSE(int_dict1 == int_dict2);
+ EXPECT_NE(int_dict1, int_dict2);
+ EXPECT_LT(int_dict1, int_dict2);
+ EXPECT_FALSE(int_dict1 > int_dict2);
+ EXPECT_LE(int_dict1, int_dict2);
+ EXPECT_FALSE(int_dict1 >= int_dict2);
+
+ // Test Values of different types.
+ std::vector<Value> values = {null1, bool1, int1, double1,
+ string1, binary1, int_dict1, int_list1};
+ for (size_t i = 0; i < values.size(); ++i) {
+ for (size_t j = i + 1; j < values.size(); ++j) {
+ EXPECT_FALSE(values[i] == values[j]);
+ EXPECT_NE(values[i], values[j]);
+ EXPECT_LT(values[i], values[j]);
+ EXPECT_FALSE(values[i] > values[j]);
+ EXPECT_LE(values[i], values[j]);
+ EXPECT_FALSE(values[i] >= values[j]);
+ }
+ }
+}
+
TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
DictionaryValue original_dict;
- std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
+ auto scoped_null = MakeUnique<Value>();
Value* original_null = scoped_null.get();
original_dict.Set("null", std::move(scoped_null));
std::unique_ptr<Value> scoped_bool(new Value(true));
@@ -866,17 +986,15 @@ TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
std::unique_ptr<Value> scoped_double(new Value(3.14));
Value* original_double = scoped_double.get();
original_dict.Set("double", std::move(scoped_double));
- std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
+ std::unique_ptr<Value> scoped_string(new Value("hello"));
Value* original_string = scoped_string.get();
original_dict.Set("string", std::move(scoped_string));
- std::unique_ptr<StringValue> scoped_string16(
- new StringValue(ASCIIToUTF16("hello16")));
+ std::unique_ptr<Value> scoped_string16(new Value(ASCIIToUTF16("hello16")));
Value* original_string16 = scoped_string16.get();
original_dict.Set("string16", std::move(scoped_string16));
std::vector<char> original_buffer(42, '!');
- std::unique_ptr<BinaryValue> scoped_binary(
- new BinaryValue(std::move(original_buffer)));
+ std::unique_ptr<Value> scoped_binary(new Value(std::move(original_buffer)));
Value* original_binary = scoped_binary.get();
original_dict.Set("binary", std::move(scoped_binary));
@@ -888,25 +1006,25 @@ TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
scoped_list->Append(std::move(scoped_list_element_1));
original_dict.Set("list", std::move(scoped_list));
- std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
- std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
- std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
- std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
- std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
- std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
- std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
- std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
- std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
-
- EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
- EXPECT_TRUE(original_null->Equals(copy_null.get()));
- EXPECT_TRUE(original_bool->Equals(copy_bool.get()));
- EXPECT_TRUE(original_int->Equals(copy_int.get()));
- EXPECT_TRUE(original_double->Equals(copy_double.get()));
- EXPECT_TRUE(original_string->Equals(copy_string.get()));
- EXPECT_TRUE(original_string16->Equals(copy_string16.get()));
- EXPECT_TRUE(original_binary->Equals(copy_binary.get()));
- EXPECT_TRUE(original_list->Equals(copy_list.get()));
+ auto copy_dict = MakeUnique<Value>(original_dict);
+ auto copy_null = MakeUnique<Value>(*original_null);
+ auto copy_bool = MakeUnique<Value>(*original_bool);
+ auto copy_int = MakeUnique<Value>(*original_int);
+ auto copy_double = MakeUnique<Value>(*original_double);
+ auto copy_string = MakeUnique<Value>(*original_string);
+ auto copy_string16 = MakeUnique<Value>(*original_string16);
+ auto copy_binary = MakeUnique<Value>(*original_binary);
+ auto copy_list = MakeUnique<Value>(*original_list);
+
+ EXPECT_EQ(original_dict, *copy_dict);
+ EXPECT_EQ(*original_null, *copy_null);
+ EXPECT_EQ(*original_bool, *copy_bool);
+ EXPECT_EQ(*original_int, *copy_int);
+ EXPECT_EQ(*original_double, *copy_double);
+ EXPECT_EQ(*original_string, *copy_string);
+ EXPECT_EQ(*original_string16, *copy_string16);
+ EXPECT_EQ(*original_binary, *copy_binary);
+ EXPECT_EQ(*original_list, *copy_list);
}
TEST(ValuesTest, RemoveEmptyChildren) {
@@ -972,7 +1090,7 @@ TEST(ValuesTest, RemoveEmptyChildren) {
{
std::unique_ptr<ListValue> inner(new ListValue);
std::unique_ptr<ListValue> inner2(new ListValue);
- inner2->Append(MakeUnique<StringValue>("hello"));
+ inner2->Append(MakeUnique<Value>("hello"));
inner->Append(WrapUnique(new DictionaryValue));
inner->Append(std::move(inner2));
root->Set("list_with_empty_children", std::move(inner));
@@ -1070,28 +1188,28 @@ TEST(ValuesTest, DictionaryIterator) {
ADD_FAILURE();
}
- StringValue value1("value1");
- dict.Set("key1", value1.CreateDeepCopy());
+ Value value1("value1");
+ dict.Set("key1", MakeUnique<Value>(value1));
bool seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
EXPECT_FALSE(seen1);
EXPECT_EQ("key1", it.key());
- EXPECT_TRUE(value1.Equals(&it.value()));
+ EXPECT_EQ(value1, it.value());
seen1 = true;
}
EXPECT_TRUE(seen1);
- StringValue value2("value2");
- dict.Set("key2", value2.CreateDeepCopy());
+ Value value2("value2");
+ dict.Set("key2", MakeUnique<Value>(value2));
bool seen2 = seen1 = false;
for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
if (it.key() == "key1") {
EXPECT_FALSE(seen1);
- EXPECT_TRUE(value1.Equals(&it.value()));
+ EXPECT_EQ(value1, it.value());
seen1 = true;
} else if (it.key() == "key2") {
EXPECT_FALSE(seen2);
- EXPECT_TRUE(value2.Equals(&it.value()));
+ EXPECT_EQ(value2, it.value());
seen2 = true;
} else {
ADD_FAILURE();
@@ -1110,26 +1228,26 @@ TEST(ValuesTest, GetWithNullOutValue) {
Value bool_value(false);
Value int_value(1234);
Value double_value(12.34567);
- StringValue string_value("foo");
- BinaryValue binary_value(Value::Type::BINARY);
+ Value string_value("foo");
+ Value binary_value(Value::Type::BINARY);
DictionaryValue dict_value;
ListValue list_value;
- main_dict.Set("bool", bool_value.CreateDeepCopy());
- main_dict.Set("int", int_value.CreateDeepCopy());
- main_dict.Set("double", double_value.CreateDeepCopy());
- main_dict.Set("string", string_value.CreateDeepCopy());
- main_dict.Set("binary", binary_value.CreateDeepCopy());
- main_dict.Set("dict", dict_value.CreateDeepCopy());
- main_dict.Set("list", list_value.CreateDeepCopy());
-
- main_list.Append(bool_value.CreateDeepCopy());
- main_list.Append(int_value.CreateDeepCopy());
- main_list.Append(double_value.CreateDeepCopy());
- main_list.Append(string_value.CreateDeepCopy());
- main_list.Append(binary_value.CreateDeepCopy());
- main_list.Append(dict_value.CreateDeepCopy());
- main_list.Append(list_value.CreateDeepCopy());
+ main_dict.Set("bool", MakeUnique<Value>(bool_value));
+ main_dict.Set("int", MakeUnique<Value>(int_value));
+ main_dict.Set("double", MakeUnique<Value>(double_value));
+ main_dict.Set("string", MakeUnique<Value>(string_value));
+ main_dict.Set("binary", MakeUnique<Value>(binary_value));
+ main_dict.Set("dict", MakeUnique<Value>(dict_value));
+ main_dict.Set("list", MakeUnique<Value>(list_value));
+
+ main_list.Append(MakeUnique<Value>(bool_value));
+ main_list.Append(MakeUnique<Value>(int_value));
+ main_list.Append(MakeUnique<Value>(double_value));
+ main_list.Append(MakeUnique<Value>(string_value));
+ main_list.Append(MakeUnique<Value>(binary_value));
+ main_list.Append(MakeUnique<Value>(dict_value));
+ main_list.Append(MakeUnique<Value>(list_value));
EXPECT_TRUE(main_dict.Get("bool", NULL));
EXPECT_TRUE(main_dict.Get("int", NULL));
@@ -1386,4 +1504,10 @@ TEST(ValuesTest, GetWithNullOutValue) {
EXPECT_FALSE(main_list.GetList(7, NULL));
}
+TEST(ValuesTest, SelfSwap) {
+ base::Value test(1);
+ std::swap(test, test);
+ EXPECT_TRUE(test.GetInt() == 1);
+}
+
} // namespace base
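The TestNothrow assertions near the top of this file are not cosmetic: because ListStorage is std::vector&lt;Value&gt;, reallocation only moves elements when Value's move constructor is noexcept; otherwise the vector falls back to copying. A small illustration of the consequence, not code from the patch:

// Illustration: vector growth moves Values rather than deep-copying them.
static_assert(std::is_nothrow_move_constructible<base::Value>::value,
              "growth uses moves");
std::vector<base::Value> v;
v.reserve(1);
v.emplace_back("payload");
v.emplace_back(2);  // reallocation here uses move_if_noexcept -> moves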
diff --git a/chromium/base/win/message_window.cc b/chromium/base/win/message_window.cc
index 26b64a53388..155047e4aee 100644
--- a/chromium/base/win/message_window.cc
+++ b/chromium/base/win/message_window.cc
@@ -32,8 +32,8 @@ class MessageWindow::WindowClass {
DISALLOW_COPY_AND_ASSIGN(WindowClass);
};
-static LazyInstance<MessageWindow::WindowClass> g_window_class =
- LAZY_INSTANCE_INITIALIZER;
+static LazyInstance<MessageWindow::WindowClass>::DestructorAtExit
+ g_window_class = LAZY_INSTANCE_INITIALIZER;
MessageWindow::WindowClass::WindowClass()
: atom_(0), instance_(CURRENT_MODULE()) {
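In this baseline, LazyInstance requires call sites to name a destruction policy explicitly, which is why the declaration gains the DestructorAtExit spelling. A hedged sketch of the two spellings (Foo is a hypothetical type):

// Sketch: the two LazyInstance policies after this change.
static base::LazyInstance<Foo>::DestructorAtExit g_foo =
    LAZY_INSTANCE_INITIALIZER;  // destroyed through the AtExitManager
static base::LazyInstance<Foo>::Leaky g_leaky_foo =
    LAZY_INSTANCE_INITIALIZER;  // intentionally never destroyed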
diff --git a/chromium/base/win/scoped_comptr.h b/chromium/base/win/scoped_comptr.h
index 9442672054a..ac704e09534 100644
--- a/chromium/base/win/scoped_comptr.h
+++ b/chromium/base/win/scoped_comptr.h
@@ -5,19 +5,18 @@
#ifndef BASE_WIN_SCOPED_COMPTR_H_
#define BASE_WIN_SCOPED_COMPTR_H_
+#include <objbase.h>
#include <unknwn.h>
#include "base/logging.h"
-#include "base/memory/ref_counted.h"
namespace base {
namespace win {
+// DEPRECATED: Use Microsoft::WRL::ComPtr instead.
// A fairly minimalistic smart class for COM interface pointers.
-// Uses scoped_refptr for the basic smart pointer functionality
-// and adds a few IUnknown specific services.
template <class Interface, const IID* interface_id = &__uuidof(Interface)>
-class ScopedComPtr : public scoped_refptr<Interface> {
+class ScopedComPtr {
public:
// Utility template to prevent users of ScopedComPtr from calling AddRef
// and/or Release() without going through the ScopedComPtr class.
@@ -28,16 +27,17 @@ class ScopedComPtr : public scoped_refptr<Interface> {
STDMETHOD_(ULONG, Release)() = 0;
};
- typedef scoped_refptr<Interface> ParentClass;
-
ScopedComPtr() {
}
- explicit ScopedComPtr(Interface* p) : ParentClass(p) {
+ explicit ScopedComPtr(Interface* p) : ptr_(p) {
+ if (ptr_)
+ ptr_->AddRef();
}
- ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p)
- : ParentClass(p) {
+ ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p) : ptr_(p.get()) {
+ if (ptr_)
+ ptr_->AddRef();
}
~ScopedComPtr() {
@@ -46,31 +46,39 @@ class ScopedComPtr : public scoped_refptr<Interface> {
static_assert(
sizeof(ScopedComPtr<Interface, interface_id>) == sizeof(Interface*),
"ScopedComPtrSize");
+ Reset();
}
+ Interface* get() const { return ptr_; }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
// Explicit Release() of the held object. Useful for reuse of the
// ScopedComPtr instance.
// Note that this function equates to IUnknown::Release and should not
// be confused with e.g. unique_ptr::release().
- void Release() {
- if (this->ptr_ != NULL) {
- this->ptr_->Release();
- this->ptr_ = NULL;
+ unsigned long Reset() {
+ unsigned long ref = 0;
+ Interface* temp = ptr_;
+ if (temp) {
+ ptr_ = nullptr;
+ ref = temp->Release();
}
+ return ref;
}
// Sets the internal pointer to NULL and returns the held object without
// releasing the reference.
Interface* Detach() {
- Interface* p = this->ptr_;
- this->ptr_ = NULL;
+ Interface* p = ptr_;
+ ptr_ = nullptr;
return p;
}
// Accepts an interface pointer that has already been addref-ed.
void Attach(Interface* p) {
- DCHECK(!this->ptr_);
- this->ptr_ = p;
+ DCHECK(!ptr_);
+ ptr_ = p;
}
// Retrieves the pointer address.
@@ -78,8 +86,8 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// The function DCHECKs on the current value being NULL.
// Usage: Foo(p.Receive());
Interface** Receive() {
- DCHECK(!this->ptr_) << "Object leak. Pointer must be NULL";
- return &this->ptr_;
+ DCHECK(!ptr_) << "Object leak. Pointer must be NULL";
+ return &ptr_;
}
// A convenience for whenever a void pointer is needed as an out argument.
@@ -89,50 +97,51 @@ class ScopedComPtr : public scoped_refptr<Interface> {
template <class Query>
HRESULT QueryInterface(Query** p) {
- DCHECK(p != NULL);
- DCHECK(this->ptr_ != NULL);
+ DCHECK(p);
+ DCHECK(ptr_);
// IUnknown already has a template version of QueryInterface
// so the iid parameter is implicit here. The only thing this
// function adds are the DCHECKs.
- return this->ptr_->QueryInterface(p);
+ return ptr_->QueryInterface(IID_PPV_ARGS(p));
}
// QI for times when the IID is not associated with the type.
HRESULT QueryInterface(const IID& iid, void** obj) {
- DCHECK(obj != NULL);
- DCHECK(this->ptr_ != NULL);
- return this->ptr_->QueryInterface(iid, obj);
+ DCHECK(obj);
+ DCHECK(ptr_);
+ return ptr_->QueryInterface(iid, obj);
}
// Queries |object| for the interface this object wraps and returns the
// error code from the object->QueryInterface operation.
HRESULT QueryFrom(IUnknown* object) {
- DCHECK(object != NULL);
- return object->QueryInterface(Receive());
+ DCHECK(object);
+ return object->QueryInterface(IID_PPV_ARGS(Receive()));
}
// Convenience wrapper around CoCreateInstance
- HRESULT CreateInstance(const CLSID& clsid, IUnknown* outer = NULL,
+ HRESULT CreateInstance(const CLSID& clsid,
+ IUnknown* outer = nullptr,
DWORD context = CLSCTX_ALL) {
- DCHECK(!this->ptr_);
+ DCHECK(!ptr_);
HRESULT hr = ::CoCreateInstance(clsid, outer, context, *interface_id,
- reinterpret_cast<void**>(&this->ptr_));
+ reinterpret_cast<void**>(&ptr_));
return hr;
}
// Checks if the identity of |other| and this object is the same.
bool IsSameObject(IUnknown* other) {
- if (!other && !this->ptr_)
+ if (!other && !ptr_)
return true;
- if (!other || !this->ptr_)
+ if (!other || !ptr_)
return false;
ScopedComPtr<IUnknown> my_identity;
- QueryInterface(my_identity.Receive());
+ QueryInterface(IID_PPV_ARGS(my_identity.Receive()));
ScopedComPtr<IUnknown> other_identity;
- other->QueryInterface(other_identity.Receive());
+ other->QueryInterface(IID_PPV_ARGS(other_identity.Receive()));
return my_identity == other_identity;
}
@@ -148,20 +157,110 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// by statically casting the ScopedComPtr instance to the wrapped interface
// and then making the call... but generally that shouldn't be necessary.
BlockIUnknownMethods* operator->() const {
- DCHECK(this->ptr_ != NULL);
- return reinterpret_cast<BlockIUnknownMethods*>(this->ptr_);
+ DCHECK(ptr_);
+ return reinterpret_cast<BlockIUnknownMethods*>(ptr_);
+ }
+
+ ScopedComPtr<Interface, interface_id>& operator=(Interface* rhs) {
+ // AddRef first so that self assignment works correctly.
+ if (rhs)
+ rhs->AddRef();
+ Interface* old_ptr = ptr_;
+ ptr_ = rhs;
+ if (old_ptr)
+ old_ptr->Release();
+ return *this;
+ }
+
+ ScopedComPtr<Interface, interface_id>& operator=(
+ const ScopedComPtr<Interface, interface_id>& rhs) {
+ return *this = rhs.ptr_;
+ }
+
+ Interface& operator*() const {
+ DCHECK(ptr_);
+ return *ptr_;
+ }
+
+ bool operator==(const ScopedComPtr<Interface, interface_id>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator==(const ScopedComPtr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator==(const U* rhs) const {
+ return ptr_ == rhs;
}
- // Pull in operator=() from the parent class.
- using scoped_refptr<Interface>::operator=;
- // static methods
- static const IID& iid() {
- return *interface_id;
+ bool operator!=(const ScopedComPtr<Interface, interface_id>& rhs) const {
+ return ptr_ != rhs.get();
+ }
+ template <typename U>
+ bool operator!=(const ScopedComPtr<U>& rhs) const {
+ return ptr_ != rhs.get();
+ }
+ template <typename U>
+ bool operator!=(const U* rhs) const {
+ return ptr_ != rhs;
}
+
+ void swap(ScopedComPtr<Interface, interface_id>& r) {
+ Interface* tmp = ptr_;
+ ptr_ = r.ptr_;
+ r.ptr_ = tmp;
+ }
+
+ private:
+ Interface* ptr_ = nullptr;
};
+template <typename T, typename U>
+bool operator==(const T* lhs, const ScopedComPtr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const ScopedComPtr<T>& lhs, std::nullptr_t null) {
+ return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const ScopedComPtr<T>& rhs) {
+ return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const ScopedComPtr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const ScopedComPtr<T>& lhs, std::nullptr_t null) {
+ return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const ScopedComPtr<T>& rhs) {
+ return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const ScopedComPtr<T>& p) {
+ return out << p.get();
+}
+
+// Helper to make IID_PPV_ARGS work with ScopedComPtr.
+template <typename T>
+void** IID_PPV_ARGS_Helper(base::win::ScopedComPtr<T>* pp) throw() {
+ return pp->ReceiveVoid();
+}
+
} // namespace win
} // namespace base
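With scoped_refptr out of the inheritance chain, ScopedComPtr owns its reference count directly and call sites swap Release() for Reset(). A minimal usage sketch, assuming the post-patch class (IShellLink/IPersistFile as in shortcut.cc below):

// Sketch: creating and resetting a ScopedComPtr after this change.
base::win::ScopedComPtr<IShellLink> link;
if (SUCCEEDED(link.CreateInstance(CLSID_ShellLink, nullptr,
                                  CLSCTX_INPROC_SERVER))) {
  base::win::ScopedComPtr<IPersistFile> file;
  if (SUCCEEDED(file.QueryFrom(link.get())))
    file.Reset();  // explicit IUnknown::Release(); pointer becomes null
}
link.Reset();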
diff --git a/chromium/base/win/scoped_comptr_unittest.cc b/chromium/base/win/scoped_comptr_unittest.cc
index 988c4690c5e..3cde3145217 100644
--- a/chromium/base/win/scoped_comptr_unittest.cc
+++ b/chromium/base/win/scoped_comptr_unittest.cc
@@ -4,6 +4,7 @@
#include "base/win/scoped_comptr.h"
+#include <objbase.h>
#include <shlobj.h>
#include <memory>
@@ -18,8 +19,8 @@ namespace {
struct Dummy {
Dummy() : adds(0), releases(0) { }
- void AddRef() { ++adds; }
- void Release() { ++releases; }
+ unsigned long AddRef() { return ++adds; }
+ unsigned long Release() { return ++releases; }
int adds;
int releases;
@@ -34,9 +35,6 @@ const IID dummy_iid = {0x12345678u,
} // namespace
TEST(ScopedComPtrTest, ScopedComPtr) {
- EXPECT_EQ(memcmp(&ScopedComPtr<IUnknown>::iid(), &IID_IUnknown, sizeof(IID)),
- 0);
-
base::win::ScopedCOMInitializer com_initializer;
EXPECT_TRUE(com_initializer.succeeded());
@@ -51,10 +49,9 @@ TEST(ScopedComPtrTest, ScopedComPtr) {
EXPECT_TRUE(SUCCEEDED(CoGetMalloc(1, mem_alloc.Receive())));
ScopedComPtr<IUnknown> qi_test;
- EXPECT_HRESULT_SUCCEEDED(mem_alloc.QueryInterface(IID_IUnknown,
- reinterpret_cast<void**>(qi_test.Receive())));
+ EXPECT_HRESULT_SUCCEEDED(mem_alloc.QueryInterface(IID_PPV_ARGS(&qi_test)));
EXPECT_TRUE(qi_test.get() != NULL);
- qi_test.Release();
+ qi_test.Reset();
// test ScopedComPtr& constructor
ScopedComPtr<IMalloc> copy1(mem_alloc);
@@ -66,7 +63,7 @@ TEST(ScopedComPtrTest, ScopedComPtr) {
copy1 = naked_copy; // Test the =(T*) operator.
naked_copy->Release();
- copy1.Release();
+ copy1.Reset();
EXPECT_FALSE(copy1.IsSameObject(unk2.get())); // unk2 is valid, copy1 is not
// test Interface* constructor
@@ -75,7 +72,7 @@ TEST(ScopedComPtrTest, ScopedComPtr) {
EXPECT_TRUE(SUCCEEDED(unk.QueryFrom(mem_alloc.get())));
EXPECT_TRUE(unk.get() != NULL);
- unk.Release();
+ unk.Reset();
EXPECT_TRUE(unk.get() == NULL);
EXPECT_TRUE(unk.IsSameObject(copy1.get())); // both are NULL
}
diff --git a/chromium/base/win/scoped_handle_unittest.cc b/chromium/base/win/scoped_handle_unittest.cc
index ca6fb451a87..0a75115bb9c 100644
--- a/chromium/base/win/scoped_handle_unittest.cc
+++ b/chromium/base/win/scoped_handle_unittest.cc
@@ -114,11 +114,11 @@ TEST(ScopedHandleTest, MAYBE_MultiProcess) {
CommandLine command_line(base::GetMultiProcessTestChildBaseCommandLine());
command_line.AppendSwitch(switches::kTestDoNotInitializeIcu);
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ base::SpawnChildResult spawn_child = base::SpawnMultiProcessTestChild(
"ActiveVerifierChildProcess", command_line, LaunchOptions());
int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ ASSERT_TRUE(spawn_child.process.WaitForExitWithTimeout(
TestTimeouts::action_timeout(), &rv));
EXPECT_EQ(0, rv);
}
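SpawnMultiProcessTestChild now returns a SpawnChildResult struct instead of a bare base::Process, so callers pull the (move-only) process out of the struct. A hedged sketch of the adjusted pattern ("MyChildMain" and the option variables are placeholders):

// Sketch: consuming the new SpawnChildResult return type.
base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
    "MyChildMain", command_line, options);
base::Process child = std::move(spawn_result.process);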
diff --git a/chromium/base/win/shortcut.cc b/chromium/base/win/shortcut.cc
index f5748099964..856b00f0f59 100644
--- a/chromium/base/win/shortcut.cc
+++ b/chromium/base/win/shortcut.cc
@@ -29,14 +29,14 @@ void InitializeShortcutInterfaces(
const wchar_t* shortcut,
ScopedComPtr<IShellLink>* i_shell_link,
ScopedComPtr<IPersistFile>* i_persist_file) {
- i_shell_link->Release();
- i_persist_file->Release();
+ i_shell_link->Reset();
+ i_persist_file->Reset();
if (FAILED(i_shell_link->CreateInstance(CLSID_ShellLink, NULL,
CLSCTX_INPROC_SERVER)) ||
FAILED(i_persist_file->QueryFrom(i_shell_link->get())) ||
(shortcut && FAILED((*i_persist_file)->Load(shortcut, STGM_READWRITE)))) {
- i_shell_link->Release();
- i_persist_file->Release();
+ i_shell_link->Reset();
+ i_persist_file->Reset();
}
}
@@ -158,15 +158,15 @@ bool CreateOrUpdateShortcutLink(const FilePath& shortcut_path,
// Release the interfaces to the old shortcut to make sure they don't
// prevent overwriting it if needed.
- old_i_persist_file.Release();
- old_i_shell_link.Release();
+ old_i_persist_file.Reset();
+ old_i_shell_link.Reset();
HRESULT result = i_persist_file->Save(shortcut_path.value().c_str(), TRUE);
// Release the interfaces in case the SHChangeNotify call below depends on
// the operations above being fully completed.
- i_persist_file.Release();
- i_shell_link.Release();
+ i_persist_file.Reset();
+ i_shell_link.Reset();
// If we successfully created/updated the icon, notify the shell that we have
// done so.
diff --git a/chromium/base/win/wait_chain_unittest.cc b/chromium/base/win/wait_chain_unittest.cc
index ba04872206d..d5814916150 100644
--- a/chromium/base/win/wait_chain_unittest.cc
+++ b/chromium/base/win/wait_chain_unittest.cc
@@ -194,7 +194,9 @@ Process StartChildProcess(HANDLE mutex, HANDLE sync_event) {
handle_vector.push_back(mutex);
handle_vector.push_back(sync_event);
options.handles_to_inherit = &handle_vector;
- return SpawnMultiProcessTestChild("WaitChainTestProc", command_line, options);
+ base::SpawnChildResult spawn_result =
+ SpawnMultiProcessTestChild("WaitChainTestProc", command_line, options);
+ return std::move(spawn_result.process);
}
// Returns true if the |wait_chain| is an alternating sequence of thread objects
diff --git a/chromium/base/win/win_util.cc b/chromium/base/win/win_util.cc
index d380821c718..130d6499e48 100644
--- a/chromium/base/win/win_util.cc
+++ b/chromium/base/win/win_util.cc
@@ -33,8 +33,10 @@
#include "base/base_switches.h"
#include "base/command_line.h"
+#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/scoped_native_library.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
@@ -499,8 +501,9 @@ bool IsEnrolledToDomain() {
bool IsDeviceRegisteredWithManagement() {
static bool is_device_registered_with_management = []() {
- HMODULE mdm_dll = ::LoadLibrary(L"MDMRegistration.dll");
- if (!mdm_dll)
+ ScopedNativeLibrary library(
+ FilePath(FILE_PATH_LITERAL("MDMRegistration.dll")));
+ if (!library.is_valid())
return false;
using IsDeviceRegisteredWithManagementFunction =
@@ -508,7 +511,7 @@ bool IsDeviceRegisteredWithManagement() {
IsDeviceRegisteredWithManagementFunction
is_device_registered_with_management_function =
reinterpret_cast<IsDeviceRegisteredWithManagementFunction>(
- ::GetProcAddress(mdm_dll, "IsDeviceRegisteredWithManagement"));
+ library.GetFunctionPointer("IsDeviceRegisteredWithManagement"));
if (!is_device_registered_with_management_function)
return false;
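ScopedNativeLibrary ties the module handle's lifetime to a scope, replacing the raw ::LoadLibrary call above. A minimal sketch of the pattern (the DLL name and export are hypothetical):

// Sketch: scope-bound library loading as used above.
base::ScopedNativeLibrary library(
    base::FilePath(FILE_PATH_LITERAL("SomeModule.dll")));
if (library.is_valid()) {
  using SomeFn = BOOL (WINAPI*)();
  auto fn = reinterpret_cast<SomeFn>(
      library.GetFunctionPointer("SomeExport"));
  if (fn)
    fn();
}  // FreeLibrary() runs when |library| goes out of scope.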
diff --git a/chromium/base/win/win_util.h b/chromium/base/win/win_util.h
index 6cb6eed5721..8faa9d0ee7d 100644
--- a/chromium/base/win/win_util.h
+++ b/chromium/base/win/win_util.h
@@ -137,6 +137,8 @@ BASE_EXPORT bool IsWindows10TabletMode(HWND hwnd);
// 1. Metrics:- To gain insight into how users use Chrome.
// 2. Physical keyboard presence :- If a device is in tablet mode, it means
// that there is no physical keyboard attached.
+// 3. To set the right interactions media queries,
+// see https://drafts.csswg.org/mediaqueries-4/#mf-interaction
// This function optionally sets the |reason| parameter to explain why
// or why not a device was deemed to be a tablet.
// Returns true if the device is in tablet mode.
diff --git a/chromium/base/win/windows_version.cc b/chromium/base/win/windows_version.cc
index 4b7f8baaca9..d192ab90ff7 100644
--- a/chromium/base/win/windows_version.cc
+++ b/chromium/base/win/windows_version.cc
@@ -50,8 +50,10 @@ Version MajorMinorBuildToVersion(int major, int minor, int build) {
} else if (major == 10) {
if (build < 10586) {
return VERSION_WIN10;
- } else {
+ } else if (build < 14393) {
return VERSION_WIN10_TH2;
+ } else {
+ return VERSION_WIN10_R1;
}
} else if (major > 6) {
NOTREACHED();
diff --git a/chromium/base/win/windows_version.h b/chromium/base/win/windows_version.h
index 9969597471b..984fd3727d1 100644
--- a/chromium/base/win/windows_version.h
+++ b/chromium/base/win/windows_version.h
@@ -34,6 +34,7 @@ enum Version {
VERSION_WIN8_1 = 6, // Also includes Windows Server 2012 R2.
VERSION_WIN10 = 7, // Also includes Windows 10 Server.
VERSION_WIN10_TH2 = 8, // Threshold 2: Version 1511, Build 10586.
+ VERSION_WIN10_R1 = 9, // Redstone 1: Version 1607, Build 14393.
VERSION_WIN_LAST, // Indicates error condition.
};
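Callers pick up the new enumerator through GetVersion(); a short sketch of branching on it, assuming this baseline's base::win API:

// Sketch: gating a code path on Redstone 1 (build 14393) or later.
if (base::win::GetVersion() >= base::win::VERSION_WIN10_R1) {
  // Anniversary Update behavior.
}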