summaryrefslogtreecommitdiffstats
path: root/chromium/base
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2018-05-15 10:20:33 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2018-05-15 10:28:57 +0000
commitd17ea114e5ef69ad5d5d7413280a13e6428098aa (patch)
tree2c01a75df69f30d27b1432467cfe7c1467a498da /chromium/base
parent8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec (diff)
BASELINE: Update Chromium to 67.0.3396.47
Change-Id: Idcb1341782e417561a2473eeecc82642dafda5b7 Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/base')
-rw-r--r--chromium/base/BUILD.gn234
-rw-r--r--chromium/base/allocator/README.md30
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization.h259
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator.cc335
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_internal.h18
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h181
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_internals_win.h119
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_unittest.cc193
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc151
-rw-r--r--chromium/base/android/jni_generator/jni_exception_list.gni2
-rw-r--r--chromium/base/atomicops.h4
-rw-r--r--chromium/base/base_switches.cc9
-rw-r--r--chromium/base/base_switches.h3
-rw-r--r--chromium/base/bind_internal.h2
-rw-r--r--chromium/base/bits.h41
-rw-r--r--chromium/base/callback.h4
-rw-r--r--chromium/base/callback_helpers.h4
-rw-r--r--chromium/base/callback_internal.h5
-rw-r--r--chromium/base/callback_list.h37
-rw-r--r--chromium/base/containers/circular_deque_unittest.cc2
-rw-r--r--chromium/base/containers/flat_tree.h9
-rw-r--r--chromium/base/containers/id_map.h87
-rw-r--r--chromium/base/containers/id_map_unittest.cc20
-rw-r--r--chromium/base/containers/linked_list.h22
-rw-r--r--chromium/base/containers/linked_list_unittest.cc41
-rw-r--r--chromium/base/containers/span.h122
-rw-r--r--chromium/base/containers/span_unittest.cc105
-rw-r--r--chromium/base/containers/span_unittest.nc8
-rw-r--r--chromium/base/debug/activity_tracker.cc6
-rw-r--r--chromium/base/debug/asan_invalid_access.cc44
-rw-r--r--chromium/base/debug/asan_invalid_access.h13
-rw-r--r--chromium/base/debug/close_handle_hook_win.cc13
-rw-r--r--chromium/base/debug/profiler.cc37
-rw-r--r--chromium/base/debug/profiler.h3
-rw-r--r--chromium/base/debug/task_annotator.cc53
-rw-r--r--chromium/base/debug/task_annotator.h15
-rw-r--r--chromium/base/debug/task_annotator_unittest.cc339
-rw-r--r--chromium/base/event_types.h37
-rw-r--r--chromium/base/feature_list.cc14
-rw-r--r--chromium/base/feature_list.h12
-rw-r--r--chromium/base/files/dir_reader_posix_unittest.cc1
-rw-r--r--chromium/base/files/file_descriptor_watcher_posix.cc23
-rw-r--r--chromium/base/files/file_descriptor_watcher_posix.h17
-rw-r--r--chromium/base/files/file_unittest.cc3
-rw-r--r--chromium/base/files/file_util_posix.cc31
-rw-r--r--chromium/base/fuchsia/async_dispatcher.cc317
-rw-r--r--chromium/base/fuchsia/async_dispatcher.h84
-rw-r--r--chromium/base/fuchsia/async_dispatcher_unittest.cc219
-rw-r--r--chromium/base/i18n/file_util_icu_unittest.cc2
-rw-r--r--chromium/base/i18n/rtl.cc46
-rw-r--r--chromium/base/i18n/rtl.h4
-rw-r--r--chromium/base/json/OWNERS1
-rw-r--r--chromium/base/json/json_correctness_fuzzer.cc (renamed from chromium/base/json/correctness_fuzzer.cc)8
-rw-r--r--chromium/base/json/json_parser.cc425
-rw-r--r--chromium/base/json/json_parser.h77
-rw-r--r--chromium/base/json/json_parser_unittest.cc52
-rw-r--r--chromium/base/json/json_reader.cc11
-rw-r--r--chromium/base/json/json_reader.h2
-rw-r--r--chromium/base/json/json_reader_fuzzer.cc29
-rw-r--r--chromium/base/json/json_reader_unittest.cc967
-rw-r--r--chromium/base/json/string_escape_fuzzer.cc37
-rw-r--r--chromium/base/logging.cc19
-rw-r--r--chromium/base/logging.h21
-rw-r--r--chromium/base/logging_unittest.cc25
-rw-r--r--chromium/base/mac/sdk_forward_declarations.h47
-rw-r--r--chromium/base/macros.h4
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc27
-rw-r--r--chromium/base/memory/linked_ptr_unittest.cc2
-rw-r--r--chromium/base/memory/platform_shared_memory_region.cc37
-rw-r--r--chromium/base/memory/platform_shared_memory_region.h222
-rw-r--r--chromium/base/memory/platform_shared_memory_region_android.cc190
-rw-r--r--chromium/base/memory/platform_shared_memory_region_fuchsia.cc172
-rw-r--r--chromium/base/memory/platform_shared_memory_region_mac.cc198
-rw-r--r--chromium/base/memory/platform_shared_memory_region_posix.cc291
-rw-r--r--chromium/base/memory/platform_shared_memory_region_unittest.cc302
-rw-r--r--chromium/base/memory/platform_shared_memory_region_win.cc345
-rw-r--r--chromium/base/memory/protected_memory.cc17
-rw-r--r--chromium/base/memory/protected_memory.h109
-rw-r--r--chromium/base/memory/protected_memory_cfi.h5
-rw-r--r--chromium/base/memory/protected_memory_posix.cc23
-rw-r--r--chromium/base/memory/protected_memory_win.cc52
-rw-r--r--chromium/base/memory/ptr_util.h7
-rw-r--r--chromium/base/memory/ptr_util_unittest.cc30
-rw-r--r--chromium/base/memory/read_only_shared_memory_region.cc95
-rw-r--r--chromium/base/memory/read_only_shared_memory_region.h106
-rw-r--r--chromium/base/memory/ref_counted_memory.cc26
-rw-r--r--chromium/base/memory/ref_counted_memory.h52
-rw-r--r--chromium/base/memory/ref_counted_memory_unittest.cc16
-rw-r--r--chromium/base/memory/ref_counted_unittest.cc2
-rw-r--r--chromium/base/memory/scoped_refptr.h8
-rw-r--r--chromium/base/memory/shared_memory_mapping.cc115
-rw-r--r--chromium/base/memory/shared_memory_mapping.h143
-rw-r--r--chromium/base/memory/shared_memory_region_unittest.cc296
-rw-r--r--chromium/base/memory/shared_memory_tracker.cc104
-rw-r--r--chromium/base/memory/shared_memory_tracker.h24
-rw-r--r--chromium/base/memory/unsafe_shared_memory_region.cc74
-rw-r--r--chromium/base/memory/unsafe_shared_memory_region.h100
-rw-r--r--chromium/base/memory/writable_shared_memory_region.cc82
-rw-r--r--chromium/base/memory/writable_shared_memory_region.h97
-rw-r--r--chromium/base/message_loop/message_loop.cc117
-rw-r--r--chromium/base/message_loop/message_loop.h96
-rw-r--r--chromium/base/message_loop/message_loop_io_posix_unittest.cc68
-rw-r--r--chromium/base/message_loop/message_loop_unittest.cc293
-rw-r--r--chromium/base/message_loop/message_pump_for_io.h44
-rw-r--r--chromium/base/message_loop/message_pump_for_ui.h55
-rw-r--r--chromium/base/message_loop/message_pump_fuchsia.cc3
-rw-r--r--chromium/base/message_loop/message_pump_fuchsia.h19
-rw-r--r--chromium/base/message_loop/message_pump_io_ios.cc44
-rw-r--r--chromium/base/message_loop/message_pump_io_ios.h68
-rw-r--r--chromium/base/message_loop/message_pump_io_ios_unittest.cc37
-rw-r--r--chromium/base/message_loop/message_pump_libevent.cc53
-rw-r--r--chromium/base/message_loop/message_pump_libevent.h84
-rw-r--r--chromium/base/message_loop/message_pump_libevent_unittest.cc44
-rw-r--r--chromium/base/message_loop/message_pump_mac.h2
-rw-r--r--chromium/base/message_loop/message_pump_mac.mm10
-rw-r--r--chromium/base/message_loop/message_pump_win.cc18
-rw-r--r--chromium/base/message_loop/watchable_io_message_pump_posix.cc16
-rw-r--r--chromium/base/message_loop/watchable_io_message_pump_posix.h88
-rw-r--r--chromium/base/metrics/OWNERS1
-rw-r--r--chromium/base/metrics/dummy_histogram.cc102
-rw-r--r--chromium/base/metrics/dummy_histogram.h61
-rw-r--r--chromium/base/metrics/field_trial.cc27
-rw-r--r--chromium/base/metrics/field_trial.h22
-rw-r--r--chromium/base/metrics/field_trial_unittest.cc54
-rw-r--r--chromium/base/metrics/histogram.cc83
-rw-r--r--chromium/base/metrics/histogram.h16
-rw-r--r--chromium/base/metrics/histogram_base.cc7
-rw-r--r--chromium/base/metrics/histogram_base.h9
-rw-r--r--chromium/base/metrics/histogram_macros.h38
-rw-r--r--chromium/base/metrics/histogram_macros_internal.h50
-rw-r--r--chromium/base/metrics/histogram_macros_local.h9
-rw-r--r--chromium/base/metrics/histogram_macros_unittest.cc12
-rw-r--r--chromium/base/metrics/histogram_samples.h3
-rw-r--r--chromium/base/metrics/histogram_snapshot_manager.cc4
-rw-r--r--chromium/base/metrics/histogram_unittest.cc78
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator.cc98
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator.h42
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator_unittest.cc1
-rw-r--r--chromium/base/metrics/persistent_histogram_storage.cc101
-rw-r--r--chromium/base/metrics/persistent_histogram_storage.h68
-rw-r--r--chromium/base/metrics/persistent_histogram_storage_unittest.cc75
-rw-r--r--chromium/base/metrics/sparse_histogram.cc7
-rw-r--r--chromium/base/metrics/sparse_histogram_unittest.cc5
-rw-r--r--chromium/base/metrics/statistics_recorder.h3
-rw-r--r--chromium/base/metrics/statistics_recorder_unittest.cc4
-rw-r--r--chromium/base/native_library.h18
-rw-r--r--chromium/base/native_library_fuchsia.cc95
-rw-r--r--chromium/base/native_library_ios.mm8
-rw-r--r--chromium/base/native_library_mac.mm9
-rw-r--r--chromium/base/native_library_posix.cc8
-rw-r--r--chromium/base/native_library_unittest.cc19
-rw-r--r--chromium/base/native_library_win.cc10
-rw-r--r--chromium/base/no_destructor_unittest.cc7
-rw-r--r--chromium/base/observer_list_unittest.cc8
-rw-r--r--chromium/base/optional.h58
-rw-r--r--chromium/base/optional_unittest.cc45
-rw-r--r--chromium/base/pending_task.cc16
-rw-r--r--chromium/base/pending_task.h9
-rw-r--r--chromium/base/pending_task_unittest.cc169
-rw-r--r--chromium/base/posix/unix_domain_socket.cc5
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_stub.cc (renamed from chromium/base/power_monitor/power_monitor_device_source_posix.cc)0
-rw-r--r--chromium/base/process/kill.cc30
-rw-r--r--chromium/base/process/kill.h36
-rw-r--r--chromium/base/process/kill_fuchsia.cc16
-rw-r--r--chromium/base/process/kill_posix.cc76
-rw-r--r--chromium/base/process/kill_win.cc45
-rw-r--r--chromium/base/process/launch_fuchsia.cc22
-rw-r--r--chromium/base/process/launch_posix.cc9
-rw-r--r--chromium/base/process/process_info_win.cc1
-rw-r--r--chromium/base/process/process_metrics.h134
-rw-r--r--chromium/base/process/process_metrics_freebsd.cc58
-rw-r--r--chromium/base/process/process_metrics_fuchsia.cc20
-rw-r--r--chromium/base/process/process_metrics_ios.cc27
-rw-r--r--chromium/base/process/process_metrics_linux.cc132
-rw-r--r--chromium/base/process/process_metrics_mac.cc194
-rw-r--r--chromium/base/process/process_metrics_openbsd.cc71
-rw-r--r--chromium/base/process/process_metrics_unittest.cc38
-rw-r--r--chromium/base/process/process_metrics_unittest_ios.cc17
-rw-r--r--chromium/base/process/process_metrics_win.cc151
-rw-r--r--chromium/base/process/process_posix.cc7
-rw-r--r--chromium/base/process/process_util_unittest.cc110
-rw-r--r--chromium/base/profiler/native_stack_sampler_mac.cc76
-rw-r--r--chromium/base/run_loop.cc16
-rw-r--r--chromium/base/run_loop.h12
-rw-r--r--chromium/base/run_loop_unittest.cc25
-rw-r--r--chromium/base/safe_numerics_unittest.cc11
-rw-r--r--chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc94
-rw-r--r--chromium/base/sampling_heap_profiler/sampling_heap_profiler.h6
-rw-r--r--chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc24
-rw-r--r--chromium/base/security_unittest.cc4
-rw-r--r--chromium/base/stl_util.h12
-rw-r--r--chromium/base/stl_util_unittest.cc9
-rw-r--r--chromium/base/strings/old_utf_string_conversions.cc262
-rw-r--r--chromium/base/strings/old_utf_string_conversions.h64
-rw-r--r--chromium/base/strings/strcat.h6
-rw-r--r--chromium/base/strings/string_number_conversions_fuzzer.cc67
-rw-r--r--chromium/base/strings/string_piece.h30
-rw-r--r--chromium/base/strings/string_piece_forward.h1
-rw-r--r--chromium/base/strings/string_tokenizer.h24
-rw-r--r--chromium/base/strings/string_tokenizer_fuzzer.cc56
-rw-r--r--chromium/base/strings/string_util.cc6
-rw-r--r--chromium/base/strings/string_util.h3
-rw-r--r--chromium/base/strings/utf_string_conversion_utils.cc7
-rw-r--r--chromium/base/strings/utf_string_conversions.cc336
-rw-r--r--chromium/base/strings/utf_string_conversions.h6
-rw-r--r--chromium/base/strings/utf_string_conversions_fuzzer.cc56
-rw-r--r--chromium/base/strings/utf_string_conversions_regression_fuzzer.cc105
-rw-r--r--chromium/base/synchronization/waitable_event_win.cc3
-rw-r--r--chromium/base/sys_info.cc11
-rw-r--r--chromium/base/sys_info.h1
-rw-r--r--chromium/base/sys_info_android.cc2
-rw-r--r--chromium/base/task_scheduler/delayed_task_manager.cc3
-rw-r--r--chromium/base/task_scheduler/delayed_task_manager.h4
-rw-r--r--chromium/base/task_scheduler/delayed_task_manager_unittest.cc3
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc40
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h22
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc46
-rw-r--r--chromium/base/task_scheduler/task_tracker.cc18
-rw-r--r--chromium/base/task_scheduler/task_tracker.h12
-rw-r--r--chromium/base/task_scheduler/task_tracker_posix.cc4
-rw-r--r--chromium/base/task_scheduler/task_tracker_posix.h6
-rw-r--r--chromium/base/test/BUILD.gn51
-rw-r--r--chromium/base/third_party/symbolize/BUILD.gn16
-rw-r--r--chromium/base/third_party/symbolize/symbolize.cc6
-rw-r--r--chromium/base/threading/platform_thread_android.cc2
-rw-r--r--chromium/base/threading/platform_thread_fuchsia.cc3
-rw-r--r--chromium/base/threading/platform_thread_linux.cc2
-rw-r--r--chromium/base/threading/platform_thread_mac.mm2
-rw-r--r--chromium/base/threading/platform_thread_win.cc6
-rw-r--r--chromium/base/threading/post_task_and_reply_impl.cc126
-rw-r--r--chromium/base/threading/post_task_and_reply_impl.h17
-rw-r--r--chromium/base/threading/post_task_and_reply_impl_unittest.cc172
-rw-r--r--chromium/base/threading/scoped_blocking_call.h54
-rw-r--r--chromium/base/threading/thread.cc5
-rw-r--r--chromium/base/threading/thread.h3
-rw-r--r--chromium/base/threading/thread_id_name_manager.cc16
-rw-r--r--chromium/base/threading/thread_id_name_manager.h7
-rw-r--r--chromium/base/threading/thread_local_storage.cc74
-rw-r--r--chromium/base/threading/thread_local_storage.h27
-rw-r--r--chromium/base/threading/thread_local_storage_unittest.cc133
-rw-r--r--chromium/base/threading/thread_restrictions.cc14
-rw-r--r--chromium/base/threading/thread_restrictions.h27
-rw-r--r--chromium/base/time/default_tick_clock.cc11
-rw-r--r--chromium/base/time/default_tick_clock.h5
-rw-r--r--chromium/base/time/tick_clock.h2
-rw-r--r--chromium/base/timer/timer.cc6
-rw-r--r--chromium/base/timer/timer.h12
-rw-r--r--chromium/base/timer/timer_unittest.cc9
-rw-r--r--chromium/base/tools_sanity_unittest.cc38
-rw-r--r--chromium/base/trace_event/auto_open_close_event.cc4
-rw-r--r--chromium/base/trace_event/cfi_backtrace_android.cc314
-rw-r--r--chromium/base/trace_event/cfi_backtrace_android.h157
-rw-r--r--chromium/base/trace_event/cfi_backtrace_android_unittest.cc197
-rw-r--r--chromium/base/trace_event/common/trace_event_common.h18
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc18
-rw-r--r--chromium/base/trace_event/malloc_dump_provider.cc12
-rw-r--r--chromium/base/trace_event/memory_dump_manager.cc54
-rw-r--r--chromium/base/trace_event/memory_dump_manager.h5
-rw-r--r--chromium/base/trace_event/memory_dump_manager_unittest.cc26
-rw-r--r--chromium/base/trace_event/memory_dump_provider_info.cc4
-rw-r--r--chromium/base/trace_event/memory_dump_provider_info.h6
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.cc4
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.h3
-rw-r--r--chromium/base/trace_event/memory_dump_scheduler.cc5
-rw-r--r--chromium/base/trace_event/memory_infra_background_whitelist.cc106
-rw-r--r--chromium/base/trace_event/memory_infra_background_whitelist.h6
-rw-r--r--chromium/base/trace_event/memory_peak_detector_unittest.cc7
-rw-r--r--chromium/base/trace_event/process_memory_dump.cc60
-rw-r--r--chromium/base/trace_event/process_memory_dump.h7
-rw-r--r--chromium/base/trace_event/process_memory_dump_unittest.cc9
-rw-r--r--chromium/base/trace_event/trace_config.cc56
-rw-r--r--chromium/base/trace_event/trace_config.h2
-rw-r--r--chromium/base/trace_event/trace_event.h26
-rw-r--r--chromium/base/trace_event/trace_event_android.cc2
-rw-r--r--chromium/base/trace_event/trace_log.cc31
-rw-r--r--chromium/base/trace_event/trace_log.h6
-rw-r--r--chromium/base/values.cc3
-rw-r--r--chromium/base/win/com_init_check_hook.cc4
-rw-r--r--chromium/base/win/core_winrt_util.cc2
-rw-r--r--chromium/base/win/scoped_hstring.cc2
-rw-r--r--chromium/base/win/win_includes_unittest.cc1
-rw-r--r--chromium/base/win/win_util_unittest.cc4
-rw-r--r--chromium/base/win/windows_types.h1
283 files changed, 11136 insertions, 4656 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index e01a284943d..ebc47153341 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -21,7 +21,6 @@ import("//build/buildflag_header.gni")
import("//build/config/allocator.gni")
import("//build/config/arm.gni")
import("//build/config/chromecast_build.gni")
-import("//build/config/clang/clang.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/jumbo.gni")
@@ -322,7 +321,6 @@ jumbo_component("base") {
"deferred_sequenced_task_runner.h",
"environment.cc",
"environment.h",
- "event_types.h",
"export_template.h",
"feature_list.cc",
"feature_list.h",
@@ -514,11 +512,17 @@ jumbo_component("base") {
"memory/memory_pressure_monitor_mac.h",
"memory/memory_pressure_monitor_win.cc",
"memory/memory_pressure_monitor_win.h",
+ "memory/platform_shared_memory_region.cc",
+ "memory/platform_shared_memory_region.h",
+ "memory/protected_memory.cc",
"memory/protected_memory.h",
"memory/protected_memory_cfi.h",
"memory/protected_memory_posix.cc",
+ "memory/protected_memory_win.cc",
"memory/ptr_util.h",
"memory/raw_scoped_refptr_mismatch_checker.h",
+ "memory/read_only_shared_memory_region.cc",
+ "memory/read_only_shared_memory_region.h",
"memory/ref_counted.cc",
"memory/ref_counted.h",
"memory/ref_counted_delete_on_sequence.h",
@@ -531,11 +535,17 @@ jumbo_component("base") {
"memory/shared_memory_handle.h",
"memory/shared_memory_helper.cc",
"memory/shared_memory_helper.h",
+ "memory/shared_memory_mapping.cc",
+ "memory/shared_memory_mapping.h",
"memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h",
"memory/singleton.h",
+ "memory/unsafe_shared_memory_region.cc",
+ "memory/unsafe_shared_memory_region.h",
"memory/weak_ptr.cc",
"memory/weak_ptr.h",
+ "memory/writable_shared_memory_region.cc",
+ "memory/writable_shared_memory_region.h",
"message_loop/incoming_task_queue.cc",
"message_loop/incoming_task_queue.h",
"message_loop/message_loop.cc",
@@ -548,6 +558,8 @@ jumbo_component("base") {
"message_loop/message_pump_android.h",
"message_loop/message_pump_default.cc",
"message_loop/message_pump_default.h",
+ "message_loop/message_pump_for_io.h",
+ "message_loop/message_pump_for_ui.h",
"message_loop/message_pump_glib.cc",
"message_loop/message_pump_glib.h",
"message_loop/message_pump_io_ios.cc",
@@ -559,8 +571,12 @@ jumbo_component("base") {
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
"message_loop/timer_slack.h",
+ "message_loop/watchable_io_message_pump_posix.cc",
+ "message_loop/watchable_io_message_pump_posix.h",
"metrics/bucket_ranges.cc",
"metrics/bucket_ranges.h",
+ "metrics/dummy_histogram.cc",
+ "metrics/dummy_histogram.h",
"metrics/field_trial.cc",
"metrics/field_trial.h",
"metrics/field_trial_param_associator.cc",
@@ -1134,6 +1150,8 @@ jumbo_component("base") {
"base_paths_posix.h",
"base_paths_win.cc",
"base_paths_win.h",
+ "metrics/persistent_histogram_storage.cc",
+ "metrics/persistent_histogram_storage.h",
"posix/unix_domain_socket.cc",
]
@@ -1145,6 +1163,7 @@ jumbo_component("base") {
all_dependent_configs = []
defines = []
data = []
+ data_deps = []
configs += [
":base_flags",
@@ -1228,6 +1247,7 @@ jumbo_component("base") {
if (is_android) {
sources -= [ "debug/stack_trace_posix.cc" ]
sources += [
+ "memory/platform_shared_memory_region_android.cc",
"memory/shared_memory_android.cc",
"memory/shared_memory_handle_android.cc",
"time/time_android.cc",
@@ -1260,6 +1280,13 @@ jumbo_component("base") {
# Needs to be a public config so that dependent targets link against it as
# well when doing a component build.
public_configs = [ ":android_system_libs" ]
+
+ if (can_unwind_with_cfi_table) {
+ sources += [
+ "trace_event/cfi_backtrace_android.cc",
+ "trace_event/cfi_backtrace_android.h",
+ ]
+ }
}
# Chromeos.
@@ -1274,6 +1301,7 @@ jumbo_component("base") {
"debug/stack_trace_posix.cc",
"message_loop/message_pump_libevent.cc",
"message_loop/message_pump_libevent.h",
+ "native_library_posix.cc",
"posix/unix_domain_socket.cc",
"posix/unix_domain_socket.h",
"process/kill_posix.cc",
@@ -1290,15 +1318,19 @@ jumbo_component("base") {
"base_paths_fuchsia.h",
"debug/stack_trace_fuchsia.cc",
"files/file_path_watcher_fuchsia.cc",
+ "fuchsia/async_dispatcher.cc",
+ "fuchsia/async_dispatcher.h",
"fuchsia/default_job.cc",
"fuchsia/default_job.h",
"fuchsia/fuchsia_logging.cc",
"fuchsia/fuchsia_logging.h",
"fuchsia/scoped_zx_handle.h",
+ "memory/platform_shared_memory_region_fuchsia.cc",
"memory/shared_memory_fuchsia.cc",
"memory/shared_memory_handle_fuchsia.cc",
"message_loop/message_pump_fuchsia.cc",
"message_loop/message_pump_fuchsia.h",
+ "native_library_fuchsia.cc",
"process/kill_fuchsia.cc",
"process/launch_fuchsia.cc",
"process/memory_fuchsia.cc",
@@ -1315,6 +1347,9 @@ jumbo_component("base") {
]
libs = [ "launchpad" ]
+
+ public_deps += [ "//third_party/fuchsia-sdk:async" ]
+ deps += [ "//third_party/fuchsia-sdk:async" ]
}
# NaCl.
@@ -1418,17 +1453,26 @@ jumbo_component("base") {
"allocator/partition_allocator/oom.h",
"allocator/partition_allocator/page_allocator.cc",
"allocator/partition_allocator/page_allocator.h",
+ "allocator/partition_allocator/page_allocator_internal.h",
"allocator/partition_allocator/partition_alloc.cc",
"allocator/partition_allocator/partition_alloc.h",
"allocator/partition_allocator/spin_lock.cc",
"allocator/partition_allocator/spin_lock.h",
]
+ if (is_win) {
+ sources +=
+ [ "allocator/partition_allocator/page_allocator_internals_win.h" ]
+ } else if (is_posix || is_fuchsia) {
+ sources +=
+ [ "allocator/partition_allocator/page_allocator_internals_posix.h" ]
+ }
}
}
# Windows.
if (is_win) {
sources += [
+ "memory/platform_shared_memory_region_win.cc",
"memory/shared_memory_handle_win.cc",
"memory/shared_memory_win.cc",
"power_monitor/power_monitor_device_source_win.cc",
@@ -1451,79 +1495,12 @@ jumbo_component("base") {
"//base/win:base_win_buildflags",
]
+ data_deps += [ "//build/win:runtime_libs" ]
+
if (com_init_check_hook_disabled) {
defines += [ "COM_INIT_CHECK_HOOK_DISABLED" ]
}
- if (is_component_build) {
- # Copy the VS runtime DLLs into the isolate so that they don't have to be
- # preinstalled on the target machine. The debug runtimes have a "d" at
- # the end.
- if (is_debug) {
- vcrt_suffix = "d"
- } else {
- vcrt_suffix = ""
- }
-
- # These runtime files are copied to the output directory by the
- # vs_toolchain script that runs as part of toolchain configuration.
- data += [
- "$root_out_dir/msvcp140${vcrt_suffix}.dll",
- "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
- "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
-
- # Universal Windows 10 CRT files
- "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
- "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
- "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
- "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
- ]
- if (is_asan) {
- if (current_cpu == "x64") {
- data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-x86_64.dll" ]
- } else {
- data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
- }
- }
- }
-
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
@@ -1549,6 +1526,7 @@ jumbo_component("base") {
sources -= [ "profiler/native_stack_sampler_posix.cc" ]
sources += [
"mac/scoped_typeref.h",
+ "memory/platform_shared_memory_region_mac.cc",
"memory/shared_memory_handle_mac.cc",
"memory/shared_memory_mac.cc",
"power_monitor/power_monitor_device_source_mac.mm",
@@ -1698,17 +1676,20 @@ jumbo_component("base") {
set_sources_assignment_filter(sources_assignment_filter)
}
- # Android, Fuchsia, and MacOS have their own custom shared memory handle
+ # Android and MacOS have their own custom shared memory handle
# implementations. e.g. due to supporting both POSIX and native handles.
- if (is_posix && !is_android && !is_fuchsia && !is_mac) {
- sources += [ "memory/shared_memory_handle_posix.cc" ]
+ if (is_posix && !is_android && !is_mac) {
+ sources += [
+ "memory/platform_shared_memory_region_posix.cc",
+ "memory/shared_memory_handle_posix.cc",
+ ]
}
- if (is_posix && !is_fuchsia && !is_mac && !is_nacl) {
+ if (is_posix && !is_mac && !is_nacl) {
sources += [ "memory/shared_memory_posix.cc" ]
}
- if (is_posix && !is_fuchsia && !is_mac && !is_ios) {
+ if (is_posix && !is_mac && !is_ios) {
sources += [
"time/time_conversion_posix.cc",
"time/time_exploded_posix.cc",
@@ -1716,8 +1697,9 @@ jumbo_component("base") {
]
}
- if (is_posix && !is_mac && !is_ios && !is_android && !is_chromeos) {
- sources += [ "power_monitor/power_monitor_device_source_posix.cc" ]
+ if ((is_posix && !is_mac && !is_ios && !is_android && !is_chromeos) ||
+ is_fuchsia) {
+ sources += [ "power_monitor/power_monitor_device_source_stub.cc" ]
}
if (!use_glib) {
@@ -1771,6 +1753,7 @@ buildflag_header("debugging_buildflags") {
"ENABLE_PROFILING=$enable_profiling",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"UNSAFE_DEVELOPER_BUILD=$is_unsafe_developer_build",
+ "CAN_UNWIND_WITH_CFI_TABLE=$can_unwind_with_cfi_table",
]
}
@@ -2187,11 +2170,13 @@ test("base_unittests") {
"memory/memory_pressure_monitor_mac_unittest.cc",
"memory/memory_pressure_monitor_unittest.cc",
"memory/memory_pressure_monitor_win_unittest.cc",
+ "memory/platform_shared_memory_region_unittest.cc",
"memory/protected_memory_unittest.cc",
"memory/ptr_util_unittest.cc",
"memory/ref_counted_memory_unittest.cc",
"memory/ref_counted_unittest.cc",
"memory/shared_memory_mac_unittest.cc",
+ "memory/shared_memory_region_unittest.cc",
"memory/shared_memory_unittest.cc",
"memory/shared_memory_win_unittest.cc",
"memory/singleton_unittest.cc",
@@ -2214,6 +2199,7 @@ test("base_unittests") {
"metrics/histogram_unittest.cc",
"metrics/metrics_hashes_unittest.cc",
"metrics/persistent_histogram_allocator_unittest.cc",
+ "metrics/persistent_histogram_storage_unittest.cc",
"metrics/persistent_memory_allocator_unittest.cc",
"metrics/persistent_sample_map_unittest.cc",
"metrics/sample_map_unittest.cc",
@@ -2227,7 +2213,6 @@ test("base_unittests") {
"optional_unittest.cc",
"os_compat_android_unittest.cc",
"path_service_unittest.cc",
- "pending_task_unittest.cc",
"pickle_unittest.cc",
"posix/file_descriptor_shuffle_unittest.cc",
"posix/unix_domain_socket_unittest.cc",
@@ -2238,7 +2223,6 @@ test("base_unittests") {
"process/memory_unittest_mac.mm",
"process/process_info_unittest.cc",
"process/process_metrics_unittest.cc",
- "process/process_metrics_unittest_ios.cc",
"process/process_unittest.cc",
"process/process_util_unittest.cc",
"profiler/stack_sampling_profiler_unittest.cc",
@@ -2433,6 +2417,20 @@ test("base_unittests") {
}
if (is_android) {
+ # Add unwind tables in base_unittests_apk test apk. The unwind tables are
+ # generated from debug info in the binary. Removing "default_symbols" and
+ # adding symbols config removes the "strip_debug" config that strips the
+ # debug info, on base unittests apk.
+ if (can_unwind_with_cfi_table) {
+ configs -= [ "//build/config/compiler:default_symbols" ]
+ if (symbol_level == 2) {
+ configs += [ "//build/config/compiler:symbols" ]
+ } else {
+ configs += [ "//build/config/compiler:minimal_symbols" ]
+ }
+ add_unwind_tables_in_apk = true
+ sources += [ "trace_event/cfi_backtrace_android_unittest.cc" ]
+ }
sources -= [
"process/process_unittest.cc",
"process/process_util_unittest.cc",
@@ -2475,6 +2473,7 @@ test("base_unittests") {
if (use_partition_alloc) {
sources += [
"allocator/partition_allocator/address_space_randomization_unittest.cc",
+ "allocator/partition_allocator/page_allocator_unittest.cc",
"allocator/partition_allocator/partition_alloc_unittest.cc",
"allocator/partition_allocator/spin_lock_unittest.cc",
]
@@ -2507,7 +2506,7 @@ test("base_unittests") {
sources -= [ "message_loop/message_pump_glib_unittest.cc" ]
}
- if (is_posix && !is_ios && !is_fuchsia) {
+ if (is_posix && !is_ios) {
sources += [ "message_loop/message_pump_libevent_unittest.cc" ]
deps += [ "//base/third_party/libevent" ]
}
@@ -2517,6 +2516,9 @@ test("base_unittests") {
"files/file_locking_unittest.cc",
"posix/unix_domain_socket_unittest.cc",
]
+
+ sources += [ "fuchsia/async_dispatcher_unittest.cc" ]
+ deps += [ "//third_party/fuchsia-sdk:async" ]
}
if (is_android) {
@@ -2669,6 +2671,7 @@ if (is_android) {
deps = [
"//third_party/android_tools:android_support_annotations_java",
"//third_party/android_tools:android_support_multidex_java",
+ "//third_party/android_tools:android_support_v4_java",
"//third_party/jsr-305:jsr_305_javalib",
]
@@ -2748,6 +2751,9 @@ if (is_android) {
"android/java/src/org/chromium/base/process_launcher/ChildProcessService.java",
"android/java/src/org/chromium/base/process_launcher/ChildProcessServiceDelegate.java",
"android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
+ "android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java",
+ "android/java/src/org/chromium/base/memory/MemoryPressureCallback.java",
+ "android/java/src/org/chromium/base/memory/MemoryPressureUma.java",
]
# New versions of BuildConfig.java and NativeLibraries.java
@@ -2781,7 +2787,6 @@ if (is_android) {
"android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
"android/javatests/src/org/chromium/base/CommandLineTest.java",
"android/javatests/src/org/chromium/base/EarlyTraceEventTest.java",
- "android/javatests/src/org/chromium/base/MemoryPressureListenerTest.java",
# TODO(nona): move to Junit once that is built for Android N.
"android/javatests/src/org/chromium/base/LocaleUtilsTest.java",
@@ -2896,6 +2901,7 @@ if (is_android) {
"android/junit/src/org/chromium/base/LogTest.java",
"android/junit/src/org/chromium/base/NonThreadSafeTest.java",
"android/junit/src/org/chromium/base/PromiseTest.java",
+ "android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java",
"android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java",
"android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java",
"test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java",
@@ -2974,9 +2980,71 @@ fuzzer_test("base64_encode_fuzzer") {
fuzzer_test("base_json_correctness_fuzzer") {
sources = [
- "json/correctness_fuzzer.cc",
+ "json/json_correctness_fuzzer.cc",
+ ]
+ deps = [
+ ":base",
+ ]
+ dict = "//testing/libfuzzer/fuzzers/dicts/json.dict"
+}
+
+fuzzer_test("base_json_reader_fuzzer") {
+ sources = [
+ "json/json_reader_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+ dict = "//testing/libfuzzer/fuzzers/dicts/json.dict"
+}
+
+fuzzer_test("base_json_string_escape_fuzzer") {
+ sources = [
+ "json/string_escape_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+fuzzer_test("string_number_conversions_fuzzer") {
+ sources = [
+ "strings/string_number_conversions_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+fuzzer_test("string_tokenizer_fuzzer") {
+ sources = [
+ "strings/string_tokenizer_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+fuzzer_test("utf_string_conversions_fuzzer") {
+ sources = [
+ "strings/utf_string_conversions_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+# TODO(dyaroshev): remove regression fuzzer, after we run it for a few days
+# and are confident that the transition was ok.
+fuzzer_test("utf_string_conversions_regression_fuzzer") {
+ sources = [
+ "strings/old_utf_string_conversions.cc",
+ "strings/old_utf_string_conversions.h",
+ "strings/utf_string_conversions_regression_fuzzer.cc",
]
deps = [
":base",
]
+
+ libfuzzer_options = [ "max_len=32" ]
}
diff --git a/chromium/base/allocator/README.md b/chromium/base/allocator/README.md
index d69c09c870c..62b9be61609 100644
--- a/chromium/base/allocator/README.md
+++ b/chromium/base/allocator/README.md
@@ -19,30 +19,30 @@ build-time flags involved are `use_allocator` and `use_allocator_shim`.
The default choices are as follows:
-**Windows**
+**Windows**
`use_allocator: winheap`, the default Windows heap.
Additionally, `static_library` (i.e. non-component) builds have a shim
-layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
+layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
The shim layer provides extra security features, such as preventing large
allocations that can hit signed vs. unsigned bugs in third_party code.
-**Linux Desktop / CrOS**
+**Linux Desktop / CrOS**
`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
to fall back to the system (Glibc) symbols.
-**Android**
+**Android**
`use_allocator: none`, always use the allocator symbols coming from Android's
libc (Bionic). As it is developed as part of the OS, it is considered to be
-optimized for small devices and more memory-efficient than other choices.
+optimized for small devices and more memory-efficient than other choices.
The actual implementation backing malloc symbols in Bionic is up to the board
config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
-**Mac/iOS**
+**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.
-In addition, when building for `asan` / `msan` / `syzyasan`, both the allocator
-and the shim layer are disabled.
+In addition, when building for `asan` / `msan` both the allocator and the shim
+layer are disabled.
Layering and build deps
-----------------------
@@ -59,7 +59,7 @@ If such a functional dependency is required that should be achieved using
abstractions in `base` (see `/base/allocator/allocator_extension.h` and
`/base/memory/`)
-**Why `base` depends on `allocator`?**
+**Why `base` depends on `allocator`?**
Because it needs to provide services that depend on the actual allocator
implementation. In the past `base` used to pretend to be allocator-agnostic
and get the dependencies injected by other layers. This ended up being an
@@ -90,7 +90,7 @@ Unified allocator shim
On most platforms, Chrome overrides the malloc / operator new symbols (and
corresponding free / delete and other variants). This is to enforce security
checks and lately to enable the
-[memory-infra heap profiler][url-memory-infra-heap-profiler].
+[memory-infra heap profiler][url-memory-infra-heap-profiler].
Historically each platform had its special logic for defining the allocator
symbols in different places of the codebase. The unified allocator shim is
a project aimed to unify the symbol definition and allocator routing logic in
@@ -102,7 +102,7 @@ a central place.
- Tracking bug: [https://crbug.com/550886][crbug.com/550886].
- Build-time flag: `use_allocator_shim`.
-**Overview of the unified allocator shim**
+**Overview of the unified allocator shim**
The allocator shim consists of three stages:
```
+-------------------------+ +-----------------------+ +----------------+
@@ -118,7 +118,7 @@ The allocator shim consists of three stages:
+-------------------------+
```
-**1. malloc symbols definition**
+**1. malloc symbols definition**
This stage takes care of overriding the symbols `malloc`, `free`,
`operator new`, `operator delete` and friends and routing those calls inside the
allocator shim (next point).
@@ -158,7 +158,7 @@ undefined symbol references to malloc symbols.
These symbols will be resolved against libc.so as usual.
More details in [crrev.com/1719433002](https://crrev.com/1719433002).
-**2. Shim layer implementation**
+**2. Shim layer implementation**
This stage contains the actual shim implementation. This consists of:
- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
(using the `InsertAllocatorDispatch` API). They can intercept and override
@@ -166,7 +166,7 @@ allocator calls.
- The security checks (suicide on malloc-failure via `std::new_handler`, etc).
This happens inside `allocator_shim.cc`
-**3. Final allocator routing**
+**3. Final allocator routing**
The final element of the aforementioned dispatcher chain is statically defined
at build time and ultimately routes the allocator calls to the actual allocator
(as described in the *Background* section above). This is taken care of by the
@@ -175,7 +175,7 @@ headers in `allocator_shim_default_dispatch_to_*` files.
Appendixes
----------
-**How does the Windows shim layer replace the malloc symbols?**
+**How does the Windows shim layer replace the malloc symbols?**
The mechanism for hooking LIBCMT in Windows is rather tricky. The core
problem is that by default, the Windows library does not declare malloc and
free as weak symbols. Because of this, they cannot be overridden. To work
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.h b/chromium/base/allocator/partition_allocator/address_space_randomization.h
index 8bea1f7d76c..d5d497db7ab 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.h
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.h
@@ -29,113 +29,164 @@ constexpr uintptr_t AslrMask(uintptr_t bits) {
return AslrAddress((1ULL << bits) - 1ULL);
}
-#if defined(ARCH_CPU_64_BITS)
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-// We shouldn't be allocating system pages at all for sanitizer builds. However,
-// we do, and if random hint addresses interfere with address ranges hard coded
-// in those tools, bad things happen. This address range is copied from TSAN
-// source but works with all tools.
-// See crbug.com/539863.
-constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL);
-constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL);
-#elif defined(OS_WIN)
-// Windows 8.10 and newer support the full 48 bit address range. Older versions
-// of Windows only support 44 bits. Since kASLROffset is non-zero and may cause
-// a carry, use 47 and 43 bit masks.
-// See http://www.alex-ionescu.com/?p=246
-constexpr uintptr_t kASLRMask = AslrMask(47);
-constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43);
-// Try not to map pages into the range where Windows loads DLLs by default.
-constexpr uintptr_t kASLROffset = 0x80000000ULL;
-#elif defined(OS_MACOSX)
-// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
-// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region is
-// destroyed. Using a virtual address space that is too large causes a leak of
-// about 1 wired [can never be paged out] page per call to mmap(). The page is
-// only reclaimed when the process is killed. Confine the hint to a 39-bit
-// section of the virtual address space.
-//
-// This implementation adapted from
-// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
-// is that here we clamp to 39 bits, not 32.
+// Turn off formatting, because the thicket of nested ifdefs below is
+// incomprehensible without indentation. It is also incomprehensible with
+// indentation, but the only other option is a combinatorial explosion of
+// *_{win,linux,mac,foo}_{32,64}.h files.
//
-// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
-// changes.
-constexpr uintptr_t kASLRMask = AslrMask(38);
-constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
-#else // defined(OS_POSIX)
-#if defined(ARCH_CPU_X86_64)
-// Linux and OS X support the full 47-bit user space of x64 processors. Use
-// only 46 to allow kernel a chance to fulfill request.
-constexpr uintptr_t kASLRMask = AslrMask(46);
-constexpr uintptr_t kASLROffset = AslrAddress(0);
-#elif defined(ARCH_CPU_ARM64)
-// ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset could
-// cause a carry.
-constexpr uintptr_t kASLRMask = AslrMask(38);
-constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
-#elif defined(ARCH_CPU_PPC64)
-#if defined(OS_AIX)
-// AIX: 64 bits of virtual addressing, but we limit address range to:
-// a) minimize Segment Lookaside Buffer (SLB) misses and
-// b) use extra address space to isolate the mmap regions.
-constexpr uintptr_t kASLRMask = AslrMask(30);
-constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL);
-#elif defined(ARCH_CPU_BIG_ENDIAN)
-// Big-endian Linux: 44 bits of virtual addressing. Use 42.
-constexpr uintptr_t kASLRMask = AslrMask(42);
-constexpr uintptr_t kASLROffset = AslrAddress(0);
-#else // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
-// Little-endian Linux: 48 bits of virtual addressing. Use 46.
-constexpr uintptr_t kASLRMask = AslrMask(46);
-constexpr uintptr_t kASLROffset = AslrAddress(0);
-#endif // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
-#elif defined(ARCH_CPU_S390X)
-// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
-// of virtual addressing. Truncate to 40 bits to allow kernel chance to
-// fulfill request.
-constexpr uintptr_t kASLRMask = AslrMask(40);
-constexpr uintptr_t kASLROffset = AslrAddress(0);
-#elif defined(ARCH_CPU_S390)
-// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel a chance
-// to fulfill request.
-constexpr uintptr_t kASLRMask = AslrMask(29);
-constexpr uintptr_t kASLROffset = AslrAddress(0);
-#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
-// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
-// All other POSIX variants, use 30 bits.
-constexpr uintptr_t kASLRMask = AslrMask(30);
-#if defined(OS_SOLARIS)
-// For our Solaris/illumos mmap hint, we pick a random address in the bottom
-// half of the top half of the address space (that is, the third quarter).
-// Because we do not MAP_FIXED, this will be treated only as a hint -- the
-// system will not fail to mmap() because something else happens to already
-// be mapped at our random address. We deliberately set the hint high enough
-// to get well above the system's break (that is, the heap); Solaris and
-// illumos will try the hint and if that fails allocate as if there were
-// no hint at all. The high hint prevents the break from getting hemmed in
-// at low values, ceding half of the address space to the system heap.
-constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL);
-#elif defined(OS_AIX)
-// The range 0x30000000 - 0xD0000000 is available on AIX;
-// choose the upper range.
-constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL);
-#else // !defined(OS_SOLARIS) && !defined(OS_AIX)
-// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
-// 10.6 and 10.7.
-constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
-#endif // !defined(OS_SOLARIS) && !defined(OS_AIX)
-#endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
-// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
-#endif // defined(OS_POSIX)
-#else // defined(ARCH_CPU_32_BITS)
-// This is a good range on 32 bit Windows, Linux and Mac.
-// Allocates in the 0.5-1.5GB region. There is no issue with carries here.
-constexpr uintptr_t kASLRMask = AslrMask(30);
-constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+// clang-format off
+
+#if defined(ARCH_CPU_64_BITS)
+
+ #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+ // We shouldn't allocate system pages at all for sanitizer builds. However,
+ // we do, and if random hint addresses interfere with address ranges
+ // hard-coded in those tools, bad things happen. This address range is
+ // copied from TSAN source but works with all tools. See
+ // https://crbug.com/539863.
+ constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL);
+
+ #elif defined(OS_WIN)
+
+ // Windows 8.10 and newer support the full 48 bit address range. Older
+ // versions of Windows only support 44 bits. Since kASLROffset is non-zero
+ // and may cause a carry, use 47 and 43 bit masks. See
+ // http://www.alex-ionescu.com/?p=246
+ constexpr uintptr_t kASLRMask = AslrMask(47);
+ constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43);
+ // Try not to map pages into the range where Windows loads DLLs by default.
+ constexpr uintptr_t kASLROffset = 0x80000000ULL;
+
+ #elif defined(OS_MACOSX)
+
+ // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
+ // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
+ // is destroyed. Using a virtual address space that is too large causes a
+ // leak of about 1 wired [can never be paged out] page per call to mmap. The
+ // page is only reclaimed when the process is killed. Confine the hint to a
+ // 39-bit section of the virtual address space.
+ //
+ // This implementation adapted from
+ // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
+ // is that here we clamp to 39 bits, not 32.
+ //
+ // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
+ // changes.
+ constexpr uintptr_t kASLRMask = AslrMask(38);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+ #elif defined(OS_POSIX)
+
+ #if defined(ARCH_CPU_X86_64)
+
+ // Linux (and macOS) support the full 47-bit user space of x64 processors.
+ // Use only 46 to allow the kernel a chance to fulfill the request.
+ constexpr uintptr_t kASLRMask = AslrMask(46);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #elif defined(ARCH_CPU_ARM64)
+
+ // ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset
+ // could cause a carry.
+ constexpr uintptr_t kASLRMask = AslrMask(38);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+ #elif defined(ARCH_CPU_PPC64)
+
+ #if defined(OS_AIX)
+
+ // AIX has 64 bits of virtual addressing, but we limit the address range
+ // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
+ // extra address space to isolate the mmap regions.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL);
+
+ #elif defined(ARCH_CPU_BIG_ENDIAN)
+
+ // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
+ constexpr uintptr_t kASLRMask = AslrMask(42);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #else // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+ // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
+ constexpr uintptr_t kASLRMask = AslrMask(46);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #endif // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+ #elif defined(ARCH_CPU_S390X)
+
+ // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
+ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
+ // chance to fulfill the request.
+ constexpr uintptr_t kASLRMask = AslrMask(40);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #elif defined(ARCH_CPU_S390)
+
+ // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
+ // a chance to fulfill the request.
+ constexpr uintptr_t kASLRMask = AslrMask(29);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+ // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+ // For all other POSIX variants, use 30 bits.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+
+ #if defined(OS_SOLARIS)
+
+ // For our Solaris/illumos mmap hint, we pick a random address in the
+ // bottom half of the top half of the address space (that is, the third
+ // quarter). Because we do not MAP_FIXED, this will be treated only as a
+ // hint -- the system will not fail to mmap because something else
+ // happens to already be mapped at our random address. We deliberately
+ // set the hint high enough to get well above the system's break (that
+ // is, the heap); Solaris and illumos will try the hint and if that
+ // fails allocate as if there were no hint at all. The high hint
+ // prevents the break from getting hemmed in at low values, ceding half
+ // of the address space to the system heap.
+ constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL);
+
+ #elif defined(OS_AIX)
+
+ // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
+ // upper range.
+ constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL);
+
+ #else // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
+ // 10.6 and 10.7.
+ constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+ #endif // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+ #endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+ #endif // defined(OS_POSIX)
+
+#elif defined(ARCH_CPU_32_BITS)
+
+ // This is a good range on 32-bit Windows and Android (the only platforms on
+ // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
+ // is no issue with carries here.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+#else
+
+ #error Please tell us about your exotic hardware! Sounds interesting.
+
#endif // defined(ARCH_CPU_32_BITS)
+// clang-format on
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index 10568300e0e..b73e2b765fd 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -6,9 +6,8 @@
#include <limits.h>
-#include <atomic>
-
#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
@@ -17,221 +16,90 @@
#include "base/numerics/checked_math.h"
#include "build/build_config.h"
-#if defined(OS_POSIX)
-
-#include <errno.h>
-#include <sys/mman.h>
-
-#if defined(OS_MACOSX)
-#include <mach/mach.h>
-#endif
-#if defined(OS_LINUX)
-#include <sys/resource.h>
-#endif
-
-#ifndef MADV_FREE
-#define MADV_FREE MADV_DONTNEED
-#endif
+#include <atomic>
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
+#if defined(OS_WIN)
+#include <windows.h>
#endif
-namespace base {
-
-namespace {
-
-// On POSIX |mmap| uses a nearby address if the hint address is blocked.
-const bool kHintIsAdvisory = true;
-std::atomic<int32_t> s_allocPageErrorCode{0};
-
-int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
- switch (page_accessibility) {
- case PageReadWrite:
- return PROT_READ | PROT_WRITE;
- case PageReadExecute:
- return PROT_READ | PROT_EXEC;
- case PageReadWriteExecute:
- return PROT_READ | PROT_WRITE | PROT_EXEC;
- default:
- NOTREACHED();
- FALLTHROUGH;
- case PageInaccessible:
- return PROT_NONE;
- }
-}
-
-#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
-// On Linux, multiple guarded memory regions may exceed the process address
-// space limit. This function will raise or lower the limit by |amount|.
-bool AdjustAddressSpaceLimit(int64_t amount) {
- struct rlimit old_rlimit;
- if (getrlimit(RLIMIT_AS, &old_rlimit))
- return false;
- const rlim_t new_limit =
- CheckAdd(old_rlimit.rlim_cur, amount).ValueOrDefault(old_rlimit.rlim_max);
- const struct rlimit new_rlimit = {std::min(new_limit, old_rlimit.rlim_max),
- old_rlimit.rlim_max};
- // setrlimit will fail if limit > old_rlimit.rlim_max.
- return setrlimit(RLIMIT_AS, &new_rlimit) == 0;
-}
-
-// Current WASM guarded memory regions have 8 GiB of address space. There are
-// schemes that reduce that to 4 GiB.
-constexpr size_t kMinimumGuardedMemorySize = 1ULL << 32; // 4 GiB
-
-#endif // defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
-
+#if defined(OS_POSIX)
+#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
#elif defined(OS_WIN)
-
-#include <windows.h>
+#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
+#else
+#error Platform not supported.
+#endif
namespace base {
namespace {
-// |VirtualAlloc| will fail if allocation at the hint address is blocked.
-const bool kHintIsAdvisory = false;
-std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
-
-int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
- switch (page_accessibility) {
- case PageReadWrite:
- return PAGE_READWRITE;
- case PageReadExecute:
- return PAGE_EXECUTE_READ;
- case PageReadWriteExecute:
- return PAGE_EXECUTE_READWRITE;
- default:
- NOTREACHED();
- FALLTHROUGH;
- case PageInaccessible:
- return PAGE_NOACCESS;
- }
-}
+// We may reserve/release address space on different threads.
+LazyInstance<subtle::SpinLock>::Leaky s_reserveLock = LAZY_INSTANCE_INITIALIZER;
-#else
-#error Unknown OS
-#endif // defined(OS_POSIX)
-
-// We may reserve / release address space on different threads.
-static LazyInstance<subtle::SpinLock>::Leaky s_reserveLock =
- LAZY_INSTANCE_INITIALIZER;
// We only support a single block of reserved address space.
void* s_reservation_address = nullptr;
size_t s_reservation_size = 0;
-// This internal function wraps the OS-specific page allocation call:
-// |VirtualAlloc| on Windows, and |mmap| on POSIX.
-static void* SystemAllocPages(void* hint,
- size_t length,
- PageAccessibilityConfiguration page_accessibility,
- PageTag page_tag,
- bool commit) {
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
- kPageAllocationGranularityOffsetMask));
- DCHECK(commit || page_accessibility == PageInaccessible);
-
- void* ret;
-#if defined(OS_WIN)
- DWORD access_flag = GetAccessFlags(page_accessibility);
- const DWORD type_flags = commit ? (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
- ret = VirtualAlloc(hint, length, type_flags, access_flag);
- if (ret == nullptr)
- s_allocPageErrorCode = GetLastError();
-#else
-
-#if defined(OS_MACOSX)
- // Use a custom tag to make it easier to distinguish partition alloc regions
- // in vmmap. Tags between 240-255 are supported.
- DCHECK_LE(PageTag::kFirst, page_tag);
- DCHECK_GE(PageTag::kLast, page_tag);
- int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
-#else
- int fd = -1;
-#endif
- int access_flag = GetAccessFlags(page_accessibility);
- ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
- if (ret == MAP_FAILED) {
- s_allocPageErrorCode = errno;
- ret = nullptr;
- }
-#endif
- return ret;
-}
-
-static void* AllocPagesIncludingReserved(
- void* address,
- size_t length,
- PageAccessibilityConfiguration page_accessibility,
- PageTag page_tag,
- bool commit) {
+void* AllocPagesIncludingReserved(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
void* ret =
- SystemAllocPages(address, length, page_accessibility, page_tag, commit);
+ SystemAllocPages(address, length, accessibility, page_tag, commit);
if (ret == nullptr) {
const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
if (cant_alloc_length) {
// The system cannot allocate |length| bytes. Release any reserved address
// space and try once more.
ReleaseReservation();
- ret = SystemAllocPages(address, length, page_accessibility, page_tag,
- commit);
+ ret = SystemAllocPages(address, length, accessibility, page_tag, commit);
}
}
return ret;
}
-// Trims base to given length and alignment. Windows returns null on failure and
-// frees base.
-static void* TrimMapping(void* base,
- size_t base_length,
- size_t trim_length,
- uintptr_t align,
- PageAccessibilityConfiguration page_accessibility,
- bool commit) {
- size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1);
- if (pre_slack)
- pre_slack = align - pre_slack;
+// Trims |base| to given |trim_length| and |alignment|.
+//
+// On failure, on Windows, this function returns nullptr and frees |base|.
+void* TrimMapping(void* base,
+ size_t base_length,
+ size_t trim_length,
+ uintptr_t alignment,
+ PageAccessibilityConfiguration accessibility,
+ bool commit) {
+ size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (alignment - 1);
+ if (pre_slack) {
+ pre_slack = alignment - pre_slack;
+ }
size_t post_slack = base_length - pre_slack - trim_length;
DCHECK(base_length >= trim_length || pre_slack || post_slack);
DCHECK(pre_slack < base_length);
DCHECK(post_slack < base_length);
- void* ret = base;
-
-#if defined(OS_POSIX)
- // On POSIX we can resize the allocation run. Release unneeded memory before
- // and after the aligned range.
- (void)page_accessibility;
- if (pre_slack) {
- int res = munmap(base, pre_slack);
- CHECK(!res);
- ret = reinterpret_cast<char*>(base) + pre_slack;
- }
- if (post_slack) {
- int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
- CHECK(!res);
- }
-#else
- if (pre_slack || post_slack) {
- // On Windows we can't resize the allocation run. Free it and retry at the
- // aligned address within the freed range.
- ret = reinterpret_cast<char*>(base) + pre_slack;
- FreePages(base, base_length);
- ret = SystemAllocPages(ret, trim_length, page_accessibility,
- PageTag::kChromium, commit);
- }
-#endif
-
- return ret;
+ return TrimMappingInternal(base, base_length, trim_length, accessibility,
+ commit, pre_slack, post_slack);
}
} // namespace
+void* SystemAllocPages(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+ kPageAllocationGranularityOffsetMask));
+ DCHECK(commit || accessibility == PageInaccessible);
+ return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
+ commit);
+}
+
void* AllocPages(void* address,
size_t length,
size_t align,
- PageAccessibilityConfiguration page_accessibility,
+ PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
DCHECK(length >= kPageAllocationGranularity);
@@ -249,7 +117,7 @@ void* AllocPages(void* address,
// On 64 bit Linux, we may need to adjust the address space limit for
// guarded allocations.
if (length >= kMinimumGuardedMemorySize) {
- CHECK_EQ(PageInaccessible, page_accessibility);
+ CHECK_EQ(PageInaccessible, accessibility);
CHECK(!commit);
if (AdjustAddressSpaceLimit(base::checked_cast<int64_t>(length))) {
DLOG(WARNING) << "Could not address space by " << length;
@@ -274,8 +142,9 @@ void* AllocPages(void* address,
// On 64 bit systems, try 3 random aligned addresses.
constexpr int kExactSizeTries = 3;
#endif
+
for (int i = 0; i < kExactSizeTries; ++i) {
- void* ret = AllocPagesIncludingReserved(address, length, page_accessibility,
+ void* ret = AllocPagesIncludingReserved(address, length, accessibility,
page_tag, commit);
if (ret != nullptr) {
// If the alignment is to our liking, we're done.
@@ -311,13 +180,13 @@ void* AllocPages(void* address,
do {
// Continue randomizing only on POSIX.
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
- ret = AllocPagesIncludingReserved(address, try_length, page_accessibility,
+ ret = AllocPagesIncludingReserved(address, try_length, accessibility,
page_tag, commit);
// The retries are for Windows, where a race can steal our mapping on
// resize.
} while (ret != nullptr &&
- (ret = TrimMapping(ret, try_length, length, align,
- page_accessibility, commit)) == nullptr);
+ (ret = TrimMapping(ret, try_length, length, align, accessibility,
+ commit)) == nullptr);
return ret;
}
@@ -326,112 +195,32 @@ void FreePages(void* address, size_t length) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
kPageAllocationGranularityOffsetMask));
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
-#if defined(OS_POSIX)
- int ret = munmap(address, length);
- CHECK(!ret);
-#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
- // On 64 bit Linux, restore the address space limit.
- if (length >= kMinimumGuardedMemorySize) {
- CHECK(AdjustAddressSpaceLimit(-base::checked_cast<int64_t>(length)));
- }
-#endif
-#else
- BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
- CHECK(ret);
-#endif
+ FreePagesInternal(address, length);
}
bool SetSystemPagesAccess(void* address,
size_t length,
- PageAccessibilityConfiguration page_accessibility) {
+ PageAccessibilityConfiguration accessibility) {
DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- int access_flag = GetAccessFlags(page_accessibility);
- return !mprotect(address, length, access_flag);
-#else
- if (page_accessibility == PageInaccessible) {
- return VirtualFree(address, length, MEM_DECOMMIT) != 0;
- } else {
- DWORD access_flag = GetAccessFlags(page_accessibility);
- return !!VirtualAlloc(address, length, MEM_COMMIT, access_flag);
- }
-#endif
+ return SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(void* address, size_t length) {
DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
-#if defined(OS_POSIX)
- // In POSIX, there is no decommit concept. Discarding is an effective way of
- // implementing the Windows semantics where the OS is allowed to not swap the
- // pages in the region.
- //
- // TODO(ajwong): Also explore setting PageInaccessible to make the protection
- // semantics consistent between Windows and POSIX. This might have a perf cost
- // though as both decommit and recommit would incur an extra syscall.
- // http://crbug.com/766882
- DiscardSystemPages(address, length);
-#else
- CHECK(SetSystemPagesAccess(address, length, PageInaccessible));
-#endif
+ DecommitSystemPagesInternal(address, length);
}
bool RecommitSystemPages(void* address,
size_t length,
- PageAccessibilityConfiguration page_accessibility) {
+ PageAccessibilityConfiguration accessibility) {
DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
- DCHECK_NE(PageInaccessible, page_accessibility);
-#if defined(OS_POSIX)
- // On POSIX systems, read the memory to recommit. This has the correct
- // behavior because the API requires the permissions to be the same as before
- // decommitting and all configurations can read.
- (void)address;
- return true;
-#endif
- return SetSystemPagesAccess(address, length, page_accessibility);
+ DCHECK_NE(PageInaccessible, accessibility);
+ return RecommitSystemPagesInternal(address, length, accessibility);
}
void DiscardSystemPages(void* address, size_t length) {
DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
-#if defined(OS_POSIX)
-#if defined(OS_MACOSX)
- // On macOS, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
- // marks the pages with the reusable bit, which allows both Activity Monitor
- // and memory-infra to correctly track the pages.
- int ret = madvise(address, length, MADV_FREE_REUSABLE);
-#else
- int ret = madvise(address, length, MADV_FREE);
-#endif
- if (ret != 0 && errno == EINVAL) {
- // MADV_FREE only works on Linux 4.5+ . If request failed,
- // retry with older MADV_DONTNEED . Note that MADV_FREE
- // being defined at compile time doesn't imply runtime support.
- ret = madvise(address, length, MADV_DONTNEED);
- }
- CHECK(!ret);
-#else
- // On Windows discarded pages are not returned to the system immediately and
- // not guaranteed to be zeroed when returned to the application.
- using DiscardVirtualMemoryFunction =
- DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
- static DiscardVirtualMemoryFunction discard_virtual_memory =
- reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
- if (discard_virtual_memory ==
- reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
- discard_virtual_memory =
- reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
- GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
- // Use DiscardVirtualMemory when available because it releases faster than
- // MEM_RESET.
- DWORD ret = 1;
- if (discard_virtual_memory)
- ret = discard_virtual_memory(address, length);
- // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
- // failure.
- if (ret) {
- void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
- CHECK(ptr);
- }
-#endif
+ DiscardSystemPagesInternal(address, length);
}
bool ReserveAddressSpace(size_t size) {
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internal.h b/chromium/base/allocator/partition_allocator/page_allocator_internal.h
new file mode 100644
index 00000000000..c8c003d99d4
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internal.h
@@ -0,0 +1,18 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
+
+namespace base {
+
+void* SystemAllocPages(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit);
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
new file mode 100644
index 00000000000..0bdd76e3cca
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -0,0 +1,181 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#endif
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+#endif
+
+#include "build/build_config.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace base {
+
+// |mmap| uses a nearby address if the hint address is blocked.
+const bool kHintIsAdvisory = true;
+std::atomic<int32_t> s_allocPageErrorCode{0};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+ switch (accessibility) {
+ case PageReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case PageReadExecute:
+ return PROT_READ | PROT_EXEC;
+ case PageReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ default:
+ NOTREACHED();
+ FALLTHROUGH;
+ case PageInaccessible:
+ return PROT_NONE;
+ }
+}
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+// Multiple guarded memory regions may exceed the process address space limit.
+// This function will raise or lower the limit by |amount|.
+bool AdjustAddressSpaceLimit(int64_t amount) {
+ struct rlimit old_rlimit;
+ if (getrlimit(RLIMIT_AS, &old_rlimit))
+ return false;
+ const rlim_t new_limit =
+ CheckAdd(old_rlimit.rlim_cur, amount).ValueOrDefault(old_rlimit.rlim_max);
+ const struct rlimit new_rlimit = {std::min(new_limit, old_rlimit.rlim_max),
+ old_rlimit.rlim_max};
+ // setrlimit will fail if limit > old_rlimit.rlim_max.
+ return setrlimit(RLIMIT_AS, &new_rlimit) == 0;
+}
+
+// Current WASM guarded memory regions have 8 GiB of address space. There are
+// schemes that reduce that to 4 GiB.
+constexpr size_t kMinimumGuardedMemorySize = 1ULL << 32; // 4 GiB
+
+#endif // defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+void* SystemAllocPagesInternal(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+#if defined(OS_MACOSX)
+ // Use a custom tag to make it easier to distinguish Partition Alloc regions
+ // in vmmap(1). Tags between 240-255 are supported.
+ DCHECK_LE(PageTag::kFirst, page_tag);
+ DCHECK_GE(PageTag::kLast, page_tag);
+ int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
+#else
+ int fd = -1;
+#endif
+
+ int access_flag = GetAccessFlags(accessibility);
+ void* ret =
+ mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+ if (ret == MAP_FAILED) {
+ s_allocPageErrorCode = errno;
+ ret = nullptr;
+ }
+ return ret;
+}
+
+void* TrimMappingInternal(void* base,
+ size_t base_length,
+ size_t trim_length,
+ PageAccessibilityConfiguration accessibility,
+ bool commit,
+ size_t pre_slack,
+ size_t post_slack) {
+ void* ret = base;
+ // We can resize the allocation run. Release unneeded memory before and after
+ // the aligned range.
+ if (pre_slack) {
+ int res = munmap(base, pre_slack);
+ CHECK(!res);
+ ret = reinterpret_cast<char*>(base) + pre_slack;
+ }
+ if (post_slack) {
+ int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
+ CHECK(!res);
+ }
+ return ret;
+}
+
+bool SetSystemPagesAccessInternal(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ return 0 == mprotect(address, length, GetAccessFlags(accessibility));
+}
+
+void FreePagesInternal(void* address, size_t length) {
+ CHECK(!munmap(address, length));
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+ // Restore the address space limit.
+ if (length >= kMinimumGuardedMemorySize) {
+ CHECK(AdjustAddressSpaceLimit(-base::checked_cast<int64_t>(length)));
+ }
+#endif
+}
+
+void DecommitSystemPagesInternal(void* address, size_t length) {
+ // In POSIX, there is no decommit concept. Discarding is an effective way of
+ // implementing the Windows semantics where the OS is allowed to not swap the
+ // pages in the region.
+ //
+ // TODO(ajwong): Also explore setting PageInaccessible to make the protection
+ // semantics consistent between Windows and POSIX. This might have a perf cost
+ // though as both decommit and recommit would incur an extra syscall.
+ // http://crbug.com/766882
+ DiscardSystemPages(address, length);
+}
+
+bool RecommitSystemPagesInternal(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+#if defined(OS_MACOSX)
+ // On macOS, to update accounting, we need to make another syscall. For more
+ // details, see https://crbug.com/823915.
+ madvise(address, length, MADV_FREE_REUSE);
+#endif
+
+ // On POSIX systems, the caller need simply read the memory to recommit it.
+ // This has the correct behavior because the API requires the permissions to
+ // be the same as before decommitting and all configurations can read.
+ return true;
+}
+
+void DiscardSystemPagesInternal(void* address, size_t length) {
+#if defined(OS_MACOSX)
+ int ret = madvise(address, length, MADV_FREE_REUSABLE);
+ if (ret) {
+ // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
+ ret = madvise(address, length, MADV_DONTNEED);
+ }
+ CHECK(0 == ret);
+#else
+ // We have experimented with other flags, but with suboptimal results.
+ //
+ // MADV_FREE (Linux): Makes our memory measurements less predictable;
+ // performance benefits unclear.
+ //
+ // Therefore, we just do the simple thing: MADV_DONTNEED.
+ CHECK(!madvise(address, length, MADV_DONTNEED));
+#endif
+}
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
new file mode 100644
index 00000000000..9769784ef9d
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -0,0 +1,119 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+
+namespace base {
+
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
+const bool kHintIsAdvisory = false;
+std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+ switch (accessibility) {
+ case PageReadWrite:
+ return PAGE_READWRITE;
+ case PageReadExecute:
+ return PAGE_EXECUTE_READ;
+ case PageReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
+ default:
+ NOTREACHED();
+ FALLTHROUGH;
+ case PageInaccessible:
+ return PAGE_NOACCESS;
+ }
+}
+
+void* SystemAllocPagesInternal(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ DWORD access_flag = GetAccessFlags(accessibility);
+ const DWORD type_flags = commit ? (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
+ void* ret = VirtualAlloc(hint, length, type_flags, access_flag);
+ if (ret == nullptr) {
+ s_allocPageErrorCode = GetLastError();
+ }
+ return ret;
+}
+
+void* TrimMappingInternal(void* base,
+ size_t base_length,
+ size_t trim_length,
+ PageAccessibilityConfiguration accessibility,
+ bool commit,
+ size_t pre_slack,
+ size_t post_slack) {
+ void* ret = base;
+ if (pre_slack || post_slack) {
+ // We cannot resize the allocation run. Free it and retry at the aligned
+ // address within the freed range.
+ ret = reinterpret_cast<char*>(base) + pre_slack;
+ FreePages(base, base_length);
+ ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium,
+ commit);
+ }
+ return ret;
+}
+
+bool SetSystemPagesAccessInternal(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ if (accessibility == PageInaccessible) {
+ return VirtualFree(address, length, MEM_DECOMMIT) != 0;
+ } else {
+ return nullptr != VirtualAlloc(address, length, MEM_COMMIT,
+ GetAccessFlags(accessibility));
+ }
+}
+
+void FreePagesInternal(void* address, size_t length) {
+ CHECK(VirtualFree(address, 0, MEM_RELEASE));
+}
+
+void DecommitSystemPagesInternal(void* address, size_t length) {
+ CHECK(SetSystemPagesAccess(address, length, PageInaccessible));
+}
+
+bool RecommitSystemPagesInternal(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ return SetSystemPagesAccess(address, length, accessibility);
+}
+
+void DiscardSystemPagesInternal(void* address, size_t length) {
+ // On Windows, discarded pages are not returned to the system immediately and
+ // not guaranteed to be zeroed when returned to the application.
+ using DiscardVirtualMemoryFunction =
+ DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+ static DiscardVirtualMemoryFunction discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
+ if (discard_virtual_memory ==
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+ discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+ GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+ // Use DiscardVirtualMemory when available because it releases faster than
+ // MEM_RESET.
+ DWORD ret = 1;
+ if (discard_virtual_memory) {
+ ret = discard_virtual_memory(address, length);
+ }
+ // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+ // failure.
+ if (ret) {
+ void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+ CHECK(ptr);
+ }
+}
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
new file mode 100644
index 00000000000..9323d666456
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -0,0 +1,193 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#include <setjmp.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace base {
+
+namespace {
+
+// Any number of bytes that can be allocated with no trouble.
+constexpr size_t kEasyAllocSize =
+ (1024 * 1024) & ~(kPageAllocationGranularity - 1);
+
+// A huge amount of memory, greater than or equal to the ASLR space.
+constexpr size_t kHugeMemoryAmount =
+ std::max(internal::kASLRMask, std::size_t{2} * internal::kASLRMask);
+
+} // namespace
+
+// Test that failed page allocations invoke base::ReleaseReservation().
+// We detect this by making a reservation and ensuring that after failure, we
+// can make a new reservation.
+TEST(PageAllocatorTest, AllocFailure) {
+ // Release any reservation made by another test.
+ ReleaseReservation();
+
+ // We can make a reservation.
+ EXPECT_TRUE(ReserveAddressSpace(kEasyAllocSize));
+
+ // We can't make another reservation until we trigger an allocation failure.
+ EXPECT_FALSE(ReserveAddressSpace(kEasyAllocSize));
+
+ size_t size = kHugeMemoryAmount;
+ // Skip the test for sanitizers and platforms with ASLR turned off.
+ if (size == 0)
+ return;
+
+ void* result = AllocPages(nullptr, size, kPageAllocationGranularity,
+ PageInaccessible, PageTag::kChromium, false);
+ if (result == nullptr) {
+ // We triggered allocation failure. Our reservation should have been
+ // released, and we should be able to make a new reservation.
+ EXPECT_TRUE(ReserveAddressSpace(kEasyAllocSize));
+ ReleaseReservation();
+ return;
+ }
+ // We couldn't fail. Make sure reservation is still there.
+ EXPECT_FALSE(ReserveAddressSpace(kEasyAllocSize));
+}
+
+// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
+#else
+#define MAYBE_ReserveAddressSpace ReserveAddressSpace
+#endif // defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+
+// Test that reserving address space can fail.
+TEST(PageAllocatorTest, MAYBE_ReserveAddressSpace) {
+ // Release any reservation made by another test.
+ ReleaseReservation();
+
+ size_t size = kHugeMemoryAmount;
+ // Skip the test for sanitizers and platforms with ASLR turned off.
+ if (size == 0)
+ return;
+
+ bool success = ReserveAddressSpace(size);
+ if (!success) {
+ EXPECT_TRUE(ReserveAddressSpace(kEasyAllocSize));
+ return;
+ }
+ // We couldn't fail. Make sure reservation is still there.
+ EXPECT_FALSE(ReserveAddressSpace(kEasyAllocSize));
+}
+
+TEST(PageAllocatorTest, AllocAndFreePages) {
+ void* buffer = AllocPages(nullptr, kPageAllocationGranularity,
+ kPageAllocationGranularity, PageReadWrite,
+ PageTag::kChromium, true);
+ EXPECT_TRUE(buffer);
+ int* buffer0 = reinterpret_cast<int*>(buffer);
+ *buffer0 = 42;
+ EXPECT_EQ(42, *buffer0);
+ FreePages(buffer, kPageAllocationGranularity);
+}
+
+// Test permission setting on POSIX, where we can set a trap handler.
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+namespace {
+sigjmp_buf g_continuation;
+
+void SignalHandler(int signal, siginfo_t* info, void*) {
+ siglongjmp(g_continuation, 1);
+}
+} // namespace
+
+// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
+#if defined(OS_MACOSX)
+#define EXTRA_FAULT_BEGIN_ACTION() \
+ struct sigaction old_bus_action; \
+ sigaction(SIGBUS, &action, &old_bus_action);
+#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
+#else
+#define EXTRA_FAULT_BEGIN_ACTION()
+#define EXTRA_FAULT_END_ACTION()
+#endif
+
+// Install a signal handler so we can catch the fault we're about to trigger.
+#define FAULT_TEST_BEGIN() \
+ struct sigaction action = {}; \
+ struct sigaction old_action = {}; \
+ action.sa_sigaction = SignalHandler; \
+ sigemptyset(&action.sa_mask); \
+ action.sa_flags = SA_SIGINFO; \
+ sigaction(SIGSEGV, &action, &old_action); \
+ EXTRA_FAULT_BEGIN_ACTION(); \
+ int const save_sigs = 1; \
+ if (!sigsetjmp(g_continuation, save_sigs)) {
+// Fault generating code goes here...
+
+// Handle when sigsetjmp returns nonzero (we are returning from our handler).
+#define FAULT_TEST_END() \
+ } \
+ else { \
+ sigaction(SIGSEGV, &old_action, nullptr); \
+ EXTRA_FAULT_END_ACTION(); \
+ }
+
+TEST(PageAllocatorTest, InaccessiblePages) {
+ void* buffer = AllocPages(nullptr, kPageAllocationGranularity,
+ kPageAllocationGranularity, PageInaccessible,
+ PageTag::kChromium, true);
+ EXPECT_TRUE(buffer);
+
+ FAULT_TEST_BEGIN();
+
+ // Reading from buffer should fault.
+ int* buffer0 = reinterpret_cast<int*>(buffer);
+ int buffer0_contents = *buffer0;
+ EXPECT_EQ(buffer0_contents, *buffer0);
+ EXPECT_TRUE(false);
+
+ FAULT_TEST_END();
+
+ FreePages(buffer, kPageAllocationGranularity);
+}
+
+TEST(PageAllocatorTest, ReadExecutePages) {
+ void* buffer = AllocPages(nullptr, kPageAllocationGranularity,
+ kPageAllocationGranularity, PageReadExecute,
+ PageTag::kChromium, true);
+ EXPECT_TRUE(buffer);
+ int* buffer0 = reinterpret_cast<int*>(buffer);
+ // Reading from buffer should succeed.
+ int buffer0_contents = *buffer0;
+
+ FAULT_TEST_BEGIN();
+
+ // Writing to buffer should fault.
+ *buffer0 = ~buffer0_contents;
+ EXPECT_TRUE(false);
+
+ FAULT_TEST_END();
+
+ // Make sure no write occurred.
+ EXPECT_EQ(buffer0_contents, *buffer0);
+ FreePages(buffer, kPageAllocationGranularity);
+}
+
+#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+} // namespace base
+
+#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 38d2a4a95dc..00f77f8f73e 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -29,11 +29,6 @@
namespace {
-template <typename T>
-std::unique_ptr<T[]> WrapArrayUnique(T* ptr) {
- return std::unique_ptr<T[]>(ptr);
-}
-
constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
bool IsLargeMemoryDevice() {
@@ -249,27 +244,7 @@ void FreeFullPage(PartitionPage* page) {
}
#if defined(OS_LINUX)
-bool KernelSupportsMadvFree() {
- int32_t major_version;
- int32_t minor_version;
- int32_t bugfix_version;
- SysInfo::OperatingSystemVersionNumbers(&major_version, &minor_version,
- &bugfix_version);
- return std::vector<int32_t>{major_version, minor_version, bugfix_version} >=
- std::vector<int32_t>{4, 5};
-}
-
bool CheckPageInCore(void* ptr, bool in_core) {
- // If the kernel supports MADV_FREE, then pages may still be in core to be
- // reclaimed by the OS later. This is a cool optimization that prevents the
- // kernel from freeing and allocating memory in case the app needs more memory
- // soon -- it can just reuse the memory already allocated. Unfortunately,
- // there's no way to test if a page is in core because it needs to be, or if
- // it just hasn't been reclaimed yet.
- static bool kernel_supports_madv_free = KernelSupportsMadvFree();
- if (kernel_supports_madv_free)
- return true;
-
unsigned char ret = 0;
EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
return in_core == (ret & 1);
@@ -332,73 +307,7 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
std::vector<PartitionBucketMemoryStats> bucket_stats;
};
-// Any number of bytes that can be allocated with no trouble.
-constexpr size_t kEasyAllocSize =
- (1024 * 1024) & ~(kPageAllocationGranularity - 1);
-
-// A huge amount of memory, greater than or equal to the ASLR space.
-constexpr size_t kHugeMemoryAmount =
- std::max(base::internal::kASLRMask,
- std::size_t{2} * base::internal::kASLRMask);
-
-} // anonymous namespace
-
-// Test that failed page allocations invoke base::ReleaseReservation().
-// We detect this by making a reservation and ensuring that after failure, we
-// can make a new reservation.
-TEST(PageAllocatorTest, AllocFailure) {
- // Release any reservation made by another test.
- base::ReleaseReservation();
-
- // We can make a reservation.
- EXPECT_TRUE(base::ReserveAddressSpace(kEasyAllocSize));
-
- // We can't make another reservation until we trigger an allocation failure.
- EXPECT_FALSE(base::ReserveAddressSpace(kEasyAllocSize));
-
- size_t size = kHugeMemoryAmount;
- // Skip the test for sanitizers and platforms with ASLR turned off.
- if (size == 0)
- return;
-
- void* result = base::AllocPages(nullptr, size, kPageAllocationGranularity,
- PageInaccessible, PageTag::kChromium, false);
- if (result == nullptr) {
- // We triggered allocation failure. Our reservation should have been
- // released, and we should be able to make a new reservation.
- EXPECT_TRUE(base::ReserveAddressSpace(kEasyAllocSize));
- base::ReleaseReservation();
- return;
- }
- // We couldn't fail. Make sure reservation is still there.
- EXPECT_FALSE(base::ReserveAddressSpace(kEasyAllocSize));
-}
-
-// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
-#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
-#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
-#else
-#define MAYBE_ReserveAddressSpace ReserveAddressSpace
-#endif // defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
-
-// Test that reserving address space can fail.
-TEST(PageAllocatorTest, MAYBE_ReserveAddressSpace) {
- // Release any reservation made by another test.
- base::ReleaseReservation();
-
- size_t size = kHugeMemoryAmount;
- // Skip the test for sanitizers and platforms with ASLR turned off.
- if (size == 0)
- return;
-
- bool success = base::ReserveAddressSpace(size);
- if (!success) {
- EXPECT_TRUE(base::ReserveAddressSpace(kEasyAllocSize));
- return;
- }
- // We couldn't fail. Make sure reservation is still there.
- EXPECT_FALSE(base::ReserveAddressSpace(kEasyAllocSize));
-}
+} // namespace
// Check that the most basic of allocate / free pairs work.
TEST_F(PartitionAllocTest, Basic) {
@@ -573,8 +482,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
// The +1 is because we need to account for the fact that the current page
// never gets thrown on the freelist.
++numToFillFreeListPage;
- std::unique_ptr<PartitionPage* []> pages =
- WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]);
+ auto pages = std::make_unique<PartitionPage* []>(numToFillFreeListPage);
size_t i;
for (i = 0; i < numToFillFreeListPage; ++i) {
@@ -616,8 +524,7 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
--numPagesNeeded;
EXPECT_GT(numPagesNeeded, 1u);
- std::unique_ptr<PartitionPage* []> pages;
- pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]);
+ auto pages = std::make_unique<PartitionPage* []>(numPagesNeeded);
uintptr_t firstSuperPageBase = 0;
size_t i;
for (i = 0; i < numPagesNeeded; ++i) {
@@ -1163,10 +1070,10 @@ TEST_F(PartitionAllocTest, MappingCollision) {
// The -2 is because the first and last partition pages in a super page are
// guard pages.
size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
- std::unique_ptr<PartitionPage* []> firstSuperPagePages =
- WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]);
- std::unique_ptr<PartitionPage* []> secondSuperPagePages =
- WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]);
+ auto firstSuperPagePages =
+ std::make_unique<PartitionPage* []>(numPartitionPagesNeeded);
+ auto secondSuperPagePages =
+ std::make_unique<PartitionPage* []>(numPartitionPagesNeeded);
size_t i;
for (i = 0; i < numPartitionPagesNeeded; ++i)
@@ -1353,48 +1260,44 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_TRUE(bucket->decommitted_pages_head);
}
-#if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
-
// Unit tests that check if an allocation fails in "return null" mode,
// repeating it doesn't crash, and still returns null. The tests need to
// stress memory subsystem limits to do so, hence they try to allocate
// 6 GB of memory, each with a different per-allocation block sizes.
//
-// On 64-bit POSIX systems, the address space is limited to 6 GB using
-// setrlimit() first.
+// On 64-bit systems we need to restrict the address space to force allocation
+// failure, so these tests run only on POSIX systems that provide setrlimit(),
+// and use it to limit address space to 6GB.
+//
+// Disable these tests on Android because, due to the allocation-heavy behavior,
+// they tend to get OOM-killed rather than pass.
+// TODO(https://crbug.com/779645): Fuchsia currently sets OS_POSIX, but does
+// not provide a working setrlimit().
+#if !defined(ARCH_CPU_64_BITS) || \
+ (defined(OS_POSIX) && \
+ !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID)))
+
+// This is defined as a separate test class because RepeatedReturnNull
+// test exhausts the process memory, and breaks any test in the same
+// class that runs after it.
+class PartitionAllocReturnNullTest : public PartitionAllocTest {};
// Test "return null" for larger, direct-mapped allocations first. As a
// direct-mapped allocation's pages are unmapped and freed on release, this
// test is performd first for these "return null" tests in order to leave
// sufficient unreserved virtual memory around for the later one(s).
-
-// Disable this test on Android because, due to its allocation-heavy behavior,
-// it tends to get OOM-killed rather than pass.
-#if defined(OS_MACOSX) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
-#define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect
-#else
-#define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect
-#endif
-TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) {
+TEST_F(PartitionAllocReturnNullTest, RepeatedReturnNullDirect) {
// A direct-mapped allocation size.
DoReturnNullTest(32 * 1024 * 1024);
}
// Test "return null" with a 512 kB block size.
-
-// Disable this test on Android because, due to its allocation-heavy behavior,
-// it tends to get OOM-killed rather than pass.
-#if defined(OS_MACOSX) || defined(OS_ANDROID)
-#define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull
-#else
-#define MAYBE_RepeatedReturnNull RepeatedReturnNull
-#endif
-TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNull) {
+TEST_F(PartitionAllocReturnNullTest, RepeatedReturnNull) {
// A single-slot but non-direct-mapped allocation size.
DoReturnNullTest(512 * 1024);
}
-
-#endif // !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
+#endif // !defined(ARCH_CPU_64_BITS) || (defined(OS_POSIX) &&
+ // !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID)))
// Death tests misbehave on Android, http://crbug.com/643760.
#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
diff --git a/chromium/base/android/jni_generator/jni_exception_list.gni b/chromium/base/android/jni_generator/jni_exception_list.gni
index a990cc2755e..e3ce93adbb7 100644
--- a/chromium/base/android/jni_generator/jni_exception_list.gni
+++ b/chromium/base/android/jni_generator/jni_exception_list.gni
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//device/vr/features/features.gni")
+import("//device/vr/buildflags/buildflags.gni")
jni_exception_files = [
"//base/android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
diff --git a/chromium/base/atomicops.h b/chromium/base/atomicops.h
index 3428fe87abb..4d8510e89e0 100644
--- a/chromium/base/atomicops.h
+++ b/chromium/base/atomicops.h
@@ -145,8 +145,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} // namespace base
#if defined(OS_WIN)
-// TODO(jfb): The MSVC header includes windows.h, which other files end up
-// relying on. Fix this as part of crbug.com/559247.
+// TODO(jfb): Try to use base/atomicops_internals_portable.h everywhere.
+// https://crbug.com/559247.
# include "base/atomicops_internals_x86_msvc.h"
#else
# include "base/atomicops_internals_portable.h"
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index ab74d6aaf27..62e6d3dce9c 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -7,6 +7,9 @@
namespace switches {
+// Delays execution of base::TaskPriority::BACKGROUND tasks until shutdown.
+const char kDisableBackgroundTasks[] = "disable-background-tasks";
+
// Disables the crash reporting.
const char kDisableBreakpad[] = "disable-breakpad";
@@ -132,6 +135,12 @@ const char kEnableCrashReporterForTesting[] =
// given in base/android/library_loader/anchor_functions.h, via madvise and
// changing the library prefetch behavior.
const char kOrderfileMemoryOptimization[] = "orderfile-memory-optimization";
+// Force prefetching of the native library even if otherwise disabled, eg by
+// --orderfile-memory-optimization.
+const char kForceNativePrefetch[] = "force-native-prefetch";
+// If prefetching is enabled, only prefetch the ordered part of the native
+// library. Has no effect if prefetching is disabled.
+const char kNativePrefetchOrderedOnly[] = "native-prefetch-ordered-only";
#endif
} // namespace switches
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index 566ecbbcc6b..a444f09bbab 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -11,6 +11,7 @@
namespace switches {
+extern const char kDisableBackgroundTasks[];
extern const char kDisableBreakpad[];
extern const char kDisableFeatures[];
extern const char kDisableLowEndDeviceMode[];
@@ -47,6 +48,8 @@ extern const char kEnableCrashReporterForTesting[];
#if defined(OS_ANDROID)
extern const char kOrderfileMemoryOptimization[];
+extern const char kForceNativePrefetch[];
+extern const char kNativePrefetchOrderedOnly[];
#endif
} // namespace switches
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index 9d3a8026927..cb66d928be8 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -163,7 +163,7 @@ template <typename T>
using Unwrapper = BindUnwrapTraits<std::decay_t<T>>;
template <typename T>
-auto Unwrap(T&& o) -> decltype(Unwrapper<T>::Unwrap(std::forward<T>(o))) {
+decltype(auto) Unwrap(T&& o) {
return Unwrapper<T>::Unwrap(std::forward<T>(o));
}
diff --git a/chromium/base/bits.h b/chromium/base/bits.h
index 37d34d94cf1..7d4d90d1dac 100644
--- a/chromium/base/bits.h
+++ b/chromium/base/bits.h
@@ -21,34 +21,6 @@
namespace base {
namespace bits {
-// Returns the integer i such as 2^i <= n < 2^(i+1)
-inline int Log2Floor(uint32_t n) {
- if (n == 0)
- return -1;
- int log = 0;
- uint32_t value = n;
- for (int i = 4; i >= 0; --i) {
- int shift = (1 << i);
- uint32_t x = value >> shift;
- if (x != 0) {
- value = x;
- log += shift;
- }
- }
- DCHECK_EQ(value, 1u);
- return log;
-}
-
-// Returns the integer i such as 2^(i-1) < n <= 2^i
-inline int Log2Ceiling(uint32_t n) {
- if (n == 0) {
- return -1;
- } else {
- // Log2Floor returns -1 for 0, so the following works correctly for n=1.
- return 1 + Log2Floor(n - 1);
- }
-}
-
// Round up |size| to a multiple of alignment, which must be a power of two.
inline size_t Align(size_t size, size_t alignment) {
DCHECK_EQ(alignment & (alignment - 1), 0u);
@@ -180,6 +152,19 @@ ALWAYS_INLINE size_t CountTrailingZeroBitsSizeT(size_t x) {
return CountTrailingZeroBits(x);
}
+// Returns the integer i such as 2^i <= n < 2^(i+1)
+inline int Log2Floor(uint32_t n) {
+ return 31 - CountLeadingZeroBits(n);
+}
+
+// Returns the integer i such as 2^(i-1) < n <= 2^i
+inline int Log2Ceiling(uint32_t n) {
+ // When n == 0, we want the function to return -1.
+ // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
+ // why the statement below starts with (n ? 32 : -1).
+ return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
+}
+
} // namespace bits
} // namespace base
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index e9c5a0b64aa..70f5171620b 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -56,7 +56,7 @@ class OnceCallback<R(Args...)> : public internal::CallbackBase {
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
internal::PassingTraitsType<Args>...);
- OnceCallback() : internal::CallbackBase(nullptr) {}
+ constexpr OnceCallback() = default;
explicit OnceCallback(internal::BindStateBase* bind_state)
: internal::CallbackBase(bind_state) {}
@@ -103,7 +103,7 @@ class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
using PolymorphicInvoke = R (*)(internal::BindStateBase*,
internal::PassingTraitsType<Args>...);
- RepeatingCallback() : internal::CallbackBaseCopyable(nullptr) {}
+ constexpr RepeatingCallback() = default;
explicit RepeatingCallback(internal::BindStateBase* bind_state)
: internal::CallbackBaseCopyable(bind_state) {}
diff --git a/chromium/base/callback_helpers.h b/chromium/base/callback_helpers.h
index 2a9f7f433f2..0e360c008ad 100644
--- a/chromium/base/callback_helpers.h
+++ b/chromium/base/callback_helpers.h
@@ -61,6 +61,10 @@ class AdaptCallbackForRepeatingHelper final {
// Wraps the given OnceCallback into a RepeatingCallback that relays its
// invocation to the original OnceCallback on the first invocation. The
// following invocations are just ignored.
+//
+// Note that this deliberately subverts the Once/Repeating paradigm of Callbacks
+// but helps ease the migration from old-style Callbacks. Avoid if possible; use
+// if necessary for migration. TODO(tzik): Remove it. https://crbug.com/730593
template <typename... Args>
RepeatingCallback<void(Args...)> AdaptCallbackForRepeating(
OnceCallback<void(Args...)> callback) {
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index ab2182e01f6..6deb927f9ec 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -134,6 +134,8 @@ class BASE_EXPORT CallbackBase {
// Returns true if this callback equals |other|. |other| may be null.
bool EqualsInternal(const CallbackBase& other) const;
+ constexpr inline CallbackBase();
+
// Allow initializing of |bind_state_| via the constructor to avoid default
// initialization of the scoped_refptr.
explicit CallbackBase(BindStateBase* bind_state);
@@ -150,6 +152,8 @@ class BASE_EXPORT CallbackBase {
scoped_refptr<BindStateBase> bind_state_;
};
+constexpr CallbackBase::CallbackBase() = default;
+
// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
class BASE_EXPORT CallbackBaseCopyable : public CallbackBase {
public:
@@ -159,6 +163,7 @@ class BASE_EXPORT CallbackBaseCopyable : public CallbackBase {
CallbackBaseCopyable& operator=(CallbackBaseCopyable&& c);
protected:
+ constexpr CallbackBaseCopyable() = default;
explicit CallbackBaseCopyable(BindStateBase* bind_state)
: CallbackBase(bind_state) {}
~CallbackBaseCopyable() = default;
diff --git a/chromium/base/callback_list.h b/chromium/base/callback_list.h
index 092aa2af160..f455c657308 100644
--- a/chromium/base/callback_list.h
+++ b/chromium/base/callback_list.h
@@ -15,10 +15,10 @@
// OVERVIEW:
//
-// A container for a list of callbacks. Unlike a normal STL vector or list,
-// this container can be modified during iteration without invalidating the
-// iterator. It safely handles the case of a callback removing itself
-// or another callback from the list while callbacks are being run.
+// A container for a list of (repeating) callbacks. Unlike a normal vector or
+// list, this container can be modified during iteration without invalidating
+// the iterator. It safely handles the case of a callback removing itself or
+// another callback from the list while callbacks are being run.
//
// TYPICAL USAGE:
//
@@ -26,10 +26,8 @@
// public:
// ...
//
-// typedef base::Callback<void(const Foo&)> OnFooCallback;
-//
// std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
-// RegisterCallback(const OnFooCallback& cb) {
+// RegisterCallback(const base::RepeatingCallback<void(const Foo&)>& cb) {
// return callback_list_.Add(cb);
// }
//
@@ -48,7 +46,7 @@
// public:
// MyWidgetListener::MyWidgetListener() {
// foo_subscription_ = MyWidget::GetCurrent()->RegisterCallback(
-// base::Bind(&MyWidgetListener::OnFoo, this)));
+// base::BindRepeating(&MyWidgetListener::OnFoo, this)));
// }
//
// MyWidgetListener::~MyWidgetListener() {
@@ -104,12 +102,12 @@ class CallbackListBase {
// CallbackList is destroyed.
std::unique_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
DCHECK(!cb.is_null());
- return std::unique_ptr<Subscription>(
- new Subscription(this, callbacks_.insert(callbacks_.end(), cb)));
+ return std::make_unique<Subscription>(
+ this, callbacks_.insert(callbacks_.end(), cb));
}
// Sets a callback which will be run when a subscription list is changed.
- void set_removal_callback(const Closure& callback) {
+ void set_removal_callback(const RepeatingClosure& callback) {
removal_callback_ = callback;
}
@@ -146,7 +144,7 @@ class CallbackListBase {
while ((list_iter_ != list_->callbacks_.end()) && list_iter_->is_null())
++list_iter_;
- CallbackType* cb = NULL;
+ CallbackType* cb = nullptr;
if (list_iter_ != list_->callbacks_.end()) {
cb = &(*list_iter_);
++list_iter_;
@@ -172,10 +170,10 @@ class CallbackListBase {
return Iterator(this);
}
- // Compact the list: remove any entries which were NULLed out during
+ // Compact the list: remove any entries which were nulled out during
// iteration.
void Compact() {
- typename std::list<CallbackType>::iterator it = callbacks_.begin();
+ auto it = callbacks_.begin();
bool updated = false;
while (it != callbacks_.end()) {
if ((*it).is_null()) {
@@ -193,7 +191,7 @@ class CallbackListBase {
private:
std::list<CallbackType> callbacks_;
int active_iterator_count_;
- Closure removal_callback_;
+ RepeatingClosure removal_callback_;
DISALLOW_COPY_AND_ASSIGN(CallbackListBase);
};
@@ -204,18 +202,17 @@ template <typename Sig> class CallbackList;
template <typename... Args>
class CallbackList<void(Args...)>
- : public internal::CallbackListBase<Callback<void(Args...)> > {
+ : public internal::CallbackListBase<RepeatingCallback<void(Args...)>> {
public:
- typedef Callback<void(Args...)> CallbackType;
+ using CallbackType = RepeatingCallback<void(Args...)>;
CallbackList() = default;
template <typename... RunArgs>
void Notify(RunArgs&&... args) {
- typename internal::CallbackListBase<CallbackType>::Iterator it =
- this->GetIterator();
+ auto it = this->GetIterator();
CallbackType* cb;
- while ((cb = it.GetNext()) != NULL) {
+ while ((cb = it.GetNext()) != nullptr) {
cb->Run(args...);
}
}
diff --git a/chromium/base/containers/circular_deque_unittest.cc b/chromium/base/containers/circular_deque_unittest.cc
index df960c3fc08..0c168e0c877 100644
--- a/chromium/base/containers/circular_deque_unittest.cc
+++ b/chromium/base/containers/circular_deque_unittest.cc
@@ -165,7 +165,7 @@ TEST(CircularDeque, EqualsMove) {
// Tests that self-assignment is a no-op.
TEST(CircularDeque, EqualsSelf) {
circular_deque<int> q = {1, 2, 3, 4, 5, 6};
- q = q;
+ q = *&q; // The *& defeats Clang's -Wself-assign warning.
EXPECT_EQ(6u, q.size());
for (int i = 0; i < 6; i++)
EXPECT_EQ(i + 1, q[i]);
diff --git a/chromium/base/containers/flat_tree.h b/chromium/base/containers/flat_tree.h
index e6a22857c0d..5b421accf88 100644
--- a/chromium/base/containers/flat_tree.h
+++ b/chromium/base/containers/flat_tree.h
@@ -805,8 +805,7 @@ auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
const_iterator position) -> iterator {
- // We have to cast away const because of crbug.com/677044.
- return erase(const_cast_it(position));
+ return impl_.body_.erase(position);
}
template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
@@ -815,8 +814,7 @@ auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(const K& val)
-> size_type {
auto eq_range = equal_range(val);
auto res = std::distance(eq_range.first, eq_range.second);
- // We have to cast away const because of crbug.com/677044.
- erase(const_cast_it(eq_range.first), const_cast_it(eq_range.second));
+ erase(eq_range.first, eq_range.second);
return res;
}
@@ -824,8 +822,7 @@ template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
const_iterator first,
const_iterator last) -> iterator {
- // We have to cast away const because of crbug.com/677044.
- return impl_.body_.erase(const_cast_it(first), const_cast_it(last));
+ return impl_.body_.erase(first, last);
}
// ----------------------------------------------------------------------------
diff --git a/chromium/base/containers/id_map.h b/chromium/base/containers/id_map.h
index ab3c11214b5..4c816da3767 100644
--- a/chromium/base/containers/id_map.h
+++ b/chromium/base/containers/id_map.h
@@ -48,14 +48,14 @@ class IDMap final {
// access it from a different, but consistent, thread (or sequence)
// post-construction. The first call to CalledOnValidSequence() will re-bind
// it.
- sequence_checker_.DetachFromSequence();
+ DETACH_FROM_SEQUENCE(sequence_checker_);
}
~IDMap() {
// Many IDMap's are static, and hence will be destroyed on the main
// thread. However, all the accesses may take place on another thread (or
// sequence), such as the IO thread. Detaching again to clean this up.
- sequence_checker_.DetachFromSequence();
+ DETACH_FROM_SEQUENCE(sequence_checker_);
}
// Sets whether Add and Replace should DCHECK if passed in NULL data.
@@ -72,9 +72,9 @@ class IDMap final {
void AddWithID(V data, KeyType id) { AddWithIDInternal(std::move(data), id); }
void Remove(KeyType id) {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
typename HashTable::iterator i = data_.find(id);
- if (i == data_.end()) {
+ if (i == data_.end() || IsRemoved(id)) {
NOTREACHED() << "Attempting to remove an item not in the list";
return;
}
@@ -89,41 +89,42 @@ class IDMap final {
// Replaces the value for |id| with |new_data| and returns the existing value.
// Should only be called with an already added id.
V Replace(KeyType id, V new_data) {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!check_on_null_data_ || new_data);
typename HashTable::iterator i = data_.find(id);
DCHECK(i != data_.end());
+ DCHECK(!IsRemoved(id));
- std::swap(i->second, new_data);
+ using std::swap;
+ swap(i->second, new_data);
return new_data;
}
void Clear() {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (iteration_depth_ == 0) {
data_.clear();
} else {
- for (typename HashTable::iterator i = data_.begin();
- i != data_.end(); ++i)
- removed_ids_.insert(i->first);
+ removed_ids_.reserve(data_.size());
+ removed_ids_.insert(KeyIterator(data_.begin()), KeyIterator(data_.end()));
}
}
bool IsEmpty() const {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return size() == 0u;
}
T* Lookup(KeyType id) const {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
typename HashTable::const_iterator i = data_.find(id);
- if (i == data_.end())
+ if (i == data_.end() || !i->second || IsRemoved(id))
return nullptr;
return &*i->second;
}
size_t size() const {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return data_.size() - removed_ids_.size();
}
@@ -156,7 +157,7 @@ class IDMap final {
}
~Iterator() {
- DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
// We're going to decrement iteration depth. Make sure it's greater than
// zero so that it doesn't become negative.
@@ -167,39 +168,38 @@ class IDMap final {
}
bool IsAtEnd() const {
- DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
return iter_ == map_->data_.end();
}
KeyType GetCurrentKey() const {
- DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
return iter_->first;
}
ReturnType* GetCurrentValue() const {
- DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+ if (!iter_->second || map_->IsRemoved(iter_->first))
+ return nullptr;
return &*iter_->second;
}
void Advance() {
- DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
++iter_;
SkipRemovedEntries();
}
private:
void Init() {
- DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
++map_->iteration_depth_;
SkipRemovedEntries();
}
void SkipRemovedEntries() {
- while (iter_ != map_->data_.end() &&
- map_->removed_ids_.find(iter_->first) !=
- map_->removed_ids_.end()) {
+ while (iter_ != map_->data_.end() && map_->IsRemoved(iter_->first))
++iter_;
- }
}
IDMap<V, K>* map_;
@@ -210,8 +210,29 @@ class IDMap final {
typedef Iterator<const T> const_iterator;
private:
+ // Transforms a map iterator to an iterator on the keys of the map.
+ // Used by Clear() to populate |removed_ids_| in bulk.
+ struct KeyIterator : std::iterator<std::forward_iterator_tag, KeyType> {
+ using inner_iterator = typename HashTable::iterator;
+ inner_iterator iter_;
+
+ KeyIterator(inner_iterator iter) : iter_(iter) {}
+ KeyType operator*() const { return iter_->first; }
+ KeyIterator& operator++() {
+ ++iter_;
+ return *this;
+ }
+ KeyIterator operator++(int) { return KeyIterator(iter_++); }
+ bool operator==(const KeyIterator& other) const {
+ return iter_ == other.iter_;
+ }
+ bool operator!=(const KeyIterator& other) const {
+ return iter_ != other.iter_;
+ }
+ };
+
KeyType AddInternal(V data) {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!check_on_null_data_ || data);
KeyType this_id = next_id_;
DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
@@ -221,16 +242,24 @@ class IDMap final {
}
void AddWithIDInternal(V data, KeyType id) {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!check_on_null_data_ || data);
- DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+ if (IsRemoved(id)) {
+ removed_ids_.erase(id);
+ } else {
+ DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+ }
data_[id] = std::move(data);
}
+ bool IsRemoved(KeyType key) const {
+ return removed_ids_.find(key) != removed_ids_.end();
+ }
+
void Compact() {
DCHECK_EQ(0, iteration_depth_);
for (const auto& i : removed_ids_)
- Remove(i);
+ data_.erase(i);
removed_ids_.clear();
}
@@ -251,7 +280,7 @@ class IDMap final {
// See description above setter.
bool check_on_null_data_;
- base::SequenceChecker sequence_checker_;
+ SEQUENCE_CHECKER(sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(IDMap);
};
diff --git a/chromium/base/containers/id_map_unittest.cc b/chromium/base/containers/id_map_unittest.cc
index 7a14b8dea0f..346b69f2bb3 100644
--- a/chromium/base/containers/id_map_unittest.cc
+++ b/chromium/base/containers/id_map_unittest.cc
@@ -8,7 +8,7 @@
#include <memory>
-#include "base/memory/ptr_util.h"
+#include "base/test/gtest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -378,4 +378,22 @@ TEST(IDMapTest, Int64KeyType) {
EXPECT_TRUE(map.IsEmpty());
}
+TEST(IDMapTest, RemovedValueHandling) {
+ TestObject obj;
+ IDMap<TestObject*> map;
+ int key = map.Add(&obj);
+
+ IDMap<TestObject*>::iterator itr(&map);
+ map.Clear();
+ EXPECT_DCHECK_DEATH(map.Remove(key));
+ EXPECT_DCHECK_DEATH(map.Replace(key, &obj));
+ EXPECT_FALSE(map.Lookup(key));
+ EXPECT_FALSE(itr.IsAtEnd());
+ EXPECT_FALSE(itr.GetCurrentValue());
+
+ EXPECT_TRUE(map.IsEmpty());
+ map.AddWithID(&obj, key);
+ EXPECT_EQ(1u, map.size());
+}
+
} // namespace base
diff --git a/chromium/base/containers/linked_list.h b/chromium/base/containers/linked_list.h
index 41461ff365e..a913badb887 100644
--- a/chromium/base/containers/linked_list.h
+++ b/chromium/base/containers/linked_list.h
@@ -84,10 +84,24 @@ namespace base {
template <typename T>
class LinkNode {
public:
- LinkNode() : previous_(NULL), next_(NULL) {}
+ LinkNode() : previous_(nullptr), next_(nullptr) {}
LinkNode(LinkNode<T>* previous, LinkNode<T>* next)
: previous_(previous), next_(next) {}
+ LinkNode(LinkNode<T>&& rhs) {
+ next_ = rhs.next_;
+ rhs.next_ = nullptr;
+ previous_ = rhs.previous_;
+ rhs.previous_ = nullptr;
+
+ // If the node belongs to a list, next_ and previous_ are both non-null.
+ // Otherwise, they are both null.
+ if (next_) {
+ next_->previous_ = this;
+ previous_->next_ = this;
+ }
+ }
+
// Insert |this| into the linked list, before |e|.
void InsertBefore(LinkNode<T>* e) {
this->next_ = e;
@@ -108,10 +122,10 @@ class LinkNode {
void RemoveFromList() {
this->previous_->next_ = this->next_;
this->next_->previous_ = this->previous_;
- // next() and previous() return non-NULL if and only this node is not in any
+ // next() and previous() return non-null if and only this node is not in any
// list.
- this->next_ = NULL;
- this->previous_ = NULL;
+ this->next_ = nullptr;
+ this->previous_ = nullptr;
}
LinkNode<T>* previous() const {
diff --git a/chromium/base/containers/linked_list_unittest.cc b/chromium/base/containers/linked_list_unittest.cc
index 3470c86b62c..8e547ba3fe8 100644
--- a/chromium/base/containers/linked_list_unittest.cc
+++ b/chromium/base/containers/linked_list_unittest.cc
@@ -31,6 +31,18 @@ class MultipleInheritanceNode : public MultipleInheritanceNodeBase,
MultipleInheritanceNode() = default;
};
+class MovableNode : public LinkNode<MovableNode> {
+ public:
+ explicit MovableNode(int id) : id_(id) {}
+
+ MovableNode(MovableNode&&) = default;
+
+ int id() const { return id_; }
+
+ private:
+ int id_;
+};
+
// Checks that when iterating |list| (either from head to tail, or from
// tail to head, as determined by |forward|), we get back |node_ids|,
// which is an array of size |num_nodes|.
@@ -304,5 +316,34 @@ TEST(LinkedList, RemovedNodeHasNullNextPrevious) {
EXPECT_EQ(nullptr, n.previous());
}
+TEST(LinkedList, NodeMoveConstructor) {
+ LinkedList<MovableNode> list;
+
+ MovableNode n1(1);
+ MovableNode n2(2);
+ MovableNode n3(3);
+
+ list.Append(&n1);
+ list.Append(&n2);
+ list.Append(&n3);
+
+ EXPECT_EQ(&n1, n2.previous());
+ EXPECT_EQ(&n2, n1.next());
+ EXPECT_EQ(&n3, n2.next());
+ EXPECT_EQ(&n2, n3.previous());
+ EXPECT_EQ(2, n2.id());
+
+ MovableNode n2_new(std::move(n2));
+
+ EXPECT_EQ(nullptr, n2.next());
+ EXPECT_EQ(nullptr, n2.previous());
+
+ EXPECT_EQ(&n1, n2_new.previous());
+ EXPECT_EQ(&n2_new, n1.next());
+ EXPECT_EQ(&n3, n2_new.next());
+ EXPECT_EQ(&n2_new, n3.previous());
+ EXPECT_EQ(2, n2_new.id());
+}
+
} // namespace
} // namespace base
diff --git a/chromium/base/containers/span.h b/chromium/base/containers/span.h
index 7dec11805fc..3ae1247038e 100644
--- a/chromium/base/containers/span.h
+++ b/chromium/base/containers/span.h
@@ -13,6 +13,8 @@
#include <type_traits>
#include <utility>
+#include "base/logging.h"
+
namespace base {
template <typename T>
@@ -140,14 +142,18 @@ using EnableIfConstSpanCompatibleContainer =
// Differences from the working group proposal
// -------------------------------------------
//
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0122r5.pdf is the
-// latest working group proposal. The biggest difference is span does not
-// support a static extent template parameter. Other differences are documented
-// in subsections below.
+// https://wg21.link/P0122 is the latest working group proposal, Chromium
+// currently implements R6. The biggest difference is span does not support a
+// static extent template parameter. Other differences are documented in
+// subsections below.
//
// Differences from [views.constants]:
// - no dynamic_extent constant
//
+// Differences from [span.objectrep]:
+// - as_bytes() and as_writeable_bytes() return spans of uint8_t instead of
+// std::byte
+//
// Differences in constants and types:
// - no element_type type alias
// - no index_type type alias
@@ -157,26 +163,21 @@ using EnableIfConstSpanCompatibleContainer =
// Differences from [span.cons]:
// - no constructor from a pointer range
// - no constructor from std::array
-// - no constructor from std::unique_ptr
-// - no constructor from std::shared_ptr
-// - no explicitly defaulted the copy/move constructor/assignment operators,
-// since MSVC complains about constexpr functions that aren't marked const.
//
// Differences from [span.sub]:
// - no templated first()
// - no templated last()
// - no templated subspan()
+// - using size_t instead of ptrdiff_t for indexing
//
// Differences from [span.obs]:
-// - no length_bytes()
-// - no size_bytes()
+// - using size_t instead of ptrdiff_t to represent size()
//
// Differences from [span.elem]:
// - no operator ()()
-//
-// Differences from [span.objectrep]:
-// - no as_bytes()
-// - no as_writeable_bytes()
+// - using size_t instead of ptrdiff_t for indexing
+
+// [span], class template span
template <typename T>
class span {
public:
@@ -188,9 +189,8 @@ class span {
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- // span constructors, copy, assignment, and destructor
+ // [span.cons], span constructors, copy, assignment, and destructor
constexpr span() noexcept : data_(nullptr), size_(0) {}
- constexpr span(std::nullptr_t) noexcept : span() {}
constexpr span(T* data, size_t size) noexcept : data_(data), size_(size) {}
// TODO(dcheng): Implement construction from a |begin| and |end| pointer.
template <size_t N>
@@ -206,50 +206,62 @@ class span {
typename Container,
typename = internal::EnableIfConstSpanCompatibleContainer<Container, T>>
span(const Container& container) : span(container.data(), container.size()) {}
- ~span() noexcept = default;
+ constexpr span(const span& other) noexcept = default;
// Conversions from spans of compatible types: this allows a span<T> to be
// seamlessly used as a span<const T>, but not the other way around.
template <typename U, typename = internal::EnableIfLegalSpanConversion<U, T>>
constexpr span(const span<U>& other) : span(other.data(), other.size()) {}
- template <typename U, typename = internal::EnableIfLegalSpanConversion<U, T>>
- constexpr span(span<U>&& other) : span(other.data(), other.size()) {}
+ constexpr span& operator=(const span& other) noexcept = default;
+ ~span() noexcept = default;
- // span subviews
- // Note: ideally all of these would DCHECK, but it requires fairly horrible
- // contortions.
- constexpr span first(size_t count) const { return span(data_, count); }
+ // [span.sub], span subviews
+ constexpr span first(size_t count) const {
+ CHECK(count <= size_);
+ return span(data_, count);
+ }
constexpr span last(size_t count) const {
+ CHECK(count <= size_);
return span(data_ + (size_ - count), count);
}
constexpr span subspan(size_t pos, size_t count = -1) const {
- return span(data_ + pos, std::min(size_ - pos, count));
+ constexpr auto npos = static_cast<size_t>(-1);
+ CHECK(pos <= size_);
+ CHECK(count == npos || count <= size_ - pos);
+ return span(data_ + pos, count == npos ? size_ - pos : count);
}
- // span observers
- constexpr size_t length() const noexcept { return size_; }
+ // [span.obs], span observers
constexpr size_t size() const noexcept { return size_; }
+ constexpr size_t size_bytes() const noexcept { return size() * sizeof(T); }
constexpr bool empty() const noexcept { return size_ == 0; }
- // span element access
- constexpr T& operator[](size_t index) const noexcept { return data_[index]; }
+ // [span.elem], span element access
+ constexpr T& operator[](size_t index) const noexcept {
+ CHECK(index < size_);
+ return data_[index];
+ }
constexpr T* data() const noexcept { return data_; }
- // span iterator support
- iterator begin() const noexcept { return data_; }
- iterator end() const noexcept { return data_ + size_; }
+ // [span.iter], span iterator support
+ constexpr iterator begin() const noexcept { return data_; }
+ constexpr iterator end() const noexcept { return data_ + size_; }
- const_iterator cbegin() const noexcept { return begin(); }
- const_iterator cend() const noexcept { return end(); }
+ constexpr const_iterator cbegin() const noexcept { return begin(); }
+ constexpr const_iterator cend() const noexcept { return end(); }
- reverse_iterator rbegin() const noexcept { return reverse_iterator(end()); }
- reverse_iterator rend() const noexcept { return reverse_iterator(begin()); }
+ constexpr reverse_iterator rbegin() const noexcept {
+ return reverse_iterator(end());
+ }
+ constexpr reverse_iterator rend() const noexcept {
+ return reverse_iterator(begin());
+ }
- const_reverse_iterator crbegin() const noexcept {
+ constexpr const_reverse_iterator crbegin() const noexcept {
return const_reverse_iterator(cend());
}
- const_reverse_iterator crend() const noexcept {
+ constexpr const_reverse_iterator crend() const noexcept {
return const_reverse_iterator(cbegin());
}
@@ -258,38 +270,50 @@ class span {
size_t size_;
};
+// [span.comparison], span comparison operators
// Relational operators. Equality is a element-wise comparison.
-template <typename T>
-constexpr bool operator==(const span<T>& lhs, const span<T>& rhs) noexcept {
+template <typename T, typename U>
+constexpr bool operator==(span<T> lhs, span<U> rhs) noexcept {
return std::equal(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend());
}
-template <typename T>
-constexpr bool operator!=(const span<T>& lhs, const span<T>& rhs) noexcept {
+template <typename T, typename U>
+constexpr bool operator!=(span<T> lhs, span<U> rhs) noexcept {
return !(lhs == rhs);
}
-template <typename T>
-constexpr bool operator<(const span<T>& lhs, const span<T>& rhs) noexcept {
+template <typename T, typename U>
+constexpr bool operator<(span<T> lhs, span<U> rhs) noexcept {
return std::lexicographical_compare(lhs.cbegin(), lhs.cend(), rhs.cbegin(),
rhs.cend());
}
-template <typename T>
-constexpr bool operator<=(const span<T>& lhs, const span<T>& rhs) noexcept {
+template <typename T, typename U>
+constexpr bool operator<=(span<T> lhs, span<U> rhs) noexcept {
return !(rhs < lhs);
}
-template <typename T>
-constexpr bool operator>(const span<T>& lhs, const span<T>& rhs) noexcept {
+template <typename T, typename U>
+constexpr bool operator>(span<T> lhs, span<U> rhs) noexcept {
return rhs < lhs;
}
-template <typename T>
-constexpr bool operator>=(const span<T>& lhs, const span<T>& rhs) noexcept {
+template <typename T, typename U>
+constexpr bool operator>=(span<T> lhs, span<U> rhs) noexcept {
return !(lhs < rhs);
}
+// [span.objectrep], views of object representation
+template <typename T>
+span<const uint8_t> as_bytes(span<T> s) noexcept {
+ return {reinterpret_cast<const uint8_t*>(s.data()), s.size_bytes()};
+}
+
+template <typename T, typename = std::enable_if_t<!std::is_const<T>::value>>
+span<uint8_t> as_writable_bytes(span<T> s) noexcept {
+ return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
+}
+
// Type-deducing helpers for constructing a span.
template <typename T>
constexpr span<T> make_span(T* data, size_t size) noexcept {
diff --git a/chromium/base/containers/span_unittest.cc b/chromium/base/containers/span_unittest.cc
index 42b8b0a0236..2620f103c2d 100644
--- a/chromium/base/containers/span_unittest.cc
+++ b/chromium/base/containers/span_unittest.cc
@@ -6,6 +6,7 @@
#include <stdint.h>
+#include <algorithm>
#include <memory>
#include <vector>
@@ -19,8 +20,8 @@ using ::testing::Pointwise;
namespace base {
-TEST(SpanTest, ConstructFromNullptr) {
- span<int> span(nullptr);
+TEST(SpanTest, DefaultConstructor) {
+ span<int> span;
EXPECT_EQ(nullptr, span.data());
EXPECT_EQ(0u, span.size());
}
@@ -295,29 +296,29 @@ TEST(SpanTest, Subspan) {
}
}
-TEST(SpanTest, Length) {
+TEST(SpanTest, Size) {
{
span<int> span;
- EXPECT_EQ(0u, span.length());
+ EXPECT_EQ(0u, span.size());
}
{
int array[] = {1, 2, 3};
span<int> span(array);
- EXPECT_EQ(3u, span.length());
+ EXPECT_EQ(3u, span.size());
}
}
-TEST(SpanTest, Size) {
+TEST(SpanTest, SizeBytes) {
{
span<int> span;
- EXPECT_EQ(0u, span.size());
+ EXPECT_EQ(0u, span.size_bytes());
}
{
int array[] = {1, 2, 3};
span<int> span(array);
- EXPECT_EQ(3u, span.size());
+ EXPECT_EQ(3u * sizeof(int), span.size_bytes());
}
}
@@ -366,6 +367,11 @@ TEST(SpanTest, Equality) {
constexpr span<const int> span3(kArray3);
EXPECT_FALSE(span1 == span3);
+
+ static double kArray4[] = {2.0, 7.0, 1.0, 8.0, 3.0};
+ span<double> span4(kArray4);
+
+ EXPECT_EQ(span3, span4);
}
TEST(SpanTest, Inequality) {
@@ -380,6 +386,11 @@ TEST(SpanTest, Inequality) {
constexpr span<const int> span3(kArray3);
EXPECT_FALSE(span1 != span3);
+
+ static double kArray4[] = {1.0, 4.0, 6.0, 8.0, 9.0};
+ span<double> span4(kArray4);
+
+ EXPECT_NE(span3, span4);
}
TEST(SpanTest, LessThan) {
@@ -394,6 +405,11 @@ TEST(SpanTest, LessThan) {
constexpr span<const int> span3(kArray3);
EXPECT_FALSE(span1 < span3);
+
+ static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0, 13.0};
+ span<double> span4(kArray4);
+
+ EXPECT_LT(span3, span4);
}
TEST(SpanTest, LessEqual) {
@@ -409,6 +425,11 @@ TEST(SpanTest, LessEqual) {
constexpr span<const int> span3(kArray3);
EXPECT_FALSE(span1 <= span3);
+
+ static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0, 13.0};
+ span<double> span4(kArray4);
+
+ EXPECT_LE(span3, span4);
}
TEST(SpanTest, GreaterThan) {
@@ -423,6 +444,11 @@ TEST(SpanTest, GreaterThan) {
constexpr span<const int> span3(kArray3);
EXPECT_FALSE(span1 > span3);
+
+ static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0};
+ span<double> span4(kArray4);
+
+ EXPECT_GT(span3, span4);
}
TEST(SpanTest, GreaterEqual) {
@@ -438,6 +464,44 @@ TEST(SpanTest, GreaterEqual) {
constexpr span<const int> span3(kArray3);
EXPECT_FALSE(span1 >= span3);
+
+ static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0};
+ span<double> span4(kArray4);
+
+ EXPECT_GE(span3, span4);
+}
+
+TEST(SpanTest, AsBytes) {
+ {
+ constexpr int kArray[] = {2, 3, 5, 7, 11, 13};
+ span<const uint8_t> bytes_span = as_bytes(make_span(kArray));
+ EXPECT_EQ(reinterpret_cast<const uint8_t*>(kArray), bytes_span.data());
+ EXPECT_EQ(sizeof(kArray), bytes_span.size());
+ EXPECT_EQ(bytes_span.size(), bytes_span.size_bytes());
+ }
+
+ {
+ std::vector<int> vec = {1, 1, 2, 3, 5, 8};
+ span<int> mutable_span(vec);
+ span<const uint8_t> bytes_span = as_bytes(mutable_span);
+ EXPECT_EQ(reinterpret_cast<const uint8_t*>(vec.data()), bytes_span.data());
+ EXPECT_EQ(sizeof(int) * vec.size(), bytes_span.size());
+ EXPECT_EQ(bytes_span.size(), bytes_span.size_bytes());
+ }
+}
+
+TEST(SpanTest, AsWritableBytes) {
+ std::vector<int> vec = {1, 1, 2, 3, 5, 8};
+ span<int> mutable_span(vec);
+ span<uint8_t> writable_bytes_span = as_writable_bytes(mutable_span);
+ EXPECT_EQ(reinterpret_cast<uint8_t*>(vec.data()), writable_bytes_span.data());
+ EXPECT_EQ(sizeof(int) * vec.size(), writable_bytes_span.size());
+ EXPECT_EQ(writable_bytes_span.size(), writable_bytes_span.size_bytes());
+
+ // Set the first entry of vec to zero while writing through the span.
+ std::fill(writable_bytes_span.data(),
+ writable_bytes_span.data() + sizeof(int), 0);
+ EXPECT_EQ(0, vec[0]);
}
TEST(SpanTest, MakeSpanFromDataAndSize) {
@@ -464,4 +528,29 @@ TEST(SpanTest, MakeSpanFromContainer) {
EXPECT_EQ(span, make_span(vector));
}
+TEST(SpanTest, EnsureConstexprGoodness) {
+ static constexpr int kArray[] = {5, 4, 3, 2, 1};
+ constexpr span<const int> constexpr_span(kArray);
+ const size_t size = 2;
+
+ const size_t start = 1;
+ constexpr span<const int> subspan =
+ constexpr_span.subspan(start, start + size);
+ for (size_t i = 0; i < subspan.size(); ++i)
+ EXPECT_EQ(kArray[start + i], subspan[i]);
+
+ constexpr span<const int> firsts = constexpr_span.first(size);
+ for (size_t i = 0; i < firsts.size(); ++i)
+ EXPECT_EQ(kArray[i], firsts[i]);
+
+ constexpr span<const int> lasts = constexpr_span.last(size);
+ for (size_t i = 0; i < lasts.size(); ++i) {
+ const size_t j = (arraysize(kArray) - size) + i;
+ EXPECT_EQ(kArray[j], lasts[i]);
+ }
+
+ constexpr int item = constexpr_span[size];
+ EXPECT_EQ(kArray[size], item);
+}
+
} // namespace base
diff --git a/chromium/base/containers/span_unittest.nc b/chromium/base/containers/span_unittest.nc
index d8591b25e0d..824cd7534ea 100644
--- a/chromium/base/containers/span_unittest.nc
+++ b/chromium/base/containers/span_unittest.nc
@@ -66,6 +66,14 @@ void WontCompile() {
span<int> span(set);
}
+#elif defined(NCTEST_AS_WRITABLE_BYTES_WITH_CONST_CONTAINER_DISALLOWED) // [r"fatal error: no matching function for call to 'as_writable_bytes'"]
+
+// as_writable_bytes should not be possible for a const container.
+void WontCompile() {
+ const std::vector<int> v = {1, 2, 3};
+ span<uint8_t> bytes = as_writable_bytes(make_span(v));
+}
+
#elif defined(NCTEST_MAKE_SPAN_FROM_SET_CONVERSION_DISALLOWED) // [r"fatal error: no matching function for call to 'make_span'"]
// A std::set() should not satisfy the requirements for conversion to a span.
diff --git a/chromium/base/debug/activity_tracker.cc b/chromium/base/debug/activity_tracker.cc
index 20c56480694..99735f6d3e8 100644
--- a/chromium/base/debug/activity_tracker.cc
+++ b/chromium/base/debug/activity_tracker.cc
@@ -25,6 +25,7 @@
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
namespace base {
namespace debug {
@@ -260,8 +261,9 @@ void Activity::FillFrom(Activity* activity,
activity->activity_type = type;
activity->data = data;
-#if defined(SYZYASAN)
- // Create a stacktrace from the current location and get the addresses.
+#if (!defined(OS_NACL) && DCHECK_IS_ON()) || defined(ADDRESS_SANITIZER)
+ // Create a stacktrace from the current location and get the addresses for
+ // improved debuggability.
StackTrace stack_trace;
size_t stack_depth;
const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
diff --git a/chromium/base/debug/asan_invalid_access.cc b/chromium/base/debug/asan_invalid_access.cc
index ba222ae9f93..07c19db9c50 100644
--- a/chromium/base/debug/asan_invalid_access.cc
+++ b/chromium/base/debug/asan_invalid_access.cc
@@ -21,13 +21,7 @@ namespace debug {
namespace {
-#if defined(SYZYASAN) && defined(COMPILER_MSVC)
-// Disable warning C4530: "C++ exception handler used, but unwind semantics are
-// not enabled". We don't want to change the compilation flags just for this
-// test, and no exception should be triggered here, so this warning has no value
-// here.
-#pragma warning(push)
-#pragma warning(disable: 4530)
+#if defined(OS_WIN) && defined(ADDRESS_SANITIZER)
// Corrupt a memory block and make sure that the corruption gets detected either
// when we free it or when another crash happens (if |induce_crash| is set to
// true).
@@ -35,27 +29,28 @@ NOINLINE void CorruptMemoryBlock(bool induce_crash) {
// NOTE(sebmarchand): We intentionally corrupt a memory block here in order to
// trigger an Address Sanitizer (ASAN) error report.
static const int kArraySize = 5;
- int* array = new int[kArraySize];
- // Encapsulate the invalid memory access into a try-catch statement to prevent
- // this function from being instrumented. This way the underflow won't be
- // detected but the corruption will (as the allocator will still be hooked).
- try {
- // Declares the dummy value as volatile to make sure it doesn't get
- // optimized away.
- int volatile dummy = array[-1]--;
- base::debug::Alias(const_cast<int*>(&dummy));
- } catch (...) {
- }
+ LONG* array = new LONG[kArraySize];
+
+ // Explicitly call out to a kernel32 function to perform the memory access.
+ // This way the underflow won't be detected but the corruption will (as the
+ // allocator will still be hooked).
+ auto InterlockedIncrementFn =
+ reinterpret_cast<LONG (*)(LONG volatile * addend)>(
+ GetProcAddress(GetModuleHandle(L"kernel32"), "InterlockedIncrement"));
+ CHECK(InterlockedIncrementFn);
+
+ LONG volatile dummy = InterlockedIncrementFn(array - 1);
+ base::debug::Alias(const_cast<LONG*>(&dummy));
+
if (induce_crash)
CHECK(false);
delete[] array;
}
-#pragma warning(pop)
-#endif // SYZYASAN && COMPILER_MSVC
+#endif // OS_WIN && ADDRESS_SANITIZER
} // namespace
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
// NOTE(sebmarchand): We intentionally perform some invalid heap access here in
// order to trigger an AddressSanitizer (ASan) error report.
@@ -91,9 +86,7 @@ void AsanHeapUseAfterFree() {
base::debug::Alias(&dummy);
}
-#endif // ADDRESS_SANITIZER || SYZYASAN
-
-#if defined(SYZYASAN) && defined(COMPILER_MSVC)
+#if defined(OS_WIN)
void AsanCorruptHeapBlock() {
CorruptMemoryBlock(false);
}
@@ -101,7 +94,8 @@ void AsanCorruptHeapBlock() {
void AsanCorruptHeap() {
CorruptMemoryBlock(true);
}
-#endif // SYZYASAN && COMPILER_MSVC
+#endif // OS_WIN
+#endif // ADDRESS_SANITIZER
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/asan_invalid_access.h b/chromium/base/debug/asan_invalid_access.h
index bc9390e53d6..dc9a7ee647a 100644
--- a/chromium/base/debug/asan_invalid_access.h
+++ b/chromium/base/debug/asan_invalid_access.h
@@ -10,11 +10,12 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "build/build_config.h"
namespace base {
namespace debug {
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
// Generates an heap buffer overflow.
BASE_EXPORT NOINLINE void AsanHeapOverflow();
@@ -25,12 +26,9 @@ BASE_EXPORT NOINLINE void AsanHeapUnderflow();
// Generates an use after free.
BASE_EXPORT NOINLINE void AsanHeapUseAfterFree();
-#endif // ADDRESS_SANITIZER || SYZYASAN
-
// The "corrupt-block" and "corrupt-heap" classes of bugs is specific to
-// SyzyASan.
-#if defined(SYZYASAN) && defined(COMPILER_MSVC)
-
+// Windows.
+#if defined(OS_WIN)
// Corrupts a memory block and makes sure that the corruption gets detected when
// we try to free this block.
BASE_EXPORT NOINLINE void AsanCorruptHeapBlock();
@@ -39,7 +37,8 @@ BASE_EXPORT NOINLINE void AsanCorruptHeapBlock();
// crash occur.
BASE_EXPORT NOINLINE void AsanCorruptHeap();
-#endif // SYZYASAN && COMPILER_MSVC
+#endif // OS_WIN
+#endif // ADDRESS_SANITIZER
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/close_handle_hook_win.cc b/chromium/base/debug/close_handle_hook_win.cc
index 1f2aeab6d18..1f1f4321306 100644
--- a/chromium/base/debug/close_handle_hook_win.cc
+++ b/chromium/base/debug/close_handle_hook_win.cc
@@ -207,13 +207,15 @@ void HandleHooks::AddIATPatch(HMODULE module) {
return;
base::win::IATPatchFunction* patch = NULL;
- patch = IATPatch(module, "CloseHandle", &CloseHandleHook,
- reinterpret_cast<void**>(&g_close_function));
+ patch =
+ IATPatch(module, "CloseHandle", reinterpret_cast<void*>(&CloseHandleHook),
+ reinterpret_cast<void**>(&g_close_function));
if (!patch)
return;
hooks_.push_back(patch);
- patch = IATPatch(module, "DuplicateHandle", &DuplicateHandleHook,
+ patch = IATPatch(module, "DuplicateHandle",
+ reinterpret_cast<void*>(&DuplicateHandleHook),
reinterpret_cast<void**>(&g_duplicate_function));
if (!patch)
return;
@@ -223,9 +225,10 @@ void HandleHooks::AddIATPatch(HMODULE module) {
void HandleHooks::AddEATPatch() {
// An attempt to restore the entry on the table at destruction is not safe.
EATPatch(GetModuleHandleA("kernel32.dll"), "CloseHandle",
- &CloseHandleHook, reinterpret_cast<void**>(&g_close_function));
+ reinterpret_cast<void*>(&CloseHandleHook),
+ reinterpret_cast<void**>(&g_close_function));
EATPatch(GetModuleHandleA("kernel32.dll"), "DuplicateHandle",
- &DuplicateHandleHook,
+ reinterpret_cast<void*>(&DuplicateHandleHook),
reinterpret_cast<void**>(&g_duplicate_function));
}
diff --git a/chromium/base/debug/profiler.cc b/chromium/base/debug/profiler.cc
index 77181622df6..1ee948334e1 100644
--- a/chromium/base/debug/profiler.cc
+++ b/chromium/base/debug/profiler.cc
@@ -87,10 +87,6 @@ bool IsProfilingSupported() {
#if !defined(OS_WIN)
-bool IsBinaryInstrumented() {
- return false;
-}
-
ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
return nullptr;
}
@@ -109,36 +105,6 @@ MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
#else // defined(OS_WIN)
-bool IsBinaryInstrumented() {
- enum InstrumentationCheckState {
- UNINITIALIZED,
- INSTRUMENTED_IMAGE,
- NON_INSTRUMENTED_IMAGE,
- };
-
- static InstrumentationCheckState state = UNINITIALIZED;
-
- if (state == UNINITIALIZED) {
- base::win::PEImage image(CURRENT_MODULE());
-
- // Check to be sure our image is structured as we'd expect.
- DCHECK(image.VerifyMagic());
-
- // Syzygy-instrumented binaries contain a PE image section named ".thunks",
- // and all Syzygy-modified binaries contain the ".syzygy" image section.
- // This is a very fast check, as it only looks at the image header.
- if ((image.GetImageSectionHeaderByName(".thunks") != NULL) &&
- (image.GetImageSectionHeaderByName(".syzygy") != NULL)) {
- state = INSTRUMENTED_IMAGE;
- } else {
- state = NON_INSTRUMENTED_IMAGE;
- }
- }
- DCHECK(state != UNINITIALIZED);
-
- return state == INSTRUMENTED_IMAGE;
-}
-
namespace {
struct FunctionSearchContext {
@@ -186,9 +152,6 @@ bool FindResolutionFunctionInImports(
template <typename FunctionType>
FunctionType FindFunctionInImports(const char* function_name) {
- if (!IsBinaryInstrumented())
- return NULL;
-
base::win::PEImage image(CURRENT_MODULE());
FunctionSearchContext ctx = { function_name, NULL };
diff --git a/chromium/base/debug/profiler.h b/chromium/base/debug/profiler.h
index ea81b13c6ae..f706a1a3b4e 100644
--- a/chromium/base/debug/profiler.h
+++ b/chromium/base/debug/profiler.h
@@ -35,9 +35,6 @@ BASE_EXPORT bool BeingProfiled();
// Reset profiling after a fork, which disables timers.
BASE_EXPORT void RestartProfilingAfterFork();
-// Returns true iff this executable is instrumented with the Syzygy profiler.
-BASE_EXPORT bool IsBinaryInstrumented();
-
// Returns true iff this executable supports profiling.
BASE_EXPORT bool IsProfilingSupported();
diff --git a/chromium/base/debug/task_annotator.cc b/chromium/base/debug/task_annotator.cc
index d67412be928..2197b859159 100644
--- a/chromium/base/debug/task_annotator.cc
+++ b/chromium/base/debug/task_annotator.cc
@@ -8,12 +8,29 @@
#include "base/debug/activity_tracker.h"
#include "base/debug/alias.h"
+#include "base/no_destructor.h"
#include "base/pending_task.h"
+#include "base/threading/thread_local.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace debug {
+namespace {
+
+TaskAnnotator::ObserverForTesting* g_task_annotator_observer = nullptr;
+
+// Returns the TLS slot that stores the PendingTask currently in progress on
+// each thread. Used to allow creating a breadcrumb of program counters on the
+// stack to help identify a task's origin in crashes.
+ThreadLocalPointer<const PendingTask>* GetTLSForCurrentPendingTask() {
+ static NoDestructor<ThreadLocalPointer<const PendingTask>>
+ tls_for_current_pending_task;
+ return tls_for_current_pending_task.get();
+}
+
+} // namespace
+
TaskAnnotator::TaskAnnotator() = default;
TaskAnnotator::~TaskAnnotator() = default;
@@ -26,6 +43,21 @@ void TaskAnnotator::DidQueueTask(const char* queue_function,
TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
TRACE_EVENT_FLAG_FLOW_OUT);
}
+
+ // TODO(https://crbug.com/826902): Fix callers that invoke DidQueueTask()
+ // twice for the same PendingTask.
+ // DCHECK(!pending_task.task_backtrace[0])
+ // << "Task backtrace was already set, task posted twice??";
+ if (!pending_task.task_backtrace[0]) {
+ const PendingTask* parent_task = GetTLSForCurrentPendingTask()->Get();
+ if (parent_task) {
+ pending_task.task_backtrace[0] =
+ parent_task->posted_from.program_counter();
+ std::copy(parent_task->task_backtrace.begin(),
+ parent_task->task_backtrace.end() - 1,
+ pending_task.task_backtrace.begin() + 1);
+ }
+ }
}
void TaskAnnotator::RunTask(const char* queue_function,
@@ -58,7 +90,17 @@ void TaskAnnotator::RunTask(const char* queue_function,
pending_task->task_backtrace.end(), task_backtrace.begin() + 2);
debug::Alias(&task_backtrace);
+ ThreadLocalPointer<const PendingTask>* tls_for_current_pending_task =
+ GetTLSForCurrentPendingTask();
+ const PendingTask* previous_pending_task =
+ tls_for_current_pending_task->Get();
+ tls_for_current_pending_task->Set(pending_task);
+
+ if (g_task_annotator_observer)
+ g_task_annotator_observer->BeforeRunTask(pending_task);
std::move(pending_task->task).Run();
+
+ tls_for_current_pending_task->Set(previous_pending_task);
}
uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
@@ -67,5 +109,16 @@ uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
32);
}
+// static
+void TaskAnnotator::RegisterObserverForTesting(ObserverForTesting* observer) {
+ DCHECK(!g_task_annotator_observer);
+ g_task_annotator_observer = observer;
+}
+
+// static
+void TaskAnnotator::ClearObserverForTesting() {
+ g_task_annotator_observer = nullptr;
+}
+
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/task_annotator.h b/chromium/base/debug/task_annotator.h
index de03e418dd9..f53d02c2c39 100644
--- a/chromium/base/debug/task_annotator.h
+++ b/chromium/base/debug/task_annotator.h
@@ -18,6 +18,13 @@ namespace debug {
// such as task origins, queueing durations and memory usage.
class BASE_EXPORT TaskAnnotator {
public:
+ class ObserverForTesting {
+ public:
+ // Invoked just before RunTask() in the scope in which the task is about to
+ // be executed.
+ virtual void BeforeRunTask(const PendingTask* pending_task) = 0;
+ };
+
TaskAnnotator();
~TaskAnnotator();
@@ -40,6 +47,14 @@ class BASE_EXPORT TaskAnnotator {
uint64_t GetTaskTraceID(const PendingTask& task) const;
private:
+ friend class TaskAnnotatorBacktraceIntegrationTest;
+
+ // Registers an ObserverForTesting that will be invoked by all TaskAnnotators'
+ // RunTask(). This registration and the implementation of BeforeRunTask() are
+ // responsible to ensure thread-safety.
+ static void RegisterObserverForTesting(ObserverForTesting* observer);
+ static void ClearObserverForTesting();
+
DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
};
diff --git a/chromium/base/debug/task_annotator_unittest.cc b/chromium/base/debug/task_annotator_unittest.cc
index bfb0e7c9139..51a5d3295c8 100644
--- a/chromium/base/debug/task_annotator_unittest.cc
+++ b/chromium/base/debug/task_annotator_unittest.cc
@@ -3,8 +3,24 @@
// found in the LICENSE file.
#include "base/debug/task_annotator.h"
+
+#include <algorithm>
+#include <vector>
+
#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
#include "base/pending_task.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -28,5 +44,328 @@ TEST(TaskAnnotatorTest, QueueAndRunTask) {
EXPECT_EQ(123, result);
}
+// Test task annotator integration in base APIs and ensuing support for
+// backtraces. Tasks posted across multiple threads in this test fixture should
+// be synchronized as BeforeRunTask() and VerifyTraceAndPost() assume tasks are
+// observed in lock steps, one at a time.
+class TaskAnnotatorBacktraceIntegrationTest
+ : public ::testing::Test,
+ public TaskAnnotator::ObserverForTesting {
+ public:
+ using ExpectedTrace = std::vector<const void*>;
+
+ TaskAnnotatorBacktraceIntegrationTest() = default;
+
+ ~TaskAnnotatorBacktraceIntegrationTest() override = default;
+
+ // TaskAnnotator::ObserverForTesting:
+ void BeforeRunTask(const PendingTask* pending_task) override {
+ AutoLock auto_lock(on_before_run_task_lock_);
+ last_posted_from_ = pending_task->posted_from;
+ last_task_backtrace_ = pending_task->task_backtrace;
+ }
+
+ void SetUp() override { TaskAnnotator::RegisterObserverForTesting(this); }
+
+ void TearDown() override { TaskAnnotator::ClearObserverForTesting(); }
+
+ void VerifyTraceAndPost(const scoped_refptr<SequencedTaskRunner>& task_runner,
+ const Location& posted_from,
+ const Location& next_from_here,
+ const ExpectedTrace& expected_trace,
+ OnceClosure task) {
+ SCOPED_TRACE(StringPrintf("Callback Depth: %zu", expected_trace.size()));
+
+ EXPECT_EQ(posted_from, last_posted_from_);
+ for (size_t i = 0; i < last_task_backtrace_.size(); i++) {
+ SCOPED_TRACE(StringPrintf("Trace frame: %zu", i));
+ if (i < expected_trace.size())
+ EXPECT_EQ(expected_trace[i], last_task_backtrace_[i]);
+ else
+ EXPECT_EQ(nullptr, last_task_backtrace_[i]);
+ }
+
+ task_runner->PostTask(next_from_here, std::move(task));
+ }
+
+ // Same as VerifyTraceAndPost() with the exception that it also posts a task
+ // that will prevent |task| from running until |wait_before_next_task| is
+ // signaled.
+ void VerifyTraceAndPostWithBlocker(
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ const Location& posted_from,
+ const Location& next_from_here,
+ const ExpectedTrace& expected_trace,
+ OnceClosure task,
+ WaitableEvent* wait_before_next_task) {
+ DCHECK(wait_before_next_task);
+
+ // Need to lock to ensure the upcoming VerifyTraceAndPost() runs before the
+ // BeforeRunTask() hook for the posted WaitableEvent::Wait(). Otherwise the
+ // upcoming VerifyTraceAndPost() will race to read the state saved in the
+ // BeforeRunTask() hook preceding the current task.
+ AutoLock auto_lock(on_before_run_task_lock_);
+ task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(&WaitableEvent::Wait, Unretained(wait_before_next_task)));
+ VerifyTraceAndPost(task_runner, posted_from, next_from_here, expected_trace,
+ std::move(task));
+ }
+
+ protected:
+ static void RunTwo(OnceClosure c1, OnceClosure c2) {
+ std::move(c1).Run();
+ std::move(c2).Run();
+ }
+
+ private:
+ // While calls to VerifyTraceAndPost() are strictly ordered in tests below
+ // (and hence non-racy), some helper methods (e.g. Wait/Signal) do racily call
+ // into BeforeRunTask(). This Lock ensures these unobserved writes are not
+ // racing. Locking isn't required on read per the VerifyTraceAndPost()
+ // themselves being ordered.
+ Lock on_before_run_task_lock_;
+
+ Location last_posted_from_ = {};
+ std::array<const void*, 4> last_task_backtrace_ = {};
+
+ DISALLOW_COPY_AND_ASSIGN(TaskAnnotatorBacktraceIntegrationTest);
+};
+
+// Ensure the task backtrace populates correctly.
+TEST_F(TaskAnnotatorBacktraceIntegrationTest, SingleThreadedSimple) {
+ MessageLoop loop;
+ const Location location0 = FROM_HERE;
+ const Location location1 = FROM_HERE;
+ const Location location2 = FROM_HERE;
+ const Location location3 = FROM_HERE;
+ const Location location4 = FROM_HERE;
+ const Location location5 = FROM_HERE;
+
+ RunLoop run_loop;
+
+ // Task 5 has tasks 4/3/2/1 as parents (task 0 isn't visible as only the
+ // last 4 parents are kept).
+ OnceClosure task5 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location5, FROM_HERE,
+ ExpectedTrace({location4.program_counter(), location3.program_counter(),
+ location2.program_counter(), location1.program_counter()}),
+ run_loop.QuitClosure());
+
+ // Task i=4/3/2/1/0 have tasks [0,i) as parents.
+ OnceClosure task4 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location4, location5,
+ ExpectedTrace({location3.program_counter(), location2.program_counter(),
+ location1.program_counter(), location0.program_counter()}),
+ std::move(task5));
+ OnceClosure task3 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location3, location4,
+ ExpectedTrace({location2.program_counter(), location1.program_counter(),
+ location0.program_counter()}),
+ std::move(task4));
+ OnceClosure task2 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location2, location3,
+ ExpectedTrace({location1.program_counter(), location0.program_counter()}),
+ std::move(task3));
+ OnceClosure task1 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location1, location2,
+ ExpectedTrace({location0.program_counter()}), std::move(task2));
+ OnceClosure task0 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location0, location1,
+ ExpectedTrace({}), std::move(task1));
+
+ loop.task_runner()->PostTask(location0, std::move(task0));
+
+ run_loop.Run();
+}
+
+// Ensure it works when posting tasks across multiple threads managed by //base.
+TEST_F(TaskAnnotatorBacktraceIntegrationTest, MultipleThreads) {
+ test::ScopedTaskEnvironment scoped_task_environment;
+
+ // Use diverse task runners (a MessageLoop on the main thread, a TaskScheduler
+ // based SequencedTaskRunner, and a TaskScheduler based
+ // SingleThreadTaskRunner) to verify that TaskAnnotator can capture backtraces
+ // for PostTasks back-and-forth between these.
+ auto main_thread_a = ThreadTaskRunnerHandle::Get();
+ auto task_runner_b = CreateSingleThreadTaskRunnerWithTraits({});
+ auto task_runner_c = CreateSequencedTaskRunnerWithTraits(
+ {base::MayBlock(), base::WithBaseSyncPrimitives()});
+
+ const Location& location_a0 = FROM_HERE;
+ const Location& location_a1 = FROM_HERE;
+ const Location& location_a2 = FROM_HERE;
+ const Location& location_a3 = FROM_HERE;
+
+ const Location& location_b0 = FROM_HERE;
+ const Location& location_b1 = FROM_HERE;
+
+ const Location& location_c0 = FROM_HERE;
+
+ RunLoop run_loop;
+
+ // All tasks below happen in lock step by nature of being posted by the
+ // previous one (plus the synchronous nature of RunTwo()) with the exception
+ // of the follow-up local task to |task_b0_local|. This WaitableEvent ensures
+ // it completes before |task_c0| runs to avoid racy invocations of
+ // BeforeRunTask()+VerifyTraceAndPost().
+ WaitableEvent lock_step(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ // Here is the execution order generated below:
+ // A: TA0 -> TA1 \ TA2
+ // B: TB0L \ + TB0F \ Signal \ /
+ // ---------\--/ \ /
+ // \ \ /
+ // C: Wait........ TC0 /
+
+ // On task runner c, post a task back to main thread that verifies its trace
+ // and terminates after one more self-post.
+ OnceClosure task_a2 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), main_thread_a, location_a2, location_a3,
+ ExpectedTrace(
+ {location_c0.program_counter(), location_b0.program_counter(),
+ location_a1.program_counter(), location_a0.program_counter()}),
+ run_loop.QuitClosure());
+ OnceClosure task_c0 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), main_thread_a, location_c0, location_a2,
+ ExpectedTrace({location_b0.program_counter(),
+ location_a1.program_counter(),
+ location_a0.program_counter()}),
+ std::move(task_a2));
+
+ // On task runner b run two tasks that conceptually come from the same
+ // location (managed via RunTwo().) One will post back to task runner b and
+ // another will post to task runner c to test spawning multiple tasks on
+ // different message loops. The task posted to task runner c will not get
+ // location b1 whereas the one posted back to task runner b will.
+ OnceClosure task_b0_fork = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPostWithBlocker,
+ Unretained(this), task_runner_c, location_b0, location_c0,
+ ExpectedTrace(
+ {location_a1.program_counter(), location_a0.program_counter()}),
+ std::move(task_c0), &lock_step);
+ OnceClosure task_b0_local =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), task_runner_b, location_b0, location_b1,
+ ExpectedTrace({location_a1.program_counter(),
+ location_a0.program_counter()}),
+ BindOnce(&WaitableEvent::Signal, Unretained(&lock_step)));
+
+ OnceClosure task_a1 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), task_runner_b, location_a1, location_b0,
+ ExpectedTrace({location_a0.program_counter()}),
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::RunTwo,
+ std::move(task_b0_local), std::move(task_b0_fork)));
+ OnceClosure task_a0 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), main_thread_a, location_a0, location_a1,
+ ExpectedTrace({}), std::move(task_a1));
+
+ main_thread_a->PostTask(location_a0, std::move(task_a0));
+
+ run_loop.Run();
+}
+
+// Ensure nesting doesn't break the chain.
+TEST_F(TaskAnnotatorBacktraceIntegrationTest, SingleThreadedNested) {
+ MessageLoop loop;
+ const Location location0 = FROM_HERE;
+ const Location location1 = FROM_HERE;
+ const Location location2 = FROM_HERE;
+ const Location location3 = FROM_HERE;
+ const Location location4 = FROM_HERE;
+ const Location location5 = FROM_HERE;
+
+ RunLoop run_loop;
+
+ // Task execution below looks like this, w.r.t. to RunLoop depths:
+ // 1 : T0 \ + NRL1 \ ---------> T4 -> T5
+ // 2 : ---------> T1 \ -> NRL2 \ ----> T2 -> T3 / + Quit /
+ // 3 : ---------> DN /
+
+ // NRL1 tests that tasks that occur at a different nesting depth than their
+ // parent have a sane backtrace nonetheless (both ways).
+
+ // NRL2 tests that posting T2 right after exiting the RunLoop (from the same
+ // task) results in NRL2 being its parent (and not the DoNothing() task that
+ // just ran -- which would have been the case if the "current task" wasn't
+ // restored properly when returning from a task within a task).
+
+ // In other words, this is regression test for a bug in the previous
+ // implementation. In the current implementation, replacing
+ // tls_for_current_pending_task->Set(previous_pending_task);
+ // by
+ // tls_for_current_pending_task->Set(nullptr);
+ // at the end of TaskAnnotator::RunTask() makes this test fail.
+
+ RunLoop nested_run_loop1(RunLoop::Type::kNestableTasksAllowed);
+
+ // Expectations are the same as in SingleThreadedSimple test despite the
+ // nested loop starting between tasks 0 and 1 and stopping between tasks 3 and
+ // 4.
+ OnceClosure task5 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location5, FROM_HERE,
+ ExpectedTrace({location4.program_counter(), location3.program_counter(),
+ location2.program_counter(), location1.program_counter()}),
+ run_loop.QuitClosure());
+ OnceClosure task4 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location4, location5,
+ ExpectedTrace({location3.program_counter(), location2.program_counter(),
+ location1.program_counter(), location0.program_counter()}),
+ std::move(task5));
+ OnceClosure task3 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location3, location4,
+ ExpectedTrace({location2.program_counter(), location1.program_counter(),
+ location0.program_counter()}),
+ std::move(task4));
+
+ OnceClosure run_task_3_then_quit_nested_loop1 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::RunTwo, std::move(task3),
+ nested_run_loop1.QuitClosure());
+
+ OnceClosure task2 = BindOnce(
+ &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location2, location3,
+ ExpectedTrace({location1.program_counter(), location0.program_counter()}),
+ std::move(run_task_3_then_quit_nested_loop1));
+
+ // Task 1 is custom. It enters another nested RunLoop, has it do work and exit
+ // before posting the next task. This confirms that |task1| is restored as the
+ // current task before posting |task2| after returning from the nested loop.
+ RunLoop nested_run_loop2(RunLoop::Type::kNestableTasksAllowed);
+ OnceClosure task1 = BindOnce(
+ [](RunLoop* nested_run_loop, const Location& location2,
+ OnceClosure task2) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing());
+ nested_run_loop->RunUntilIdle();
+ ThreadTaskRunnerHandle::Get()->PostTask(location2, std::move(task2));
+ },
+ Unretained(&nested_run_loop2), location2, std::move(task2));
+
+ OnceClosure task0 =
+ BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+ Unretained(this), loop.task_runner(), location0, location1,
+ ExpectedTrace({}), std::move(task1));
+
+ loop.task_runner()->PostTask(location0, std::move(task0));
+ loop.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&RunLoop::Run, Unretained(&nested_run_loop1)));
+
+ run_loop.Run();
+}
+
} // namespace debug
} // namespace base
diff --git a/chromium/base/event_types.h b/chromium/base/event_types.h
deleted file mode 100644
index 9905800d2e8..00000000000
--- a/chromium/base/event_types.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_EVENT_TYPES_H_
-#define BASE_EVENT_TYPES_H_
-
-#include "build/build_config.h"
-
-#if defined(OS_WIN)
-#include <windows.h>
-#elif defined(USE_X11)
-typedef union _XEvent XEvent;
-#elif defined(OS_MACOSX)
-#if defined(__OBJC__)
-@class NSEvent;
-#else // __OBJC__
-class NSEvent;
-#endif // __OBJC__
-#endif
-
-namespace base {
-
-// Cross platform typedefs for native event types.
-#if defined(OS_WIN)
-typedef MSG NativeEvent;
-#elif defined(USE_X11)
-typedef XEvent* NativeEvent;
-#elif defined(OS_MACOSX)
-typedef NSEvent* NativeEvent;
-#else
-typedef void* NativeEvent;
-#endif
-
-} // namespace base
-
-#endif // BASE_EVENT_TYPES_H_
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index e38e1646254..1610eecbcf7 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -76,10 +76,10 @@ bool IsValidFeatureOrFieldTrialName(const std::string& name) {
} // namespace
-#if DCHECK_IS_ON() && defined(SYZYASAN)
-const Feature kSyzyAsanDCheckIsFatalFeature{"DcheckIsFatal",
- base::FEATURE_DISABLED_BY_DEFAULT};
-#endif // defined(SYZYASAN)
+#if DCHECK_IS_CONFIGURABLE
+const Feature kDCheckIsFatalFeature{"DcheckIsFatal",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+#endif // DCHECK_IS_CONFIGURABLE
FeatureList::FeatureList() = default;
@@ -263,19 +263,19 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
// Note: Intentional leak of global singleton.
g_feature_list_instance = instance.release();
-#if DCHECK_IS_ON() && defined(SYZYASAN)
+#if DCHECK_IS_CONFIGURABLE
// Update the behaviour of LOG_DCHECK to match the Feature configuration.
// DCHECK is also forced to be FATAL if we are running a death-test.
// TODO(asvitkine): If we find other use-cases that need integrating here
// then define a proper API/hook for the purpose.
- if (base::FeatureList::IsEnabled(kSyzyAsanDCheckIsFatalFeature) ||
+ if (base::FeatureList::IsEnabled(kDCheckIsFatalFeature) ||
base::CommandLine::ForCurrentProcess()->HasSwitch(
"gtest_internal_run_death_test")) {
logging::LOG_DCHECK = logging::LOG_FATAL;
} else {
logging::LOG_DCHECK = logging::LOG_INFO;
}
-#endif // DCHECK_IS_ON() && defined(SYZYASAN)
+#endif // DCHECK_IS_CONFIGURABLE
}
// static
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
index 0c89e3cf99c..b317ee64c6d 100644
--- a/chromium/base/feature_list.h
+++ b/chromium/base/feature_list.h
@@ -43,12 +43,12 @@ struct BASE_EXPORT Feature {
const FeatureState default_state;
};
-#if DCHECK_IS_ON() && defined(SYZYASAN)
-// SyzyASAN builds have DCHECKs built-in, but configurable at run-time to been
-// fatal, or not, via a DcheckIsFatal feature. We define the Feature here since
-// it is checked in FeatureList::SetInstance(). See crbug.com/596231.
-extern const Feature kSyzyAsanDCheckIsFatalFeature;
-#endif // defined(SYZYASAN)
+#if DCHECK_IS_CONFIGURABLE
+// DCHECKs have been built-in, and are configurable at run-time to be fatal, or
+// not, via a DcheckIsFatal feature. We define the Feature here since it is
+// checked in FeatureList::SetInstance(). See https://crbug.com/596231.
+extern const Feature kDCheckIsFatalFeature;
+#endif // DCHECK_IS_CONFIGURABLE
// The FeatureList class is used to determine whether a given feature is on or
// off. It provides an authoritative answer, taking into account command-line
diff --git a/chromium/base/files/dir_reader_posix_unittest.cc b/chromium/base/files/dir_reader_posix_unittest.cc
index 87ac6b90793..1954cb2f08f 100644
--- a/chromium/base/files/dir_reader_posix_unittest.cc
+++ b/chromium/base/files/dir_reader_posix_unittest.cc
@@ -5,6 +5,7 @@
#include "base/files/dir_reader_posix.h"
#include <fcntl.h>
+#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/chromium/base/files/file_descriptor_watcher_posix.cc b/chromium/base/files/file_descriptor_watcher_posix.cc
index 104c719c1b5..8e29d197527 100644
--- a/chromium/base/files/file_descriptor_watcher_posix.cc
+++ b/chromium/base/files/file_descriptor_watcher_posix.cc
@@ -8,6 +8,7 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_pump_for_io.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/sequenced_task_runner_handle.h"
@@ -41,10 +42,10 @@ FileDescriptorWatcher::Controller::~Controller() {
}
class FileDescriptorWatcher::Controller::Watcher
- : public MessageLoopForIO::Watcher,
+ : public MessagePumpForIO::FdWatcher,
public MessageLoop::DestructionObserver {
public:
- Watcher(WeakPtr<Controller> controller, MessageLoopForIO::Mode mode, int fd);
+ Watcher(WeakPtr<Controller> controller, MessagePumpForIO::Mode mode, int fd);
~Watcher() override;
void StartWatching();
@@ -52,7 +53,7 @@ class FileDescriptorWatcher::Controller::Watcher
private:
friend class FileDescriptorWatcher;
- // MessageLoopForIO::Watcher:
+ // MessagePumpForIO::FdWatcher:
void OnFileCanReadWithoutBlocking(int fd) override;
void OnFileCanWriteWithoutBlocking(int fd) override;
@@ -60,7 +61,7 @@ class FileDescriptorWatcher::Controller::Watcher
void WillDestroyCurrentMessageLoop() override;
// Used to instruct the MessageLoopForIO to stop watching the file descriptor.
- MessageLoopForIO::FileDescriptorWatcher file_descriptor_watcher_;
+ MessagePumpForIO::FdWatchController file_descriptor_watcher_;
// Runs tasks on the sequence on which this was instantiated (i.e. the
// sequence on which the callback must run).
@@ -72,7 +73,7 @@ class FileDescriptorWatcher::Controller::Watcher
// Whether this Watcher is notified when |fd_| becomes readable or writable
// without blocking.
- const MessageLoopForIO::Mode mode_;
+ const MessagePumpForIO::Mode mode_;
// The watched file descriptor.
const int fd_;
@@ -90,7 +91,7 @@ class FileDescriptorWatcher::Controller::Watcher
FileDescriptorWatcher::Controller::Watcher::Watcher(
WeakPtr<Controller> controller,
- MessageLoopForIO::Mode mode,
+ MessagePumpForIO::Mode mode,
int fd)
: file_descriptor_watcher_(FROM_HERE),
controller_(controller),
@@ -125,7 +126,7 @@ void FileDescriptorWatcher::Controller::Watcher::StartWatching() {
void FileDescriptorWatcher::Controller::Watcher::OnFileCanReadWithoutBlocking(
int fd) {
DCHECK_EQ(fd_, fd);
- DCHECK_EQ(MessageLoopForIO::WATCH_READ, mode_);
+ DCHECK_EQ(MessagePumpForIO::WATCH_READ, mode_);
DCHECK(thread_checker_.CalledOnValidThread());
// Run the callback on the sequence on which the watch was initiated.
@@ -136,7 +137,7 @@ void FileDescriptorWatcher::Controller::Watcher::OnFileCanReadWithoutBlocking(
void FileDescriptorWatcher::Controller::Watcher::OnFileCanWriteWithoutBlocking(
int fd) {
DCHECK_EQ(fd_, fd);
- DCHECK_EQ(MessageLoopForIO::WATCH_WRITE, mode_);
+ DCHECK_EQ(MessagePumpForIO::WATCH_WRITE, mode_);
DCHECK(thread_checker_.CalledOnValidThread());
// Run the callback on the sequence on which the watch was initiated.
@@ -155,7 +156,7 @@ void FileDescriptorWatcher::Controller::Watcher::
delete this;
}
-FileDescriptorWatcher::Controller::Controller(MessageLoopForIO::Mode mode,
+FileDescriptorWatcher::Controller::Controller(MessagePumpForIO::Mode mode,
int fd,
const Closure& callback)
: callback_(callback),
@@ -203,13 +204,13 @@ FileDescriptorWatcher::~FileDescriptorWatcher() {
std::unique_ptr<FileDescriptorWatcher::Controller>
FileDescriptorWatcher::WatchReadable(int fd, const Closure& callback) {
- return WrapUnique(new Controller(MessageLoopForIO::WATCH_READ, fd, callback));
+ return WrapUnique(new Controller(MessagePumpForIO::WATCH_READ, fd, callback));
}
std::unique_ptr<FileDescriptorWatcher::Controller>
FileDescriptorWatcher::WatchWritable(int fd, const Closure& callback) {
return WrapUnique(
- new Controller(MessageLoopForIO::WATCH_WRITE, fd, callback));
+ new Controller(MessagePumpForIO::WATCH_WRITE, fd, callback));
}
} // namespace base
diff --git a/chromium/base/files/file_descriptor_watcher_posix.h b/chromium/base/files/file_descriptor_watcher_posix.h
index 6cc011bb3e7..aa4457904b8 100644
--- a/chromium/base/files/file_descriptor_watcher_posix.h
+++ b/chromium/base/files/file_descriptor_watcher_posix.h
@@ -13,6 +13,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_for_io.h"
#include "base/sequence_checker.h"
namespace base {
@@ -21,6 +22,15 @@ class SingleThreadTaskRunner;
// The FileDescriptorWatcher API allows callbacks to be invoked when file
// descriptors are readable or writable without blocking.
+//
+// To enable this API in unit tests, use a ScopedTaskEnvironment with
+// MainThreadType::IO.
+//
+// Note: Prefer FileDescriptorWatcher to MessageLoopForIO::WatchFileDescriptor()
+// for non-critical IO. FileDescriptorWatcher works on threads/sequences without
+// MessagePumps but involves going through the task queue after being notified
+// by the OS (a desirablable property for non-critical IO that shouldn't preempt
+// the main queue).
class BASE_EXPORT FileDescriptorWatcher {
public:
// Instantiated and returned by WatchReadable() or WatchWritable(). The
@@ -37,7 +47,7 @@ class BASE_EXPORT FileDescriptorWatcher {
// Registers |callback| to be invoked when |fd| is readable or writable
// without blocking (depending on |mode|).
- Controller(MessageLoopForIO::Mode mode, int fd, const Closure& callback);
+ Controller(MessagePumpForIO::Mode mode, int fd, const Closure& callback);
// Starts watching the file descriptor.
void StartWatching();
@@ -79,12 +89,13 @@ class BASE_EXPORT FileDescriptorWatcher {
FileDescriptorWatcher(MessageLoopForIO* message_loop_for_io);
~FileDescriptorWatcher();
- // Registers |callback| to be invoked on the current sequence when |fd| is
+ // Registers |callback| to be posted on the current sequence when |fd| is
// readable or writable without blocking. |callback| is unregistered when the
// returned Controller is deleted (deletion must happen on the current
// sequence). To call these methods, a FileDescriptorWatcher must have been
// instantiated on the current thread and SequencedTaskRunnerHandle::IsSet()
- // must return true.
+ // must return true (these conditions are met at least on all TaskScheduler
+ // threads as well as on threads backed by a MessageLoopForIO).
static std::unique_ptr<Controller> WatchReadable(int fd,
const Closure& callback);
static std::unique_ptr<Controller> WatchWritable(int fd,
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index 1bc09facd03..112b90d5cd4 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -113,8 +113,7 @@ TEST(FileTest, SelfSwap) {
FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
File file(file_path,
base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_DELETE_ON_CLOSE);
- using namespace std;
- swap(file, file);
+ std::swap(file, file);
EXPECT_TRUE(file.IsValid());
}
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index 2a348182e68..3cdff6049ea 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -103,14 +103,12 @@ bool VerifySpecificPathControlledByUser(const FilePath& path,
}
if (S_ISLNK(stat_info.st_mode)) {
- DLOG(ERROR) << "Path " << path.value()
- << " is a symbolic link.";
+ DLOG(ERROR) << "Path " << path.value() << " is a symbolic link.";
return false;
}
if (stat_info.st_uid != owner_uid) {
- DLOG(ERROR) << "Path " << path.value()
- << " is owned by the wrong user.";
+ DLOG(ERROR) << "Path " << path.value() << " is owned by the wrong user.";
return false;
}
@@ -122,8 +120,7 @@ bool VerifySpecificPathControlledByUser(const FilePath& path,
}
if (stat_info.st_mode & S_IWOTH) {
- DLOG(ERROR) << "Path " << path.value()
- << " is writable by any user.";
+ DLOG(ERROR) << "Path " << path.value() << " is writable by any user.";
return false;
}
@@ -227,15 +224,13 @@ bool DoCopyDirectory(const FilePath& from_path,
// This function does not properly handle destinations within the source
FilePath real_to_path = to_path;
- if (PathExists(real_to_path)) {
+ if (PathExists(real_to_path))
real_to_path = MakeAbsoluteFilePath(real_to_path);
- if (real_to_path.empty())
- return false;
- } else {
+ else
real_to_path = MakeAbsoluteFilePath(real_to_path.DirName());
- if (real_to_path.empty())
- return false;
- }
+ if (real_to_path.empty())
+ return false;
+
FilePath real_from_path = MakeAbsoluteFilePath(from_path);
if (real_from_path.empty())
return false;
@@ -1064,17 +1059,15 @@ namespace internal {
bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
AssertBlockingAllowed();
- // Windows compatibility: if to_path exists, from_path and to_path
+ // Windows compatibility: if |to_path| exists, |from_path| and |to_path|
// must be the same type, either both files, or both directories.
stat_wrapper_t to_file_info;
if (CallStat(to_path.value().c_str(), &to_file_info) == 0) {
stat_wrapper_t from_file_info;
- if (CallStat(from_path.value().c_str(), &from_file_info) == 0) {
- if (S_ISDIR(to_file_info.st_mode) != S_ISDIR(from_file_info.st_mode))
- return false;
- } else {
+ if (CallStat(from_path.value().c_str(), &from_file_info) != 0)
+ return false;
+ if (S_ISDIR(to_file_info.st_mode) != S_ISDIR(from_file_info.st_mode))
return false;
- }
}
if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
diff --git a/chromium/base/fuchsia/async_dispatcher.cc b/chromium/base/fuchsia/async_dispatcher.cc
new file mode 100644
index 00000000000..1dba86c75db
--- /dev/null
+++ b/chromium/base/fuchsia/async_dispatcher.cc
@@ -0,0 +1,317 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/async_dispatcher.h"
+
+#include <lib/async/task.h>
+#include <lib/async/wait.h>
+#include <zircon/syscalls.h>
+
+#include "base/fuchsia/fuchsia_logging.h"
+
+namespace base {
+
+namespace {
+
+template <typename T>
+uintptr_t key_from_ptr(T* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr);
+};
+
+} // namespace
+
+class AsyncDispatcher::WaitState : public LinkNode<WaitState> {
+ public:
+ explicit WaitState(AsyncDispatcher* async_dispatcher) {
+ async_dispatcher->wait_list_.Append(this);
+ }
+ ~WaitState() { RemoveFromList(); }
+
+ async_wait_t* wait() {
+ // WaitState objects are allocated in-place in the |state| field of an
+ // enclosing async_wait_t, so async_wait_t address can be calculated by
+ // subtracting state offset in async_wait_t from |this|.
+ static_assert(std::is_standard_layout<async_wait_t>(),
+ "async_wait_t is expected to have standard layout.");
+ return reinterpret_cast<async_wait_t*>(reinterpret_cast<uint8_t*>(this) -
+ offsetof(async_wait_t, state));
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WaitState);
+};
+
+class AsyncDispatcher::TaskState : public LinkNode<TaskState> {
+ public:
+ explicit TaskState(LinkNode<TaskState>* previous_task) {
+ InsertAfter(previous_task);
+ }
+ ~TaskState() { RemoveFromList(); }
+
+ async_task_t* task() {
+ // TaskState objects are allocated in-place in the |state| field of an
+ // enclosing async_task_t, so async_task_t address can be calculated by
+ // subtracting state offset in async_task_t from |this|.
+ static_assert(std::is_standard_layout<async_task_t>(),
+ "async_task_t is expected to have standard layout.");
+ return reinterpret_cast<async_task_t*>(reinterpret_cast<uint8_t*>(this) -
+ offsetof(async_task_t, state));
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskState);
+};
+
+AsyncDispatcher::AsyncDispatcher() {
+ zx_status_t status = zx_port_create(0u, port_.receive());
+ ZX_DCHECK(status == ZX_OK, status);
+
+ status = zx_timer_create(0u, ZX_CLOCK_MONOTONIC, timer_.receive());
+ ZX_DCHECK(status == ZX_OK, status);
+ status =
+ zx_object_wait_async(timer_.get(), port_.get(), key_from_ptr(&timer_),
+ ZX_TIMER_SIGNALED, ZX_WAIT_ASYNC_REPEATING);
+ ZX_DCHECK(status == ZX_OK, status);
+
+ status = zx_event_create(0, stop_event_.receive());
+ ZX_DCHECK(status == ZX_OK, status);
+ status = zx_object_wait_async(stop_event_.get(), port_.get(),
+ key_from_ptr(&stop_event_), ZX_EVENT_SIGNALED,
+ ZX_WAIT_ASYNC_REPEATING);
+ ZX_DCHECK(status == ZX_OK, status);
+
+ static const async_ops_t async_ops_t_impl = {
+ NowOp, BeginWaitOp, CancelWaitOp, PostTaskOp,
+ CancelTaskOp, QueuePacketOp, SetGuestBellTrapOp,
+ };
+ ops = &async_ops_t_impl;
+
+ DCHECK(!async_get_default());
+ async_set_default(this);
+}
+
+AsyncDispatcher::~AsyncDispatcher() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_EQ(async_get_default(), this);
+
+ // Some waits and tasks may be canceled while the dispatcher is being
+ // destroyed, so pop-from-head until none remain.
+
+ while (!wait_list_.empty()) {
+ WaitState* state = wait_list_.head()->value();
+ async_wait_t* wait = state->wait();
+ state->~WaitState();
+ wait->handler(this, wait, ZX_ERR_CANCELED, nullptr);
+ }
+
+ while (!task_list_.empty()) {
+ TaskState* state = task_list_.head()->value();
+ async_task_t* task = state->task();
+ state->~TaskState();
+ task->handler(this, task, ZX_ERR_CANCELED);
+ }
+
+ async_set_default(nullptr);
+}
+
+zx_status_t AsyncDispatcher::DispatchOrWaitUntil(zx_time_t deadline) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ zx_port_packet_t packet = {};
+ zx_status_t status = zx_port_wait(port_.get(), deadline, &packet, 0);
+ if (status != ZX_OK)
+ return status;
+
+ if (packet.type == ZX_PKT_TYPE_SIGNAL_ONE ||
+ packet.type == ZX_PKT_TYPE_SIGNAL_REP) {
+ if (packet.key == key_from_ptr(&timer_)) {
+ // |timer_| has expired.
+ DCHECK(packet.signal.observed & ZX_TIMER_SIGNALED);
+ DispatchTasks();
+ return ZX_OK;
+ } else if (packet.key == key_from_ptr(&stop_event_)) {
+ // Stop() was called.
+ DCHECK(packet.signal.observed & ZX_EVENT_SIGNALED);
+ status = zx_object_signal(stop_event_.get(), ZX_EVENT_SIGNALED, 0);
+ ZX_DCHECK(status == ZX_OK, status);
+ return ZX_ERR_CANCELED;
+ } else {
+ DCHECK_EQ(packet.type, ZX_PKT_TYPE_SIGNAL_ONE);
+ async_wait_t* wait = reinterpret_cast<async_wait_t*>(packet.key);
+
+ // Clean the state before invoking the handler: it may destroy the wait.
+ WaitState* state = reinterpret_cast<WaitState*>(&wait->state);
+ state->~WaitState();
+
+ wait->handler(this, wait, packet.status, &packet.signal);
+
+ return ZX_OK;
+ }
+ }
+
+ NOTREACHED();
+ return ZX_ERR_INTERNAL;
+}
+
+void AsyncDispatcher::Stop() {
+ // Can be called on any thread.
+ zx_status_t status =
+ zx_object_signal(stop_event_.get(), 0, ZX_EVENT_SIGNALED);
+ ZX_DCHECK(status == ZX_OK, status);
+}
+
+zx_time_t AsyncDispatcher::NowOp(async_t* async) {
+ DCHECK(async);
+ return zx_clock_get(ZX_CLOCK_MONOTONIC);
+}
+
+zx_status_t AsyncDispatcher::BeginWaitOp(async_t* async, async_wait_t* wait) {
+ return static_cast<AsyncDispatcher*>(async)->BeginWait(wait);
+}
+
+zx_status_t AsyncDispatcher::CancelWaitOp(async_t* async, async_wait_t* wait) {
+ return static_cast<AsyncDispatcher*>(async)->CancelWait(wait);
+}
+
+zx_status_t AsyncDispatcher::PostTaskOp(async_t* async, async_task_t* task) {
+ return static_cast<AsyncDispatcher*>(async)->PostTask(task);
+}
+
+zx_status_t AsyncDispatcher::CancelTaskOp(async_t* async, async_task_t* task) {
+ return static_cast<AsyncDispatcher*>(async)->CancelTask(task);
+}
+
+zx_status_t AsyncDispatcher::QueuePacketOp(async_t* async,
+ async_receiver_t* receiver,
+ const zx_packet_user_t* data) {
+ return ZX_ERR_NOT_SUPPORTED;
+}
+
+zx_status_t AsyncDispatcher::SetGuestBellTrapOp(async_t* async,
+ async_guest_bell_trap_t* trap,
+ zx_handle_t guest,
+ zx_vaddr_t addr,
+ size_t length) {
+ return ZX_ERR_NOT_SUPPORTED;
+}
+
+zx_status_t AsyncDispatcher::BeginWait(async_wait_t* wait) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ static_assert(sizeof(AsyncDispatcher::WaitState) <= sizeof(async_state_t),
+ "WaitState is too big");
+ WaitState* state = new (&wait->state) WaitState(this);
+ zx_status_t status = zx_object_wait_async(wait->object, port_.get(),
+ reinterpret_cast<uintptr_t>(wait),
+ wait->trigger, ZX_WAIT_ASYNC_ONCE);
+
+ if (status != ZX_OK)
+ state->~WaitState();
+
+ return status;
+}
+
+zx_status_t AsyncDispatcher::CancelWait(async_wait_t* wait) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ zx_status_t status =
+ zx_port_cancel(port_.get(), wait->object, (uintptr_t)wait);
+ if (status == ZX_OK) {
+ WaitState* state = reinterpret_cast<WaitState*>(&(wait->state));
+ state->~WaitState();
+ }
+
+ return status;
+}
+
+zx_status_t AsyncDispatcher::PostTask(async_task_t* task) {
+ // Can be called on any thread.
+ AutoLock lock(lock_);
+
+ // Find correct position for the new task in |task_list_| to keep the list
+ // sorted by deadline. This implementation has O(N) complexity, but it's
+ // acceptable - async task are not expected to be used frequently.
+ // TODO(sergeyu): Consider using a more efficient data structure if tasks
+ // performance becomes important.
+ LinkNode<TaskState>* node;
+ for (node = task_list_.head(); node != task_list_.end();
+ node = node->previous()) {
+ if (task->deadline >= node->value()->task()->deadline)
+ break;
+ }
+
+ static_assert(sizeof(AsyncDispatcher::TaskState) <= sizeof(async_state_t),
+ "TaskState is too big");
+
+ // Will insert new task after |node|.
+ new (&task->state) TaskState(node);
+
+ if (reinterpret_cast<TaskState*>(&task->state) == task_list_.head()) {
+ // Task inserted at head. Earliest deadline changed.
+ RestartTimerLocked();
+ }
+
+ return ZX_OK;
+}
+
+zx_status_t AsyncDispatcher::CancelTask(async_task_t* task) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ AutoLock lock(lock_);
+
+ if (!task->state.reserved[0])
+ return ZX_ERR_NOT_FOUND;
+
+ TaskState* state = reinterpret_cast<TaskState*>(&task->state);
+ state->~TaskState();
+
+ return ZX_OK;
+}
+
+void AsyncDispatcher::DispatchTasks() {
+ // Snapshot now value to set implicit bound for the tasks that will run before
+ // DispatchTasks() returns. This also helps to avoid calling zx_clock_get()
+ // more than necessary.
+ zx_time_t now = zx_clock_get(ZX_CLOCK_MONOTONIC);
+
+ while (true) {
+ async_task_t* task;
+ {
+ AutoLock lock(lock_);
+ if (task_list_.empty())
+ break;
+
+ TaskState* task_state = task_list_.head()->value();
+ task = task_state->task();
+
+ if (task->deadline > now) {
+ RestartTimerLocked();
+ break;
+ }
+
+ task_state->~TaskState();
+
+ // ~TaskState() is expected to reset the state to 0. The destructor
+ // removes the task from the |task_list_| and LinkNode::RemoveFromList()
+ // sets both its fields to nullptr, which is equivalent to resetting the
+ // state to 0.
+ DCHECK_EQ(task->state.reserved[0], 0u);
+ }
+
+ // The handler is responsible for freeing the |task| or it may reuse it.
+ task->handler(this, task, ZX_OK);
+ }
+}
+
+void AsyncDispatcher::RestartTimerLocked() {
+ lock_.AssertAcquired();
+
+ if (task_list_.empty())
+ return;
+ zx_time_t deadline = task_list_.head()->value()->task()->deadline;
+ zx_status_t status = zx_timer_set(timer_.get(), deadline, 0);
+ ZX_DCHECK(status == ZX_OK, status);
+}
+
+} // namespace base
diff --git a/chromium/base/fuchsia/async_dispatcher.h b/chromium/base/fuchsia/async_dispatcher.h
new file mode 100644
index 00000000000..97342cce6a8
--- /dev/null
+++ b/chromium/base/fuchsia/async_dispatcher.h
@@ -0,0 +1,84 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_ASYNC_DISPATCHER_H_
+#define BASE_FUCHSIA_ASYNC_DISPATCHER_H_
+
+#include <lib/async/default.h>
+#include <lib/async/dispatcher.h>
+
+#include "base/containers/linked_list.h"
+#include "base/fuchsia/scoped_zx_handle.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+// Implementation of dispatcher for Fuchsia's async library. It's necessary to
+// run Fuchsia's library on chromium threads.
+class AsyncDispatcher : public async_t {
+ public:
+ AsyncDispatcher();
+ ~AsyncDispatcher();
+
+ // Returns after running one or more tasks or waits until |deadline|.
+ // Returns |ZX_OK| if some tasks were executed, |ZX_ERR_TIMED_OUT| - the
+ // deadline expired, |ZX_ERR_CANCELED| - Stop() was called.
+ zx_status_t DispatchOrWaitUntil(zx_time_t deadline);
+
+ // If Run() is being executed then it will return as soon as possible (e.g.
+ // finishing running the current task), otherwise the following Run() call
+ // will quit immediately instead of waiting until deadline expires.
+ void Stop();
+
+ private:
+ class WaitState;
+ class TaskState;
+
+ static zx_time_t NowOp(async_t* async);
+ static zx_status_t BeginWaitOp(async_t* async, async_wait_t* wait);
+ static zx_status_t CancelWaitOp(async_t* async, async_wait_t* wait);
+ static zx_status_t PostTaskOp(async_t* async, async_task_t* task);
+ static zx_status_t CancelTaskOp(async_t* async, async_task_t* task);
+ static zx_status_t QueuePacketOp(async_t* async,
+ async_receiver_t* receiver,
+ const zx_packet_user_t* data);
+ static zx_status_t SetGuestBellTrapOp(async_t* async,
+ async_guest_bell_trap_t* trap,
+ zx_handle_t guest,
+ zx_vaddr_t addr,
+ size_t length);
+
+ // async_ops_t implementation. Called by corresponding *Op() methods above.
+ zx_status_t BeginWait(async_wait_t* wait);
+ zx_status_t CancelWait(async_wait_t* wait);
+ zx_status_t PostTask(async_task_t* task);
+ zx_status_t CancelTask(async_task_t* task);
+
+ // Runs tasks in |task_list_| that have deadline in the past.
+ void DispatchTasks();
+
+ // Must be called while |lock_| is held.
+ void RestartTimerLocked();
+
+ THREAD_CHECKER(thread_checker_);
+
+ ScopedZxHandle port_;
+ ScopedZxHandle timer_;
+ ScopedZxHandle stop_event_;
+
+ LinkedList<WaitState> wait_list_;
+
+ // |lock_| must be held when accessing |task_list_|.
+ base::Lock lock_;
+
+ LinkedList<TaskState> task_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncDispatcher);
+};
+
+} // namespace base
+
+#endif // BASE_FUCHSIA_ASYNC_DISPATCHER_H_ \ No newline at end of file
diff --git a/chromium/base/fuchsia/async_dispatcher_unittest.cc b/chromium/base/fuchsia/async_dispatcher_unittest.cc
new file mode 100644
index 00000000000..7f1722f4fe4
--- /dev/null
+++ b/chromium/base/fuchsia/async_dispatcher_unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/async_dispatcher.h"
+
+#include <lib/async/default.h>
+#include <lib/async/task.h>
+#include <lib/async/wait.h>
+
+#include "base/callback.h"
+#include "base/fuchsia/scoped_zx_handle.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct TestTask : public async_task_t {
+ explicit TestTask() {
+ state = ASYNC_STATE_INIT;
+ handler = &TaskProc;
+ deadline = 0;
+ }
+
+ static void TaskProc(async_t* async, async_task_t* task, zx_status_t status);
+
+ int num_calls = 0;
+ int repeats = 1;
+ OnceClosure on_call;
+ zx_status_t last_status = ZX_OK;
+};
+
+// static
+void TestTask::TaskProc(async_t* async,
+ async_task_t* task,
+ zx_status_t status) {
+ EXPECT_EQ(async, async_get_default());
+ EXPECT_TRUE(status == ZX_OK || status == ZX_ERR_CANCELED)
+ << "status: " << status;
+
+ auto* test_task = static_cast<TestTask*>(task);
+ test_task->num_calls++;
+ test_task->last_status = status;
+
+ if (!test_task->on_call.is_null())
+ std::move(test_task->on_call).Run();
+
+ if (test_task->num_calls < test_task->repeats)
+ async_post_task(async, task);
+};
+
+struct TestWait : public async_wait_t {
+ TestWait(zx_handle_t handle,
+ zx_signals_t signals) {
+ state = ASYNC_STATE_INIT;
+ handler = &HandleProc;
+ object = handle;
+ trigger = signals;
+ }
+
+ static void HandleProc(async_t* async,
+ async_wait_t* wait,
+ zx_status_t status,
+ const zx_packet_signal_t* signal);
+ int num_calls = 0;
+ OnceClosure on_call;
+ zx_status_t last_status = ZX_OK;
+};
+
+// static
+void TestWait::HandleProc(async_t* async,
+ async_wait_t* wait,
+ zx_status_t status,
+ const zx_packet_signal_t* signal) {
+ EXPECT_EQ(async, async_get_default());
+ EXPECT_TRUE(status == ZX_OK || status == ZX_ERR_CANCELED)
+ << "status: " << status;
+
+ auto* test_wait = static_cast<TestWait*>(wait);
+
+ test_wait->num_calls++;
+ test_wait->last_status = status;
+
+ if (!test_wait->on_call.is_null())
+ std::move(test_wait->on_call).Run();
+}
+
+} // namespace
+
+class AsyncDispatcherTest : public testing::Test {
+ public:
+ AsyncDispatcherTest() {
+ dispatcher_ = std::make_unique<AsyncDispatcher>();
+
+ async_ = async_get_default();
+ EXPECT_TRUE(async_);
+
+ EXPECT_EQ(zx_socket_create(ZX_SOCKET_DATAGRAM, socket1_.receive(),
+ socket2_.receive()),
+ ZX_OK);
+ }
+
+ ~AsyncDispatcherTest() override = default;
+
+ void RunUntilIdle() {
+ while (true) {
+ zx_status_t status = dispatcher_->DispatchOrWaitUntil(0);
+ if (status != ZX_OK) {
+ EXPECT_EQ(status, ZX_ERR_TIMED_OUT);
+ break;
+ }
+ }
+ }
+
+ protected:
+ std::unique_ptr<AsyncDispatcher> dispatcher_;
+
+ async_t* async_ = nullptr;
+
+ base::ScopedZxHandle socket1_;
+ base::ScopedZxHandle socket2_;
+};
+
+TEST_F(AsyncDispatcherTest, PostTask) {
+ TestTask task;
+ ASSERT_EQ(async_post_task(async_, &task), ZX_OK);
+ dispatcher_->DispatchOrWaitUntil(0);
+ EXPECT_EQ(task.num_calls, 1);
+ EXPECT_EQ(task.last_status, ZX_OK);
+}
+
+TEST_F(AsyncDispatcherTest, TaskRepeat) {
+ TestTask task;
+ task.repeats = 2;
+ ASSERT_EQ(async_post_task(async_, &task), ZX_OK);
+ RunUntilIdle();
+ EXPECT_EQ(task.num_calls, 2);
+ EXPECT_EQ(task.last_status, ZX_OK);
+}
+
+TEST_F(AsyncDispatcherTest, DelayedTask) {
+ TestTask task;
+ constexpr auto kDelay = TimeDelta::FromMilliseconds(5);
+ TimeTicks started = TimeTicks::Now();
+ task.deadline = zx_deadline_after(kDelay.InNanoseconds());
+ ASSERT_EQ(async_post_task(async_, &task), ZX_OK);
+ zx_status_t status = dispatcher_->DispatchOrWaitUntil(zx_deadline_after(
+ (kDelay + TestTimeouts::tiny_timeout()).InNanoseconds()));
+ EXPECT_EQ(status, ZX_OK);
+ EXPECT_GE(TimeTicks::Now() - started, kDelay);
+}
+
+TEST_F(AsyncDispatcherTest, CancelTask) {
+ TestTask task;
+ ASSERT_EQ(async_post_task(async_, &task), ZX_OK);
+ ASSERT_EQ(async_cancel_task(async_, &task), ZX_OK);
+ RunUntilIdle();
+ EXPECT_EQ(task.num_calls, 0);
+}
+
+TEST_F(AsyncDispatcherTest, TaskObserveShutdown) {
+ TestTask task;
+ ASSERT_EQ(async_post_task(async_, &task), ZX_OK);
+ dispatcher_.reset();
+
+ EXPECT_EQ(task.num_calls, 1);
+ EXPECT_EQ(task.last_status, ZX_ERR_CANCELED);
+}
+
+TEST_F(AsyncDispatcherTest, Wait) {
+ TestWait wait(socket1_.get(), ZX_SOCKET_READABLE);
+ EXPECT_EQ(async_begin_wait(async_, &wait), ZX_OK);
+
+ // Handler shouldn't be called because the event wasn't signaled.
+ RunUntilIdle();
+ EXPECT_EQ(wait.num_calls, 0);
+
+ char byte = 0;
+ EXPECT_EQ(zx_socket_write(socket2_.get(), /*options=*/0, &byte, sizeof(byte),
+ /*actual=*/nullptr),
+ ZX_OK);
+
+ zx_status_t status = dispatcher_->DispatchOrWaitUntil(
+ zx_deadline_after(TestTimeouts::tiny_timeout().InNanoseconds()));
+ EXPECT_EQ(status, ZX_OK);
+
+ EXPECT_EQ(wait.num_calls, 1);
+ EXPECT_EQ(wait.last_status, ZX_OK);
+}
+
+TEST_F(AsyncDispatcherTest, CancelWait) {
+ TestWait wait(socket1_.get(), ZX_SOCKET_READABLE);
+ EXPECT_EQ(async_begin_wait(async_, &wait), ZX_OK);
+
+ char byte = 0;
+ EXPECT_EQ(zx_socket_write(socket2_.get(), /*options=*/0, &byte, sizeof(byte),
+ /*actual=*/nullptr),
+ ZX_OK);
+
+ EXPECT_EQ(async_cancel_wait(async_, &wait), ZX_OK);
+
+ RunUntilIdle();
+ EXPECT_EQ(wait.num_calls, 0);
+}
+
+TEST_F(AsyncDispatcherTest, WaitShutdown) {
+ TestWait wait(socket1_.get(), ZX_SOCKET_READABLE);
+ EXPECT_EQ(async_begin_wait(async_, &wait), ZX_OK);
+ RunUntilIdle();
+ dispatcher_.reset();
+
+ EXPECT_EQ(wait.num_calls, 1);
+ EXPECT_EQ(wait.last_status, ZX_ERR_CANCELED);
+}
+
+} // namespace base
diff --git a/chromium/base/i18n/file_util_icu_unittest.cc b/chromium/base/i18n/file_util_icu_unittest.cc
index 2028997fe79..062d29b0d8d 100644
--- a/chromium/base/i18n/file_util_icu_unittest.cc
+++ b/chromium/base/i18n/file_util_icu_unittest.cc
@@ -37,7 +37,7 @@ static const struct GoodBadPairLinux {
{" ", "- -"},
};
-TEST_F(FileUtilICUTest, ReplaceIllegalCharacersInPathLinuxTest) {
+TEST_F(FileUtilICUTest, ReplaceIllegalCharactersInPathLinuxTest) {
for (size_t i = 0; i < arraysize(kLinuxIllegalCharacterCases); ++i) {
std::string bad_name(kLinuxIllegalCharacterCases[i].bad_name);
ReplaceIllegalCharactersInPath(&bad_name, '-');
diff --git a/chromium/base/i18n/rtl.cc b/chromium/base/i18n/rtl.cc
index bad21548a70..bba0d449cb6 100644
--- a/chromium/base/i18n/rtl.cc
+++ b/chromium/base/i18n/rtl.cc
@@ -84,30 +84,6 @@ base::i18n::TextDirection GetCharacterDirection(UChar32 character) {
return base::i18n::UNKNOWN_DIRECTION;
}
-// Gets the explicitly forced text direction for debugging. If no forcing is
-// applied, returns UNKNOWN_DIRECTION.
-base::i18n::TextDirection GetForcedTextDirection() {
- // On iOS, check for RTL forcing.
-#if defined(OS_IOS)
- if (base::ios::IsInForcedRTL())
- return base::i18n::RIGHT_TO_LEFT;
-#endif
-
- base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
- if (command_line->HasSwitch(switches::kForceUIDirection)) {
- std::string force_flag =
- command_line->GetSwitchValueASCII(switches::kForceUIDirection);
-
- if (force_flag == switches::kForceDirectionLTR)
- return base::i18n::LEFT_TO_RIGHT;
-
- if (force_flag == switches::kForceDirectionRTL)
- return base::i18n::RIGHT_TO_LEFT;
- }
-
- return base::i18n::UNKNOWN_DIRECTION;
-}
-
} // namespace
namespace base {
@@ -186,6 +162,28 @@ bool ICUIsRTL() {
return g_icu_text_direction == RIGHT_TO_LEFT;
}
+TextDirection GetForcedTextDirection() {
+// On iOS, check for RTL forcing.
+#if defined(OS_IOS)
+ if (base::ios::IsInForcedRTL())
+ return base::i18n::RIGHT_TO_LEFT;
+#endif
+
+ base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+ if (command_line->HasSwitch(switches::kForceUIDirection)) {
+ std::string force_flag =
+ command_line->GetSwitchValueASCII(switches::kForceUIDirection);
+
+ if (force_flag == switches::kForceDirectionLTR)
+ return base::i18n::LEFT_TO_RIGHT;
+
+ if (force_flag == switches::kForceDirectionRTL)
+ return base::i18n::RIGHT_TO_LEFT;
+ }
+
+ return base::i18n::UNKNOWN_DIRECTION;
+}
+
TextDirection GetTextDirectionForLocaleInStartUp(const char* locale_name) {
// Check for direction forcing.
TextDirection forced_direction = GetForcedTextDirection();
diff --git a/chromium/base/i18n/rtl.h b/chromium/base/i18n/rtl.h
index 333b9bf4629..53259709057 100644
--- a/chromium/base/i18n/rtl.h
+++ b/chromium/base/i18n/rtl.h
@@ -60,6 +60,10 @@ BASE_I18N_EXPORT bool IsRTL();
// NOTE: Generally, you should call IsRTL() instead of this.
BASE_I18N_EXPORT bool ICUIsRTL();
+// Gets the explicitly forced text direction for debugging. If no forcing is
+// applied, returns UNKNOWN_DIRECTION.
+BASE_I18N_EXPORT TextDirection GetForcedTextDirection();
+
// Returns the text direction for |locale_name|.
// As a startup optimization, this method checks the locale against a list of
// Chrome-supported RTL locales.
diff --git a/chromium/base/json/OWNERS b/chromium/base/json/OWNERS
new file mode 100644
index 00000000000..14fce2ae686
--- /dev/null
+++ b/chromium/base/json/OWNERS
@@ -0,0 +1 @@
+rsesek@chromium.org
diff --git a/chromium/base/json/correctness_fuzzer.cc b/chromium/base/json/json_correctness_fuzzer.cc
index 57434341a2c..1f32d8c230c 100644
--- a/chromium/base/json/correctness_fuzzer.cc
+++ b/chromium/base/json/json_correctness_fuzzer.cc
@@ -27,7 +27,13 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
int error_code, error_line, error_column;
std::string error_message;
- const std::string input_string(reinterpret_cast<const char*>(data), size - 1);
+ // Create a copy of input buffer, as otherwise we don't catch
+ // overflow that touches the last byte (which is used in options).
+ std::unique_ptr<char[]> input(new char[size - 1]);
+ memcpy(input.get(), data, size - 1);
+
+ base::StringPiece input_string(input.get(), size - 1);
+
const int options = data[size - 1];
auto parsed_value = base::JSONReader::ReadAndReturnError(
input_string, options, &error_code, &error_message, &error_line,
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index 223cd4eee9e..0d31ba80afd 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -11,6 +11,7 @@
#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
@@ -48,6 +49,8 @@ class StackMarker {
DISALLOW_COPY_AND_ASSIGN(StackMarker);
};
+constexpr uint32_t kUnicodeReplacementPoint = 0xFFFD;
+
} // namespace
// This is U+FFFD.
@@ -56,9 +59,6 @@ const char kUnicodeReplacementString[] = "\xEF\xBF\xBD";
JSONParser::JSONParser(int options, int max_depth)
: options_(options),
max_depth_(max_depth),
- start_pos_(nullptr),
- pos_(nullptr),
- end_pos_(nullptr),
index_(0),
stack_depth_(0),
line_number_(0),
@@ -72,9 +72,7 @@ JSONParser::JSONParser(int options, int max_depth)
JSONParser::~JSONParser() = default;
Optional<Value> JSONParser::Parse(StringPiece input) {
- start_pos_ = input.data();
- pos_ = start_pos_;
- end_pos_ = start_pos_ + input.length();
+ input_ = input;
index_ = 0;
line_number_ = 1;
index_last_line_ = 0;
@@ -83,16 +81,18 @@ Optional<Value> JSONParser::Parse(StringPiece input) {
error_line_ = 0;
error_column_ = 0;
- // When the input JSON string starts with a UTF-8 Byte-Order-Mark
- // <0xEF 0xBB 0xBF>, advance the start position to avoid the
- // ParseNextToken function mis-treating a Unicode BOM as an invalid
- // character and returning NULL.
- if (CanConsume(3) && static_cast<uint8_t>(*pos_) == 0xEF &&
- static_cast<uint8_t>(*(pos_ + 1)) == 0xBB &&
- static_cast<uint8_t>(*(pos_ + 2)) == 0xBF) {
- NextNChars(3);
+ // ICU and ReadUnicodeCharacter() use int32_t for lengths, so ensure
+ // that the index_ will not overflow when parsing.
+ if (!base::IsValueInRangeForNumericType<int32_t>(input.length())) {
+ ReportError(JSONReader::JSON_TOO_LARGE, 0);
+ return nullopt;
}
+ // When the input JSON string starts with a UTF-8 Byte-Order-Mark,
+ // advance the start position to avoid the ParseNextToken function mis-
+ // treating a Unicode BOM as an invalid character and returning NULL.
+ ConsumeIfMatch("\xEF\xBB\xBF");
+
// Parse the first and any nested tokens.
Optional<Value> root(ParseNextToken());
if (!root)
@@ -100,10 +100,8 @@ Optional<Value> JSONParser::Parse(StringPiece input) {
// Make sure the input stream is at an end.
if (GetNextToken() != T_END_OF_INPUT) {
- if (!CanConsume(1) || (NextChar() && GetNextToken() != T_END_OF_INPUT)) {
- ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
- return nullopt;
- }
+ ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
+ return nullopt;
}
return root;
@@ -138,40 +136,28 @@ JSONParser::StringBuilder::~StringBuilder() = default;
JSONParser::StringBuilder& JSONParser::StringBuilder::operator=(
StringBuilder&& other) = default;
-void JSONParser::StringBuilder::Append(const char& c) {
- DCHECK_GE(c, 0);
- DCHECK_LT(static_cast<unsigned char>(c), 128);
+void JSONParser::StringBuilder::Append(uint32_t point) {
+ DCHECK(IsValidCharacter(point));
- if (string_)
- string_->push_back(c);
- else
+ if (point < kExtendedASCIIStart && !string_) {
+ DCHECK_EQ(static_cast<char>(point), pos_[length_]);
++length_;
-}
-
-void JSONParser::StringBuilder::AppendString(const char* str, size_t len) {
- DCHECK(string_);
- string_->append(str, len);
+ } else {
+ Convert();
+ if (UNLIKELY(point == kUnicodeReplacementPoint)) {
+ string_->append(kUnicodeReplacementString);
+ } else {
+ WriteUnicodeCharacter(point, &*string_);
+ }
+ }
}
void JSONParser::StringBuilder::Convert() {
if (string_)
return;
-
string_.emplace(pos_, length_);
}
-StringPiece JSONParser::StringBuilder::AsStringPiece() {
- if (string_)
- return *string_;
- return StringPiece(pos_, length_);
-}
-
-const std::string& JSONParser::StringBuilder::AsString() {
- if (!string_)
- Convert();
- return *string_;
-}
-
std::string JSONParser::StringBuilder::DestructiveAsString() {
if (string_)
return std::move(*string_);
@@ -180,29 +166,48 @@ std::string JSONParser::StringBuilder::DestructiveAsString() {
// JSONParser private //////////////////////////////////////////////////////////
-inline bool JSONParser::CanConsume(int length) {
- return pos_ + length <= end_pos_;
+Optional<StringPiece> JSONParser::PeekChars(int count) {
+ if (static_cast<size_t>(index_) + count > input_.length())
+ return nullopt;
+ // Using StringPiece::substr() is significantly slower (according to
+ // base_perftests) than constructing a substring manually.
+ return StringPiece(input_.data() + index_, count);
}
-const char* JSONParser::NextChar() {
- DCHECK(CanConsume(1));
- ++index_;
- ++pos_;
- return pos_;
+Optional<char> JSONParser::PeekChar() {
+ Optional<StringPiece> chars = PeekChars(1);
+ if (chars)
+ return (*chars)[0];
+ return nullopt;
+}
+
+Optional<StringPiece> JSONParser::ConsumeChars(int count) {
+ Optional<StringPiece> chars = PeekChars(count);
+ if (chars)
+ index_ += count;
+ return chars;
+}
+
+Optional<char> JSONParser::ConsumeChar() {
+ Optional<StringPiece> chars = ConsumeChars(1);
+ if (chars)
+ return (*chars)[0];
+ return nullopt;
}
-void JSONParser::NextNChars(int n) {
- DCHECK(CanConsume(n));
- index_ += n;
- pos_ += n;
+const char* JSONParser::pos() {
+ CHECK_LE(static_cast<size_t>(index_), input_.length());
+ return input_.data() + index_;
}
JSONParser::Token JSONParser::GetNextToken() {
EatWhitespaceAndComments();
- if (!CanConsume(1))
+
+ Optional<char> c = PeekChar();
+ if (!c)
return T_END_OF_INPUT;
- switch (*pos_) {
+ switch (*c) {
case '{':
return T_OBJECT_BEGIN;
case '}':
@@ -241,18 +246,19 @@ JSONParser::Token JSONParser::GetNextToken() {
}
void JSONParser::EatWhitespaceAndComments() {
- while (pos_ < end_pos_) {
- switch (*pos_) {
+ while (Optional<char> c = PeekChar()) {
+ switch (*c) {
case '\r':
case '\n':
index_last_line_ = index_;
// Don't increment line_number_ twice for "\r\n".
- if (!(*pos_ == '\n' && pos_ > start_pos_ && *(pos_ - 1) == '\r'))
+ if (!(c == '\n' && index_ > 0 && input_[index_ - 1] == '\r')) {
++line_number_;
+ }
FALLTHROUGH;
case ' ':
case '\t':
- NextChar();
+ ConsumeChar();
break;
case '/':
if (!EatComment())
@@ -265,34 +271,29 @@ void JSONParser::EatWhitespaceAndComments() {
}
bool JSONParser::EatComment() {
- if (*pos_ != '/' || !CanConsume(1))
- return false;
-
- NextChar();
-
- if (!CanConsume(1))
+ Optional<StringPiece> comment_start = ConsumeChars(2);
+ if (!comment_start)
return false;
- if (*pos_ == '/') {
+ if (comment_start == "//") {
// Single line comment, read to newline.
- while (CanConsume(1)) {
- if (*pos_ == '\n' || *pos_ == '\r')
+ while (Optional<char> c = PeekChar()) {
+ if (c == '\n' || c == '\r')
return true;
- NextChar();
+ ConsumeChar();
}
- } else if (*pos_ == '*') {
+ } else if (comment_start == "/*") {
char previous_char = '\0';
// Block comment, read until end marker.
- while (CanConsume(1)) {
- if (previous_char == '*' && *pos_ == '/') {
- // EatWhitespaceAndComments will inspect pos_, which will still be on
+ while (Optional<char> c = PeekChar()) {
+ if (previous_char == '*' && c == '/') {
+ // EatWhitespaceAndComments will inspect pos(), which will still be on
// the last / of the comment, so advance once more (which may also be
// end of input).
- NextChar();
+ ConsumeChar();
return true;
}
- previous_char = *pos_;
- NextChar();
+ previous_char = *ConsumeChar();
}
// If the comment is unterminated, GetNextToken will report T_END_OF_INPUT.
@@ -333,23 +334,22 @@ Optional<Value> JSONParser::ConsumeDictionary() {
// complete.
const char* initial_pos[16];
for (size_t i = 0; i < arraysize(initial_pos); ++i)
- initial_pos[i] = pos_ + i * 256;
+ initial_pos[i] = pos() + i * 256;
debug::Alias(&initial_pos);
- if (*pos_ != '{') {
+ if (ConsumeChar() != '{') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
return nullopt;
}
StackMarker depth_check(max_depth_, &stack_depth_);
if (depth_check.IsTooDeep()) {
- ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
+ ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 0);
return nullopt;
}
std::vector<Value::DictStorage::value_type> dict_storage;
- NextChar();
Token token = GetNextToken();
while (token != T_OBJECT_END) {
if (token != T_STRING) {
@@ -364,7 +364,6 @@ Optional<Value> JSONParser::ConsumeDictionary() {
}
// Read the separator.
- NextChar();
token = GetNextToken();
if (token != T_OBJECT_PAIR_SEPARATOR) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
@@ -372,7 +371,7 @@ Optional<Value> JSONParser::ConsumeDictionary() {
}
// The next token is the value. Ownership transfers to |dict|.
- NextChar();
+ ConsumeChar();
Optional<Value> value = ParseNextToken();
if (!value) {
// ReportError from deeper level.
@@ -382,10 +381,9 @@ Optional<Value> JSONParser::ConsumeDictionary() {
dict_storage.emplace_back(key.DestructiveAsString(),
std::make_unique<Value>(std::move(*value)));
- NextChar();
token = GetNextToken();
if (token == T_LIST_SEPARATOR) {
- NextChar();
+ ConsumeChar();
token = GetNextToken();
if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
@@ -397,24 +395,25 @@ Optional<Value> JSONParser::ConsumeDictionary() {
}
}
+ ConsumeChar(); // Closing '}'.
+
return Value(Value::DictStorage(std::move(dict_storage), KEEP_LAST_OF_DUPES));
}
Optional<Value> JSONParser::ConsumeList() {
- if (*pos_ != '[') {
+ if (ConsumeChar() != '[') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
return nullopt;
}
StackMarker depth_check(max_depth_, &stack_depth_);
if (depth_check.IsTooDeep()) {
- ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
+ ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 0);
return nullopt;
}
Value::ListStorage list_storage;
- NextChar();
Token token = GetNextToken();
while (token != T_ARRAY_END) {
Optional<Value> item = ParseToken(token);
@@ -425,10 +424,9 @@ Optional<Value> JSONParser::ConsumeList() {
list_storage.push_back(std::move(*item));
- NextChar();
token = GetNextToken();
if (token == T_LIST_SEPARATOR) {
- NextChar();
+ ConsumeChar();
token = GetNextToken();
if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
@@ -440,6 +438,8 @@ Optional<Value> JSONParser::ConsumeList() {
}
}
+ ConsumeChar(); // Closing ']'.
+
return Value(std::move(list_storage));
}
@@ -452,61 +452,40 @@ Optional<Value> JSONParser::ConsumeString() {
}
bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
- if (*pos_ != '"') {
+ if (ConsumeChar() != '"') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
return false;
}
- // Strings are at minimum two characters: the surrounding double quotes.
- if (!CanConsume(2)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return false;
- }
-
// StringBuilder will internally build a StringPiece unless a UTF-16
// conversion occurs, at which point it will perform a copy into a
// std::string.
- StringBuilder string(NextChar());
-
- // Handle the empty string case early.
- if (*pos_ == '"') {
- *out = std::move(string);
- return true;
- }
-
- int length = end_pos_ - start_pos_;
- int32_t next_char = 0;
-
- // There must always be at least two characters left in the stream: the next
- // string character and the terminating closing quote.
- while (CanConsume(2)) {
- int start_index = index_;
- pos_ = start_pos_ + index_; // CBU8_NEXT is postcrement.
- CBU8_NEXT(start_pos_, index_, length, next_char);
- if (next_char < 0 || !IsValidCharacter(next_char)) {
+ StringBuilder string(pos());
+
+ while (PeekChar()) {
+ uint32_t next_char = 0;
+ if (!ReadUnicodeCharacter(input_.data(),
+ static_cast<int32_t>(input_.length()),
+ &index_,
+ &next_char) ||
+ !IsValidCharacter(next_char)) {
if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
return false;
}
- CBU8_NEXT(start_pos_, start_index, length, next_char);
- string.Convert();
- string.AppendString(kUnicodeReplacementString,
- arraysize(kUnicodeReplacementString) - 1);
+ ConsumeChar();
+ string.Append(kUnicodeReplacementPoint);
continue;
}
if (next_char == '"') {
- --index_; // Rewind by one because of CBU8_NEXT.
+ ConsumeChar();
*out = std::move(string);
return true;
- }
-
- // If this character is not an escape sequence...
- if (next_char != '\\') {
- if (next_char < kExtendedASCIIStart)
- string.Append(static_cast<char>(next_char));
- else
- DecodeUTF8(next_char, &string);
+ } else if (next_char != '\\') {
+ // If this character is not an escape sequence...
+ ConsumeChar();
+ string.Append(next_char);
} else {
// And if it is an escape sequence, the input string will be adjusted
// (either by combining the two characters of an encoded escape sequence,
@@ -514,58 +493,42 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
// a conversion.
string.Convert();
- if (!CanConsume(1)) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
- return false;
- }
-
- NextChar();
- if (!CanConsume(1)) {
+ // Read past the escape '\' and ensure there's a character following.
+ Optional<StringPiece> escape_sequence = ConsumeChars(2);
+ if (!escape_sequence) {
ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
return false;
}
- switch (*pos_) {
+ switch ((*escape_sequence)[1]) {
// Allowed esape sequences:
case 'x': { // UTF-8 sequence.
// UTF-8 \x escape sequences are not allowed in the spec, but they
// are supported here for backwards-compatiblity with the old parser.
- if (!CanConsume(3)) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, 1);
+ escape_sequence = ConsumeChars(2);
+ if (!escape_sequence) {
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, -2);
return false;
}
int hex_digit = 0;
- if (!HexStringToInt(StringPiece(NextChar(), 2), &hex_digit) ||
+ if (!HexStringToInt(*escape_sequence, &hex_digit) ||
!IsValidCharacter(hex_digit)) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, -2);
return false;
}
- NextChar();
- if (hex_digit < kExtendedASCIIStart)
- string.Append(static_cast<char>(hex_digit));
- else
- DecodeUTF8(hex_digit, &string);
+ string.Append(hex_digit);
break;
}
case 'u': { // UTF-16 sequence.
// UTF units are of the form \uXXXX.
- if (!CanConsume(5)) { // 5 being 'u' and four HEX digits.
+ uint32_t code_point;
+ if (!DecodeUTF16(&code_point)) {
ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
return false;
}
-
- // Skip the 'u'.
- NextChar();
-
- std::string utf8_units;
- if (!DecodeUTF16(&utf8_units)) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
- return false;
- }
-
- string.AppendString(utf8_units.data(), utf8_units.length());
+ string.Append(code_point);
break;
}
case '"':
@@ -608,28 +571,16 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
}
// Entry is at the first X in \uXXXX.
-bool JSONParser::DecodeUTF16(std::string* dest_string) {
- if (!CanConsume(4))
+bool JSONParser::DecodeUTF16(uint32_t* out_code_point) {
+ Optional<StringPiece> escape_sequence = ConsumeChars(4);
+ if (!escape_sequence)
return false;
- // This is a 32-bit field because the shift operations in the
- // conversion process below cause MSVC to error about "data loss."
- // This only stores UTF-16 code units, though.
// Consume the UTF-16 code unit, which may be a high surrogate.
int code_unit16_high = 0;
- if (!HexStringToInt(StringPiece(pos_, 4), &code_unit16_high))
+ if (!HexStringToInt(*escape_sequence, &code_unit16_high))
return false;
- // Only add 3, not 4, because at the end of this iteration, the parser has
- // finished working with the last digit of the UTF sequence, meaning that
- // the next iteration will advance to the next byte.
- NextNChars(3);
-
- // Used to convert the UTF-16 code units to a code point and then to a UTF-8
- // code unit sequence.
- char code_unit8[8] = { 0 };
- size_t offset = 0;
-
// If this is a high surrogate, consume the next code unit to get the
// low surrogate.
if (CBU16_IS_SURROGATE(code_unit16_high)) {
@@ -640,29 +591,26 @@ bool JSONParser::DecodeUTF16(std::string* dest_string) {
// Make sure that the token has more characters to consume the
// lower surrogate.
- if (!CanConsume(6)) // 6 being '\' 'u' and four HEX digits.
+ if (!ConsumeIfMatch("\\u"))
return false;
- if (*NextChar() != '\\' || *NextChar() != 'u')
+
+ escape_sequence = ConsumeChars(4);
+ if (!escape_sequence)
return false;
- NextChar(); // Read past 'u'.
int code_unit16_low = 0;
- if (!HexStringToInt(StringPiece(pos_, 4), &code_unit16_low))
+ if (!HexStringToInt(*escape_sequence, &code_unit16_low))
return false;
- NextNChars(3);
-
- if (!CBU16_IS_TRAIL(code_unit16_low)) {
+ if (!CBU16_IS_TRAIL(code_unit16_low))
return false;
- }
uint32_t code_point =
CBU16_GET_SUPPLEMENTARY(code_unit16_high, code_unit16_low);
if (!IsValidCharacter(code_point))
return false;
- offset = 0;
- CBU8_APPEND_UNSAFE(code_unit8, offset, code_point);
+ *out_code_point = code_point;
} else {
// Not a surrogate.
DCHECK(CBU16_IS_SINGLE(code_unit16_high));
@@ -670,42 +618,23 @@ bool JSONParser::DecodeUTF16(std::string* dest_string) {
if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
return false;
}
- dest_string->append(kUnicodeReplacementString);
+ *out_code_point = kUnicodeReplacementPoint;
return true;
}
- CBU8_APPEND_UNSAFE(code_unit8, offset, code_unit16_high);
+ *out_code_point = code_unit16_high;
}
- dest_string->append(code_unit8, offset);
return true;
}
-void JSONParser::DecodeUTF8(const int32_t& point, StringBuilder* dest) {
- DCHECK(IsValidCharacter(point));
-
- // Anything outside of the basic ASCII plane will need to be decoded from
- // int32_t to a multi-byte sequence.
- if (point < kExtendedASCIIStart) {
- dest->Append(static_cast<char>(point));
- } else {
- char utf8_units[4] = { 0 };
- int offset = 0;
- CBU8_APPEND_UNSAFE(utf8_units, offset, point);
- dest->Convert();
- // CBU8_APPEND_UNSAFE can overwrite up to 4 bytes, so utf8_units may not be
- // zero terminated at this point. |offset| contains the correct length.
- dest->AppendString(utf8_units, offset);
- }
-}
-
Optional<Value> JSONParser::ConsumeNumber() {
- const char* num_start = pos_;
+ const char* num_start = pos();
const int start_index = index_;
int end_index = start_index;
- if (*pos_ == '-')
- NextChar();
+ if (PeekChar() == '-')
+ ConsumeChar();
if (!ReadInt(false)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
@@ -714,8 +643,8 @@ Optional<Value> JSONParser::ConsumeNumber() {
end_index = index_;
// The optional fraction part.
- if (CanConsume(1) && *pos_ == '.') {
- NextChar();
+ if (PeekChar() == '.') {
+ ConsumeChar();
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
return nullopt;
@@ -724,14 +653,11 @@ Optional<Value> JSONParser::ConsumeNumber() {
}
// Optional exponent part.
- if (CanConsume(1) && (*pos_ == 'e' || *pos_ == 'E')) {
- NextChar();
- if (!CanConsume(1)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullopt;
- }
- if (*pos_ == '-' || *pos_ == '+') {
- NextChar();
+ Optional<char> c = PeekChar();
+ if (c == 'e' || c == 'E') {
+ ConsumeChar();
+ if (PeekChar() == '-' || PeekChar() == '+') {
+ ConsumeChar();
}
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
@@ -744,8 +670,7 @@ Optional<Value> JSONParser::ConsumeNumber() {
// so save off where the parser should be on exit (see Consume invariant at
// the top of the header), then make sure the next token is one which is
// valid.
- const char* exit_pos = pos_ - 1;
- int exit_index = index_ - 1;
+ int exit_index = index_;
switch (GetNextToken()) {
case T_OBJECT_END:
@@ -758,7 +683,6 @@ Optional<Value> JSONParser::ConsumeNumber() {
return nullopt;
}
- pos_ = exit_pos;
index_ = exit_index;
StringPiece num_string(num_start, end_index - start_index);
@@ -780,15 +704,15 @@ bool JSONParser::ReadInt(bool allow_leading_zeros) {
size_t len = 0;
char first = 0;
- while (CanConsume(1)) {
- if (!IsAsciiDigit(*pos_))
+ while (Optional<char> c = PeekChar()) {
+ if (!IsAsciiDigit(c))
break;
if (len == 0)
- first = *pos_;
+ first = *c;
++len;
- NextChar();
+ ConsumeChar();
}
if (len == 0)
@@ -801,49 +725,24 @@ bool JSONParser::ReadInt(bool allow_leading_zeros) {
}
Optional<Value> JSONParser::ConsumeLiteral() {
- switch (*pos_) {
- case 't': {
- const char kTrueLiteral[] = "true";
- const int kTrueLen = static_cast<int>(strlen(kTrueLiteral));
- if (!CanConsume(kTrueLen) ||
- !StringsAreEqual(pos_, kTrueLiteral, kTrueLen)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullopt;
- }
- NextNChars(kTrueLen - 1);
- return Value(true);
- }
- case 'f': {
- const char kFalseLiteral[] = "false";
- const int kFalseLen = static_cast<int>(strlen(kFalseLiteral));
- if (!CanConsume(kFalseLen) ||
- !StringsAreEqual(pos_, kFalseLiteral, kFalseLen)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullopt;
- }
- NextNChars(kFalseLen - 1);
- return Value(false);
- }
- case 'n': {
- const char kNullLiteral[] = "null";
- const int kNullLen = static_cast<int>(strlen(kNullLiteral));
- if (!CanConsume(kNullLen) ||
- !StringsAreEqual(pos_, kNullLiteral, kNullLen)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullopt;
- }
- NextNChars(kNullLen - 1);
- return Value(Value::Type::NONE);
- }
- default:
- ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return nullopt;
+ if (ConsumeIfMatch("true")) {
+ return Value(true);
+ } else if (ConsumeIfMatch("false")) {
+ return Value(false);
+ } else if (ConsumeIfMatch("null")) {
+ return Value(Value::Type::NONE);
+ } else {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullopt;
}
}
-// static
-bool JSONParser::StringsAreEqual(const char* one, const char* two, size_t len) {
- return strncmp(one, two, len) == 0;
+bool JSONParser::ConsumeIfMatch(StringPiece match) {
+ if (match == PeekChars(match.size())) {
+ ConsumeChars(match.size());
+ return true;
+ }
+ return false;
}
void JSONParser::ReportError(JSONReader::JsonParseError code,
diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h
index 3fb87003a16..a4dd2ba365e 100644
--- a/chromium/base/json/json_parser.h
+++ b/chromium/base/json/json_parser.h
@@ -31,17 +31,16 @@ class JSONParserTest;
// to be used directly; it encapsulates logic that need not be exposed publicly.
//
// This parser guarantees O(n) time through the input string. Iteration happens
-// on the byte level, with the functions CanConsume and NextChar. The conversion
-// from byte to JSON token happens without advancing the parser in
+// on the byte level, with the functions ConsumeChars() and ConsumeChar(). The
+// conversion from byte to JSON token happens without advancing the parser in
// GetNextToken/ParseToken, that is tokenization operates on the current parser
// position without advancing.
//
// Built on top of these are a family of Consume functions that iterate
// internally. Invariant: on entry of a Consume function, the parser is wound
-// to the first byte of a valid JSON token. On exit, it is on the last byte
-// of a token, such that the next iteration of the parser will be at the byte
-// immediately following the token, which would likely be the first byte of the
-// next token.
+// to the first byte of a valid JSON token. On exit, it is on the first byte
+// after the token that was just consumed, which would likely be the first byte
+// of the next token.
class BASE_EXPORT JSONParser {
public:
JSONParser(int options, int max_depth = JSONReader::kStackMaxDepth);
@@ -101,26 +100,16 @@ class BASE_EXPORT JSONParser {
StringBuilder& operator=(StringBuilder&& other);
- // Either increases the |length_| of the string or copies the character if
- // the StringBuilder has been converted. |c| must be in the basic ASCII
- // plane; all other characters need to be in UTF-8 units, appended with
- // AppendString below.
- void Append(const char& c);
-
- // Appends a string to the std::string. Must be Convert()ed to use.
- void AppendString(const char* str, size_t len);
+ // Appends the Unicode code point |point| to the string, either by
+ // increasing the |length_| of the string if the string has not been
+ // converted, or by appending the UTF8 bytes for the code point.
+ void Append(uint32_t point);
// Converts the builder from its default StringPiece to a full std::string,
// performing a copy. Once a builder is converted, it cannot be made a
// StringPiece again.
void Convert();
- // Returns the builder as a StringPiece.
- StringPiece AsStringPiece();
-
- // Returns the builder as a std::string.
- const std::string& AsString();
-
// Returns the builder as a string, invalidating all state. This allows
// the internal string buffer representation to be destructively moved
// in cases where the builder will not be needed any more.
@@ -138,15 +127,22 @@ class BASE_EXPORT JSONParser {
base::Optional<std::string> string_;
};
- // Quick check that the stream has capacity to consume |length| more bytes.
- bool CanConsume(int length);
+ // Returns the next |count| bytes of the input stream, or nullopt if fewer
+ // than |count| bytes remain.
+ Optional<StringPiece> PeekChars(int count);
+
+ // Calls PeekChars() with a |count| of 1.
+ Optional<char> PeekChar();
- // The basic way to consume a single character in the stream. Consumes one
- // byte of the input stream and returns a pointer to the rest of it.
- const char* NextChar();
+ // Returns the next |count| bytes of the input stream, or nullopt if fewer
+ // than |count| bytes remain, and advances the parser position by |count|.
+ Optional<StringPiece> ConsumeChars(int count);
- // Performs the equivalent of NextChar N times.
- void NextNChars(int n);
+ // Calls ConsumeChars() with a |count| of 1.
+ Optional<char> ConsumeChar();
+
+ // Returns a pointer to the current character position.
+ const char* pos();
// Skips over whitespace and comments to find the next token in the stream.
// This does not advance the parser for non-whitespace or comment chars.
@@ -185,13 +181,8 @@ class BASE_EXPORT JSONParser {
// Helper function for ConsumeStringRaw() that consumes the next four or 10
// bytes (parser is wound to the first character of a HEX sequence, with the
// potential for consuming another \uXXXX for a surrogate). Returns true on
- // success and places the UTF8 code units in |dest_string|, and false on
- // failure.
- bool DecodeUTF16(std::string* dest_string);
- // Helper function for ConsumeStringRaw() that takes a single code point,
- // decodes it into UTF-8 units, and appends it to the given builder. The
- // point must be valid.
- void DecodeUTF8(const int32_t& point, StringBuilder* dest);
+  // success and places the code point in |out_code_point|, and false on failure.
+ bool DecodeUTF16(uint32_t* out_code_point);
// Assuming that the parser is wound to the start of a valid JSON number,
// this parses and converts it to either an int or double value.
@@ -204,8 +195,11 @@ class BASE_EXPORT JSONParser {
// parser is wound to the first character of any of those.
Optional<Value> ConsumeLiteral();
- // Compares two string buffers of a given length.
- static bool StringsAreEqual(const char* left, const char* right, size_t len);
+  // Helper function that returns true if the byte sequence |match| can be
+ // consumed at the current parser position. Returns false if there are fewer
+ // than |match|-length bytes or if the sequence does not match, and the
+ // parser state is unchanged.
+ bool ConsumeIfMatch(StringPiece match);
// Sets the error information to |code| at the current column, based on
// |index_| and |index_last_line_|, with an optional positive/negative
@@ -223,15 +217,8 @@ class BASE_EXPORT JSONParser {
// Maximum depth to parse.
const int max_depth_;
- // Pointer to the start of the input data.
- const char* start_pos_;
-
- // Pointer to the current position in the input data. Equivalent to
- // |start_pos_ + index_|.
- const char* pos_;
-
- // Pointer to the last character of the input data.
- const char* end_pos_;
+  // The input stream being parsed. Note: Not guaranteed to be NUL-terminated.
+ StringPiece input_;
// The index in the input stream to which the parser is wound.
int index_;
diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc
index e2f9e32e0e1..25a355914a5 100644
--- a/chromium/base/json/json_parser_unittest.cc
+++ b/chromium/base/json/json_parser_unittest.cc
@@ -23,9 +23,8 @@ class JSONParserTest : public testing::Test {
JSONParser* NewTestParser(const std::string& input,
int options = JSON_PARSE_RFC) {
JSONParser* parser = new JSONParser(options);
- parser->start_pos_ = input.data();
- parser->pos_ = parser->start_pos_;
- parser->end_pos_ = parser->start_pos_ + input.length();
+ parser->input_ = input;
+ parser->index_ = 0;
return parser;
}
@@ -41,10 +40,12 @@ class JSONParserTest : public testing::Test {
}
void TestLastThree(JSONParser* parser) {
- EXPECT_EQ(',', *parser->NextChar());
- EXPECT_EQ('|', *parser->NextChar());
- EXPECT_EQ('\0', *parser->NextChar());
- EXPECT_EQ(parser->end_pos_, parser->pos_);
+ EXPECT_EQ(',', *parser->PeekChar());
+ parser->ConsumeChar();
+ EXPECT_EQ('|', *parser->PeekChar());
+ parser->ConsumeChar();
+ EXPECT_EQ('\0', *parser->pos());
+ EXPECT_EQ(static_cast<size_t>(parser->index_), parser->input_.length());
}
};
@@ -52,18 +53,21 @@ TEST_F(JSONParserTest, NextChar) {
std::string input("Hello world");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
- EXPECT_EQ('H', *parser->pos_);
+ EXPECT_EQ('H', *parser->pos());
for (size_t i = 1; i < input.length(); ++i) {
- EXPECT_EQ(input[i], *parser->NextChar());
+ parser->ConsumeChar();
+ EXPECT_EQ(input[i], *parser->PeekChar());
}
- EXPECT_EQ(parser->end_pos_, parser->NextChar());
+ parser->ConsumeChar();
+ EXPECT_EQ('\0', *parser->pos());
+ EXPECT_EQ(static_cast<size_t>(parser->index_), parser->input_.length());
}
TEST_F(JSONParserTest, ConsumeString) {
std::string input("\"test\",|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
Optional<Value> value(parser->ConsumeString());
- EXPECT_EQ('"', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -77,7 +81,7 @@ TEST_F(JSONParserTest, ConsumeList) {
std::string input("[true, false],|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
Optional<Value> value(parser->ConsumeList());
- EXPECT_EQ(']', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -91,7 +95,7 @@ TEST_F(JSONParserTest, ConsumeDictionary) {
std::string input("{\"abc\":\"def\"},|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
Optional<Value> value(parser->ConsumeDictionary());
- EXPECT_EQ('}', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -108,7 +112,7 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
std::string input("true,|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
Optional<Value> value(parser->ConsumeLiteral());
- EXPECT_EQ('e', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -121,7 +125,7 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
input = "false,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeLiteral();
- EXPECT_EQ('e', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -133,7 +137,7 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
input = "null,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeLiteral();
- EXPECT_EQ('l', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -146,7 +150,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
std::string input("1234,|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
Optional<Value> value(parser->ConsumeNumber());
- EXPECT_EQ('4', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -159,7 +163,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
input = "-1234,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeNumber();
- EXPECT_EQ('4', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -171,7 +175,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
input = "12.34,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeNumber();
- EXPECT_EQ('4', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -184,7 +188,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
input = "42e3,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeNumber();
- EXPECT_EQ('3', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -196,7 +200,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
input = "314159e-5,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeNumber();
- EXPECT_EQ('5', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -208,7 +212,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
input = "0.42e+3,|";
parser.reset(NewTestParser(input));
value = parser->ConsumeNumber();
- EXPECT_EQ('3', *parser->pos_);
+ EXPECT_EQ(',', *parser->pos());
TestLastThree(parser.get());
@@ -319,7 +323,7 @@ TEST_F(JSONParserTest, ErrorMessages) {
root = JSONReader::ReadAndReturnError(("[\"\\ufffe\"]"), JSON_PARSE_RFC,
&error_code, &error_message);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 8, JSONReader::kInvalidEscape),
error_message);
EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
}
@@ -436,6 +440,8 @@ TEST_F(JSONParserTest, UnterminatedInputs) {
"nul",
"\"\\x2",
"\"\\u123",
+ "\"\\",
+ "\"\\/",
// clang-format on
};
diff --git a/chromium/base/json/json_reader.cc b/chromium/base/json/json_reader.cc
index 40ada3dc21a..bf2a18a5e56 100644
--- a/chromium/base/json/json_reader.cc
+++ b/chromium/base/json/json_reader.cc
@@ -38,6 +38,8 @@ const char JSONReader::kUnsupportedEncoding[] =
"Unsupported encoding. JSON must be UTF-8.";
const char JSONReader::kUnquotedDictionaryKey[] =
"Dictionary keys must be quoted.";
+const char JSONReader::kInputTooLarge[] =
+ "Input string is too large (>2GB).";
JSONReader::JSONReader(int options, int max_depth)
: parser_(new internal::JSONParser(options, max_depth)) {}
@@ -99,10 +101,13 @@ std::string JSONReader::ErrorCodeToString(JsonParseError error_code) {
return kUnsupportedEncoding;
case JSON_UNQUOTED_DICTIONARY_KEY:
return kUnquotedDictionaryKey;
- default:
- NOTREACHED();
- return std::string();
+ case JSON_TOO_LARGE:
+ return kInputTooLarge;
+ case JSON_PARSE_ERROR_COUNT:
+ break;
}
+ NOTREACHED();
+ return std::string();
}
std::unique_ptr<Value> JSONReader::ReadToValue(StringPiece json) {
diff --git a/chromium/base/json/json_reader.h b/chromium/base/json/json_reader.h
index 1277a497e40..2c6bd3e479e 100644
--- a/chromium/base/json/json_reader.h
+++ b/chromium/base/json/json_reader.h
@@ -71,6 +71,7 @@ class BASE_EXPORT JSONReader {
JSON_UNEXPECTED_DATA_AFTER_ROOT,
JSON_UNSUPPORTED_ENCODING,
JSON_UNQUOTED_DICTIONARY_KEY,
+ JSON_TOO_LARGE,
JSON_PARSE_ERROR_COUNT
};
@@ -83,6 +84,7 @@ class BASE_EXPORT JSONReader {
static const char kUnexpectedDataAfterRoot[];
static const char kUnsupportedEncoding[];
static const char kUnquotedDictionaryKey[];
+ static const char kInputTooLarge[];
// Constructs a reader.
JSONReader(int options = JSON_PARSE_RFC, int max_depth = kStackMaxDepth);
diff --git a/chromium/base/json/json_reader_fuzzer.cc b/chromium/base/json/json_reader_fuzzer.cc
new file mode 100644
index 00000000000..a8490da179f
--- /dev/null
+++ b/chromium/base/json/json_reader_fuzzer.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+#include "base/values.h"
+
+int error_code, error_line, error_column;
+std::string error_message;
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if (size < 2)
+ return 0;
+
+ // Create a copy of input buffer, as otherwise we don't catch
+ // overflow that touches the last byte (which is used in options).
+ std::unique_ptr<char[]> input(new char[size - 1]);
+ memcpy(input.get(), data, size - 1);
+
+ base::StringPiece input_string(input.get(), size - 1);
+
+ const int options = data[size - 1];
+ base::JSONReader::ReadAndReturnError(input_string, options, &error_code,
+ &error_message, &error_line,
+ &error_column);
+
+ return 0;
+}
diff --git a/chromium/base/json/json_reader_unittest.cc b/chromium/base/json/json_reader_unittest.cc
index 0297bbf19af..38a4e9e90a1 100644
--- a/chromium/base/json/json_reader_unittest.cc
+++ b/chromium/base/json/json_reader_unittest.cc
@@ -21,552 +21,523 @@
namespace base {
-TEST(JSONReaderTest, Reading) {
- {
- // some whitespace checking
- std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_none());
- }
+TEST(JSONReaderTest, Whitespace) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_none());
+}
- {
- // Invalid JSON string
- EXPECT_FALSE(JSONReader().ReadToValue("nu"));
- }
+TEST(JSONReaderTest, InvalidString) {
+ EXPECT_FALSE(JSONReader().ReadToValue("nu"));
+}
- {
- // Simple bool
- std::unique_ptr<Value> root = JSONReader().ReadToValue("true ");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_bool());
- }
+TEST(JSONReaderTest, SimpleBool) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("true ");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_bool());
+}
- {
- // Embedded comment
- std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_none());
- root = JSONReader().ReadToValue("40 /* comment */");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_int());
- root = JSONReader().ReadToValue("true // comment");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_bool());
- root = JSONReader().ReadToValue("/* comment */\"sample string\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string value;
- EXPECT_TRUE(root->GetAsString(&value));
- EXPECT_EQ("sample string", value);
- std::unique_ptr<ListValue> list = ListValue::From(
- JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]"));
- ASSERT_TRUE(list);
- EXPECT_EQ(2u, list->GetSize());
- int int_val = 0;
- EXPECT_TRUE(list->GetInteger(0, &int_val));
- EXPECT_EQ(1, int_val);
- EXPECT_TRUE(list->GetInteger(1, &int_val));
- EXPECT_EQ(3, int_val);
- list = ListValue::From(JSONReader().ReadToValue("[1, /*a*/2, 3]"));
- ASSERT_TRUE(list);
- EXPECT_EQ(3u, list->GetSize());
- root = JSONReader().ReadToValue("/* comment **/42");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_int());
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(42, int_val);
- root = JSONReader().ReadToValue(
- "/* comment **/\n"
- "// */ 43\n"
- "44");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_int());
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(44, int_val);
- }
+TEST(JSONReaderTest, EmbeddedComments) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_none());
+ root = JSONReader().ReadToValue("40 /* comment */");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_int());
+ root = JSONReader().ReadToValue("true // comment");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_bool());
+ root = JSONReader().ReadToValue("/* comment */\"sample string\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string value;
+ EXPECT_TRUE(root->GetAsString(&value));
+ EXPECT_EQ("sample string", value);
+ std::unique_ptr<ListValue> list =
+ ListValue::From(JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(2u, list->GetSize());
+ int int_val = 0;
+ EXPECT_TRUE(list->GetInteger(0, &int_val));
+ EXPECT_EQ(1, int_val);
+ EXPECT_TRUE(list->GetInteger(1, &int_val));
+ EXPECT_EQ(3, int_val);
+ list = ListValue::From(JSONReader().ReadToValue("[1, /*a*/2, 3]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(3u, list->GetSize());
+ root = JSONReader().ReadToValue("/* comment **/42");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_int());
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(42, int_val);
+ root = JSONReader().ReadToValue(
+ "/* comment **/\n"
+ "// */ 43\n"
+ "44");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_int());
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(44, int_val);
+}
- {
- // Test number formats
- std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_int());
- int int_val = 0;
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(43, int_val);
- }
+TEST(JSONReaderTest, Ints) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_int());
+ int int_val = 0;
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(43, int_val);
+}
- {
- // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
- EXPECT_FALSE(JSONReader().ReadToValue("043"));
- EXPECT_FALSE(JSONReader().ReadToValue("0x43"));
- EXPECT_FALSE(JSONReader().ReadToValue("00"));
- }
+TEST(JSONReaderTest, NonDecimalNumbers) {
+ // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
+ EXPECT_FALSE(JSONReader().ReadToValue("043"));
+ EXPECT_FALSE(JSONReader().ReadToValue("0x43"));
+ EXPECT_FALSE(JSONReader().ReadToValue("00"));
+}
- {
- // Test 0 (which needs to be special cased because of the leading zero
- // clause).
- std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_int());
- int int_val = 1;
- EXPECT_TRUE(root->GetAsInteger(&int_val));
- EXPECT_EQ(0, int_val);
- }
+TEST(JSONReaderTest, NumberZero) {
+ // Test 0 (which needs to be special cased because of the leading zero
+ // clause).
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_int());
+ int int_val = 1;
+ EXPECT_TRUE(root->GetAsInteger(&int_val));
+ EXPECT_EQ(0, int_val);
+}
- {
- // Numbers that overflow ints should succeed, being internally promoted to
- // storage as doubles
- std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
- ASSERT_TRUE(root);
- double double_val;
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(2147483648.0, double_val);
- root = JSONReader().ReadToValue("-2147483649");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
- }
+TEST(JSONReaderTest, LargeIntPromotion) {
+ // Numbers that overflow ints should succeed, being internally promoted to
+ // storage as doubles
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
+ ASSERT_TRUE(root);
+ double double_val;
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(2147483648.0, double_val);
+ root = JSONReader().ReadToValue("-2147483649");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
+}
- {
- // Parse a double
- std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(43.1, double_val);
+TEST(JSONReaderTest, Doubles) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(43.1, double_val);
- root = JSONReader().ReadToValue("4.3e-1");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(.43, double_val);
+ root = JSONReader().ReadToValue("4.3e-1");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(.43, double_val);
- root = JSONReader().ReadToValue("2.1e0");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(2.1, double_val);
+ root = JSONReader().ReadToValue("2.1e0");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(2.1, double_val);
- root = JSONReader().ReadToValue("2.1e+0001");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(21.0, double_val);
+ root = JSONReader().ReadToValue("2.1e+0001");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(21.0, double_val);
- root = JSONReader().ReadToValue("0.01");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(0.01, double_val);
+ root = JSONReader().ReadToValue("0.01");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(0.01, double_val);
- root = JSONReader().ReadToValue("1.00");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_double());
- double_val = 0.0;
- EXPECT_TRUE(root->GetAsDouble(&double_val));
- EXPECT_DOUBLE_EQ(1.0, double_val);
- }
+ root = JSONReader().ReadToValue("1.00");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_double());
+ double_val = 0.0;
+ EXPECT_TRUE(root->GetAsDouble(&double_val));
+ EXPECT_DOUBLE_EQ(1.0, double_val);
+}
- {
- // Fractional parts must have a digit before and after the decimal point.
- EXPECT_FALSE(JSONReader().ReadToValue("1."));
- EXPECT_FALSE(JSONReader().ReadToValue(".1"));
- EXPECT_FALSE(JSONReader().ReadToValue("1.e10"));
- }
+TEST(JSONReaderTest, FractionalNumbers) {
+ // Fractional parts must have a digit before and after the decimal point.
+ EXPECT_FALSE(JSONReader().ReadToValue("1."));
+ EXPECT_FALSE(JSONReader().ReadToValue(".1"));
+ EXPECT_FALSE(JSONReader().ReadToValue("1.e10"));
+}
- {
- // Exponent must have a digit following the 'e'.
- EXPECT_FALSE(JSONReader().ReadToValue("1e"));
- EXPECT_FALSE(JSONReader().ReadToValue("1E"));
- EXPECT_FALSE(JSONReader().ReadToValue("1e1."));
- EXPECT_FALSE(JSONReader().ReadToValue("1e1.0"));
- }
+TEST(JSONReaderTest, ExponentialNumbers) {
+ // Exponent must have a digit following the 'e'.
+ EXPECT_FALSE(JSONReader().ReadToValue("1e"));
+ EXPECT_FALSE(JSONReader().ReadToValue("1E"));
+ EXPECT_FALSE(JSONReader().ReadToValue("1e1."));
+ EXPECT_FALSE(JSONReader().ReadToValue("1e1.0"));
+}
- {
- // INF/-INF/NaN are not valid
- EXPECT_FALSE(JSONReader().ReadToValue("1e1000"));
- EXPECT_FALSE(JSONReader().ReadToValue("-1e1000"));
- EXPECT_FALSE(JSONReader().ReadToValue("NaN"));
- EXPECT_FALSE(JSONReader().ReadToValue("nan"));
- EXPECT_FALSE(JSONReader().ReadToValue("inf"));
- }
+TEST(JSONReaderTest, InvalidNAN) {
+ EXPECT_FALSE(JSONReader().ReadToValue("1e1000"));
+ EXPECT_FALSE(JSONReader().ReadToValue("-1e1000"));
+ EXPECT_FALSE(JSONReader().ReadToValue("NaN"));
+ EXPECT_FALSE(JSONReader().ReadToValue("nan"));
+ EXPECT_FALSE(JSONReader().ReadToValue("inf"));
+}
- {
- // Invalid number formats
- EXPECT_FALSE(JSONReader().ReadToValue("4.3.1"));
- EXPECT_FALSE(JSONReader().ReadToValue("4e3.1"));
- }
+TEST(JSONReaderTest, InvalidNumbers) {
+ EXPECT_FALSE(JSONReader().ReadToValue("4.3.1"));
+ EXPECT_FALSE(JSONReader().ReadToValue("4e3.1"));
+}
- {
- // Test string parser
- std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("hello world", str_val);
- }
+TEST(JSONReader, SimpleString) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("hello world", str_val);
+}
- {
- // Empty string
- std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("", str_val);
- }
+TEST(JSONReaderTest, EmptyString) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("", str_val);
+}
- {
- // Test basic string escapes
- std::unique_ptr<Value> root =
- JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
- }
+TEST(JSONReaderTest, BasicStringEscapes) {
+ std::unique_ptr<Value> root =
+ JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
+}
- {
- // Test hex and unicode escapes including the null character.
- std::unique_ptr<Value> root =
- JSONReader().ReadToValue("\"\\x41\\x00\\u1234\\u0000\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(std::wstring(L"A\0\x1234\0", 4), UTF8ToWide(str_val));
- }
+TEST(JSONReaderTest, UnicodeEscapes) {
+ // Test hex and unicode escapes including the null character.
+ std::unique_ptr<Value> root =
+ JSONReader().ReadToValue("\"\\x41\\x00\\u1234\\u0000\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(std::wstring(L"A\0\x1234\0", 4), UTF8ToWide(str_val));
+}
- {
- // Test invalid strings
- EXPECT_FALSE(JSONReader().ReadToValue("\"no closing quote"));
- EXPECT_FALSE(JSONReader().ReadToValue("\"\\z invalid escape char\""));
- EXPECT_FALSE(JSONReader().ReadToValue("\"\\xAQ invalid hex code\""));
- EXPECT_FALSE(JSONReader().ReadToValue("not enough hex chars\\x1\""));
- EXPECT_FALSE(JSONReader().ReadToValue("\"not enough escape chars\\u123\""));
- EXPECT_FALSE(
- JSONReader().ReadToValue("\"extra backslash at end of input\\\""));
- }
+TEST(JSONReaderTest, InvalidStrings) {
+ EXPECT_FALSE(JSONReader().ReadToValue("\"no closing quote"));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"\\z invalid escape char\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"\\xAQ invalid hex code\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("not enough hex chars\\x1\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"not enough escape chars\\u123\""));
+ EXPECT_FALSE(
+ JSONReader().ReadToValue("\"extra backslash at end of input\\\""));
+}
- {
- // Basic array
- std::unique_ptr<ListValue> list =
- ListValue::From(JSONReader::Read("[true, false, null]"));
- ASSERT_TRUE(list);
- EXPECT_EQ(3U, list->GetSize());
-
- // Test with trailing comma. Should be parsed the same as above.
- std::unique_ptr<Value> root2 =
- JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_TRUE(list->Equals(root2.get()));
- }
+TEST(JSONReaderTest, BasicArray) {
+ std::unique_ptr<ListValue> list =
+ ListValue::From(JSONReader::Read("[true, false, null]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(3U, list->GetSize());
- {
- // Empty array
- std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read("[]"));
- ASSERT_TRUE(list);
- EXPECT_EQ(0U, list->GetSize());
- }
+ // Test with trailing comma. Should be parsed the same as above.
+ std::unique_ptr<Value> root2 =
+ JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
+ EXPECT_TRUE(list->Equals(root2.get()));
+}
- {
- // Nested arrays
- std::unique_ptr<ListValue> list = ListValue::From(
- JSONReader::Read("[[true], [], [false, [], [null]], null]"));
- ASSERT_TRUE(list);
- EXPECT_EQ(4U, list->GetSize());
-
- // Lots of trailing commas.
- std::unique_ptr<Value> root2 =
- JSONReader::Read("[[true], [], [false, [], [null, ] , ], null,]",
- JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_TRUE(list->Equals(root2.get()));
- }
+TEST(JSONReaderTest, EmptyArray) {
+ std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read("[]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(0U, list->GetSize());
+}
- {
- // Invalid, missing close brace.
- EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
+TEST(JSONReaderTest, NestedArrays) {
+ std::unique_ptr<ListValue> list = ListValue::From(
+ JSONReader::Read("[[true], [], [false, [], [null]], null]"));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(4U, list->GetSize());
+
+ // Lots of trailing commas.
+ std::unique_ptr<Value> root2 =
+ JSONReader::Read("[[true], [], [false, [], [null, ] , ], null,]",
+ JSON_ALLOW_TRAILING_COMMAS);
+ EXPECT_TRUE(list->Equals(root2.get()));
+}
- // Invalid, too many commas
- EXPECT_FALSE(JSONReader::Read("[true,, null]"));
- EXPECT_FALSE(JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS));
+TEST(JSONReaderTest, InvalidArrays) {
+ // Missing close brace.
+ EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
- // Invalid, no commas
- EXPECT_FALSE(JSONReader::Read("[true null]"));
+ // Too many commas.
+ EXPECT_FALSE(JSONReader::Read("[true,, null]"));
+ EXPECT_FALSE(JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS));
- // Invalid, trailing comma
- EXPECT_FALSE(JSONReader::Read("[true,]"));
- }
+ // No commas.
+ EXPECT_FALSE(JSONReader::Read("[true null]"));
- {
- // Valid if we set |allow_trailing_comma| to true.
- std::unique_ptr<ListValue> list = ListValue::From(
- JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS));
- ASSERT_TRUE(list);
- EXPECT_EQ(1U, list->GetSize());
- Value* tmp_value = nullptr;
- ASSERT_TRUE(list->Get(0, &tmp_value));
- EXPECT_TRUE(tmp_value->is_bool());
- bool bool_value = false;
- EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
- EXPECT_TRUE(bool_value);
- }
+ // Trailing comma.
+ EXPECT_FALSE(JSONReader::Read("[true,]"));
+}
- {
- // Don't allow empty elements, even if |allow_trailing_comma| is
- // true.
- EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
- EXPECT_FALSE(JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS));
- EXPECT_FALSE(JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS));
- EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
- }
+TEST(JSONReaderTest, ArrayTrailingComma) {
+ // Valid if we set |allow_trailing_comma| to true.
+ std::unique_ptr<ListValue> list =
+ ListValue::From(JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(1U, list->GetSize());
+ Value* tmp_value = nullptr;
+ ASSERT_TRUE(list->Get(0, &tmp_value));
+ EXPECT_TRUE(tmp_value->is_bool());
+ bool bool_value = false;
+ EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
+ EXPECT_TRUE(bool_value);
+}
- {
- // Test objects
- std::unique_ptr<DictionaryValue> dict_val =
- DictionaryValue::From(JSONReader::Read("{}"));
- ASSERT_TRUE(dict_val);
-
- dict_val = DictionaryValue::From(JSONReader::Read(
- "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }"));
- ASSERT_TRUE(dict_val);
- double double_val = 0.0;
- EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
- EXPECT_DOUBLE_EQ(9.87654321, double_val);
- Value* null_val = nullptr;
- ASSERT_TRUE(dict_val->Get("null", &null_val));
- EXPECT_TRUE(null_val->is_none());
- std::string str_val;
- EXPECT_TRUE(dict_val->GetString("S", &str_val));
- EXPECT_EQ("str", str_val);
-
- std::unique_ptr<Value> root2 = JSONReader::Read(
- "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
- JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root2);
- EXPECT_TRUE(dict_val->Equals(root2.get()));
-
- // Test newline equivalence.
- root2 = JSONReader::Read(
- "{\n"
- " \"number\":9.87654321,\n"
- " \"null\":null,\n"
- " \"\\x53\":\"str\",\n"
- "}\n",
- JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root2);
- EXPECT_TRUE(dict_val->Equals(root2.get()));
-
- root2 = JSONReader::Read(
- "{\r\n"
- " \"number\":9.87654321,\r\n"
- " \"null\":null,\r\n"
- " \"\\x53\":\"str\",\r\n"
- "}\r\n",
- JSON_ALLOW_TRAILING_COMMAS);
- ASSERT_TRUE(root2);
- EXPECT_TRUE(dict_val->Equals(root2.get()));
- }
+TEST(JSONReaderTest, ArrayTrailingCommaNoEmptyElements) {
+ // Don't allow empty elements, even if |allow_trailing_comma| is
+ // true.
+ EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
+}
- {
- // Test nesting
- std::unique_ptr<DictionaryValue> dict_val =
- DictionaryValue::From(JSONReader::Read(
- "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}"));
- ASSERT_TRUE(dict_val);
- DictionaryValue* inner_dict = nullptr;
- ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
- ListValue* inner_array = nullptr;
- ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
- EXPECT_EQ(1U, inner_array->GetSize());
- bool bool_value = true;
- EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
- EXPECT_FALSE(bool_value);
- inner_dict = nullptr;
- EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
-
- std::unique_ptr<Value> root2 = JSONReader::Read(
- "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
- JSON_ALLOW_TRAILING_COMMAS);
- EXPECT_TRUE(dict_val->Equals(root2.get()));
- }
+TEST(JSONReaderTest, EmptyDictionary) {
+ std::unique_ptr<DictionaryValue> dict_val =
+ DictionaryValue::From(JSONReader::Read("{}"));
+ ASSERT_TRUE(dict_val);
+}
- {
- // Test keys with periods
- std::unique_ptr<DictionaryValue> dict_val = DictionaryValue::From(
- JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}"));
- ASSERT_TRUE(dict_val);
- int integer_value = 0;
- EXPECT_TRUE(
- dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
- EXPECT_EQ(3, integer_value);
- EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
- EXPECT_EQ(2, integer_value);
- DictionaryValue* inner_dict = nullptr;
- ASSERT_TRUE(
- dict_val->GetDictionaryWithoutPathExpansion("d.e.f", &inner_dict));
- EXPECT_EQ(1U, inner_dict->size());
- EXPECT_TRUE(
- inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j", &integer_value));
- EXPECT_EQ(1, integer_value);
-
- dict_val =
- DictionaryValue::From(JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}"));
- ASSERT_TRUE(dict_val);
- EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
- EXPECT_EQ(2, integer_value);
- EXPECT_TRUE(
- dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
- EXPECT_EQ(1, integer_value);
- }
+TEST(JSONReaderTest, CompleteDictionary) {
+ auto dict_val = DictionaryValue::From(JSONReader::Read(
+ "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }"));
+ ASSERT_TRUE(dict_val);
+ double double_val = 0.0;
+ EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
+ EXPECT_DOUBLE_EQ(9.87654321, double_val);
+ Value* null_val = nullptr;
+ ASSERT_TRUE(dict_val->Get("null", &null_val));
+ EXPECT_TRUE(null_val->is_none());
+ std::string str_val;
+ EXPECT_TRUE(dict_val->GetString("S", &str_val));
+ EXPECT_EQ("str", str_val);
+
+ std::unique_ptr<Value> root2 = JSONReader::Read(
+ "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
+ JSON_ALLOW_TRAILING_COMMAS);
+ ASSERT_TRUE(root2);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+ // Test newline equivalence.
+ root2 = JSONReader::Read(
+ "{\n"
+ " \"number\":9.87654321,\n"
+ " \"null\":null,\n"
+ " \"\\x53\":\"str\",\n"
+ "}\n",
+ JSON_ALLOW_TRAILING_COMMAS);
+ ASSERT_TRUE(root2);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+ root2 = JSONReader::Read(
+ "{\r\n"
+ " \"number\":9.87654321,\r\n"
+ " \"null\":null,\r\n"
+ " \"\\x53\":\"str\",\r\n"
+ "}\r\n",
+ JSON_ALLOW_TRAILING_COMMAS);
+ ASSERT_TRUE(root2);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+}
- {
- // Invalid, no closing brace
- EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
-
- // Invalid, keys must be quoted
- EXPECT_FALSE(JSONReader::Read("{foo:true}"));
-
- // Invalid, trailing comma
- EXPECT_FALSE(JSONReader::Read("{\"a\":true,}"));
-
- // Invalid, too many commas
- EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}"));
- EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
- JSON_ALLOW_TRAILING_COMMAS));
-
- // Invalid, no separator
- EXPECT_FALSE(JSONReader::Read("{\"a\" \"b\"}"));
-
- // Invalid, lone comma.
- EXPECT_FALSE(JSONReader::Read("{,}"));
- EXPECT_FALSE(JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS));
- EXPECT_FALSE(
- JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS));
- EXPECT_FALSE(JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS));
- EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
- JSON_ALLOW_TRAILING_COMMAS));
- }
+TEST(JSONReaderTest, NestedDictionaries) {
+ std::unique_ptr<DictionaryValue> dict_val =
+ DictionaryValue::From(JSONReader::Read(
+ "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}"));
+ ASSERT_TRUE(dict_val);
+ DictionaryValue* inner_dict = nullptr;
+ ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
+ ListValue* inner_array = nullptr;
+ ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
+ EXPECT_EQ(1U, inner_array->GetSize());
+ bool bool_value = true;
+ EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
+ EXPECT_FALSE(bool_value);
+ inner_dict = nullptr;
+ EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
+
+ std::unique_ptr<Value> root2 = JSONReader::Read(
+ "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
+ JSON_ALLOW_TRAILING_COMMAS);
+ EXPECT_TRUE(dict_val->Equals(root2.get()));
+}
- {
- // Test stack overflow
- std::string evil(1000000, '[');
- evil.append(std::string(1000000, ']'));
- EXPECT_FALSE(JSONReader::Read(evil));
- }
+TEST(JSONReaderTest, DictionaryKeysWithPeriods) {
+ std::unique_ptr<DictionaryValue> dict_val = DictionaryValue::From(
+ JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}"));
+ ASSERT_TRUE(dict_val);
+ int integer_value = 0;
+ EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+ EXPECT_EQ(3, integer_value);
+ EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
+ EXPECT_EQ(2, integer_value);
+ DictionaryValue* inner_dict = nullptr;
+ ASSERT_TRUE(
+ dict_val->GetDictionaryWithoutPathExpansion("d.e.f", &inner_dict));
+ EXPECT_EQ(1U, inner_dict->size());
+ EXPECT_TRUE(
+ inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j", &integer_value));
+ EXPECT_EQ(1, integer_value);
+
+ dict_val =
+ DictionaryValue::From(JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}"));
+ ASSERT_TRUE(dict_val);
+ EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
+ EXPECT_EQ(2, integer_value);
+ EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+ EXPECT_EQ(1, integer_value);
+}
- {
- // A few thousand adjacent lists is fine.
- std::string not_evil("[");
- not_evil.reserve(15010);
- for (int i = 0; i < 5000; ++i)
- not_evil.append("[],");
- not_evil.append("[]]");
- std::unique_ptr<ListValue> list =
- ListValue::From(JSONReader::Read(not_evil));
- ASSERT_TRUE(list);
- EXPECT_EQ(5001U, list->GetSize());
- }
+TEST(JSONReaderTest, InvalidDictionaries) {
+ // No closing brace.
+ EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
- {
- // Test utf8 encoded input
- std::unique_ptr<Value> root =
- JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
-
- std::unique_ptr<DictionaryValue> dict_val =
- DictionaryValue::From(JSONReader().ReadToValue(
- "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}"));
- ASSERT_TRUE(dict_val);
- EXPECT_TRUE(dict_val->GetString("path", &str_val));
- EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
- }
+ // Keys must be quoted.
+ EXPECT_FALSE(JSONReader::Read("{foo:true}"));
- {
- // Test invalid utf8 encoded input
- EXPECT_FALSE(JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\""));
- EXPECT_FALSE(JSONReader().ReadToValue("\"123\xc0\x81\""));
- EXPECT_FALSE(JSONReader().ReadToValue("\"abc\xc0\xae\""));
- }
+ // Trailing comma.
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,}"));
- {
- // Test utf16 encoded strings.
- std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ(
- "\xe2\x82\xac"
- "3,14",
- str_val);
-
- root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
- ASSERT_TRUE(root);
- EXPECT_TRUE(root->is_string());
- str_val.clear();
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
- }
+ // Too many commas.
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}"));
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+ JSON_ALLOW_TRAILING_COMMAS));
- {
- // Test invalid utf16 strings.
- const char* const cases[] = {
- "\"\\u123\"", // Invalid scalar.
- "\"\\ud83d\"", // Invalid scalar.
- "\"\\u$%@!\"", // Invalid scalar.
- "\"\\uzz89\"", // Invalid scalar.
- "\"\\ud83d\\udca\"", // Invalid lower surrogate.
- "\"\\ud83d\\ud83d\"", // Invalid lower surrogate.
- "\"\\ud83foo\"", // No lower surrogate.
- "\"\\ud83\\foo\"" // No lower surrogate.
- };
- std::unique_ptr<Value> root;
- for (size_t i = 0; i < arraysize(cases); ++i) {
- root = JSONReader().ReadToValue(cases[i]);
- EXPECT_FALSE(root) << cases[i];
- }
- }
+ // No separator.
+ EXPECT_FALSE(JSONReader::Read("{\"a\" \"b\"}"));
- {
- // Test literal root objects.
- std::unique_ptr<Value> root = JSONReader::Read("null");
- EXPECT_TRUE(root->is_none());
+ // Lone comma.
+ EXPECT_FALSE(JSONReader::Read("{,}"));
+ EXPECT_FALSE(JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS));
+ EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+ JSON_ALLOW_TRAILING_COMMAS));
+}
- root = JSONReader::Read("true");
- ASSERT_TRUE(root);
- bool bool_value;
- EXPECT_TRUE(root->GetAsBoolean(&bool_value));
- EXPECT_TRUE(bool_value);
+TEST(JSONReaderTest, StackOverflow) {
+ std::string evil(1000000, '[');
+ evil.append(std::string(1000000, ']'));
+ EXPECT_FALSE(JSONReader::Read(evil));
+
+ // A few thousand adjacent lists is fine.
+ std::string not_evil("[");
+ not_evil.reserve(15010);
+ for (int i = 0; i < 5000; ++i)
+ not_evil.append("[],");
+ not_evil.append("[]]");
+ std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read(not_evil));
+ ASSERT_TRUE(list);
+ EXPECT_EQ(5001U, list->GetSize());
+}
- root = JSONReader::Read("10");
- ASSERT_TRUE(root);
- int integer_value;
- EXPECT_TRUE(root->GetAsInteger(&integer_value));
- EXPECT_EQ(10, integer_value);
+TEST(JSONReaderTest, UTF8Input) {
+ std::unique_ptr<Value> root =
+ JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
+
+ std::unique_ptr<DictionaryValue> dict_val =
+ DictionaryValue::From(JSONReader().ReadToValue(
+ "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}"));
+ ASSERT_TRUE(dict_val);
+ EXPECT_TRUE(dict_val->GetString("path", &str_val));
+ EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
+}
- root = JSONReader::Read("\"root\"");
- ASSERT_TRUE(root);
- std::string str_val;
- EXPECT_TRUE(root->GetAsString(&str_val));
- EXPECT_EQ("root", str_val);
+TEST(JSONReaderTest, InvalidUTF8Input) {
+ EXPECT_FALSE(JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"123\xc0\x81\""));
+ EXPECT_FALSE(JSONReader().ReadToValue("\"abc\xc0\xae\""));
+}
+
+TEST(JSONReaderTest, UTF16Escapes) {
+ std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ(
+ "\xe2\x82\xac"
+ "3,14",
+ str_val);
+
+ root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_string());
+ str_val.clear();
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
+}
+
+TEST(JSONReaderTest, InvalidUTF16Escapes) {
+ const char* const cases[] = {
+ "\"\\u123\"", // Invalid scalar.
+ "\"\\ud83d\"", // Invalid scalar.
+ "\"\\u$%@!\"", // Invalid scalar.
+ "\"\\uzz89\"", // Invalid scalar.
+ "\"\\ud83d\\udca\"", // Invalid lower surrogate.
+ "\"\\ud83d\\ud83d\"", // Invalid lower surrogate.
+ "\"\\ud83foo\"", // No lower surrogate.
+ "\"\\ud83\\foo\"" // No lower surrogate.
+ };
+ std::unique_ptr<Value> root;
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ root = JSONReader().ReadToValue(cases[i]);
+ EXPECT_FALSE(root) << cases[i];
}
}
+TEST(JSONReaderTest, LiteralRoots) {
+ std::unique_ptr<Value> root = JSONReader::Read("null");
+ ASSERT_TRUE(root);
+ EXPECT_TRUE(root->is_none());
+
+ root = JSONReader::Read("true");
+ ASSERT_TRUE(root);
+ bool bool_value;
+ EXPECT_TRUE(root->GetAsBoolean(&bool_value));
+ EXPECT_TRUE(bool_value);
+
+ root = JSONReader::Read("10");
+ ASSERT_TRUE(root);
+ int integer_value;
+ EXPECT_TRUE(root->GetAsInteger(&integer_value));
+ EXPECT_EQ(10, integer_value);
+
+ root = JSONReader::Read("\"root\"");
+ ASSERT_TRUE(root);
+ std::string str_val;
+ EXPECT_TRUE(root->GetAsString(&str_val));
+ EXPECT_EQ("root", str_val);
+}
+
TEST(JSONReaderTest, ReadFromFile) {
FilePath path;
ASSERT_TRUE(PathService::Get(base::DIR_TEST_DATA, &path));
diff --git a/chromium/base/json/string_escape_fuzzer.cc b/chromium/base/json/string_escape_fuzzer.cc
new file mode 100644
index 00000000000..e44bd4fe16b
--- /dev/null
+++ b/chromium/base/json/string_escape_fuzzer.cc
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/string_escape.h"
+
+#include <memory>
+
+std::string escaped_string;
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if (size < 2)
+ return 0;
+
+ const bool put_in_quotes = data[size - 1];
+
+ // Create a copy of input buffer, as otherwise we don't catch
+ // overflow that touches the last byte (which is used in put_in_quotes).
+ size_t actual_size_char8 = size - 1;
+ std::unique_ptr<char[]> input(new char[actual_size_char8]);
+ memcpy(input.get(), data, actual_size_char8);
+
+ base::StringPiece input_string(input.get(), actual_size_char8);
+ base::EscapeJSONString(input_string, put_in_quotes, &escaped_string);
+
+ // Test for wide-strings if available size is even.
+ if (actual_size_char8 & 1)
+ return 0;
+
+ size_t actual_size_char16 = actual_size_char8 / 2;
+ base::StringPiece16 input_string16(
+ reinterpret_cast<base::char16*>(input.get()), actual_size_char16);
+ base::EscapeJSONString(input_string16, put_in_quotes, &escaped_string);
+
+ return 0;
+}
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index 7db189f846e..2dac3e2d8a8 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -101,6 +101,11 @@ typedef pthread_mutex_t* MutexHandle;
#include <android/log.h>
#endif
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+#endif
+
namespace logging {
namespace {
@@ -161,6 +166,11 @@ LogMessageHandlerFunction log_message_handler = nullptr;
int32_t CurrentProcessId() {
#if defined(OS_WIN)
return GetCurrentProcessId();
+#elif defined(OS_FUCHSIA)
+ zx_info_handle_basic_t basic = {};
+ zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &basic,
+ sizeof(basic), nullptr, nullptr);
+ return basic.koid;
#elif defined(OS_POSIX)
return getpid();
#endif
@@ -171,6 +181,9 @@ uint64_t TickCount() {
return GetTickCount();
#elif defined(OS_MACOSX)
return mach_absolute_time();
+#elif defined(OS_FUCHSIA)
+ return zx_clock_get(ZX_CLOCK_MONOTONIC) /
+ static_cast<zx_time_t>(base::Time::kNanosecondsPerMicrosecond);
#elif defined(OS_NACL)
// NaCl sadly does not have _POSIX_TIMERS enabled in sys/features.h
// So we have to use clock() for now.
@@ -372,12 +385,12 @@ void CloseLogFileUnlocked() {
} // namespace
-#if DCHECK_IS_ON() && defined(SYZYASAN)
-// In DCHECK-enabled SyzyASAN builds, allow the meaning of LOG_DCHECK to be
+#if DCHECK_IS_CONFIGURABLE
+// In DCHECK-enabled Chrome builds, allow the meaning of LOG_DCHECK to be
// determined at run-time. We default it to INFO, to avoid it triggering
// crashes before the run-time has explicitly chosen the behaviour.
BASE_EXPORT logging::LogSeverity LOG_DCHECK = LOG_INFO;
-#endif
+#endif // DCHECK_IS_CONFIGURABLE
// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
// an object of the correct type on the LHS of the unused part of the ternary
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index ed9547344b9..1b97a4ab2ba 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -552,9 +552,26 @@ class CheckOpResult {
#define TRAP_SEQUENCE() __builtin_trap()
#endif // ARCH_CPU_*
+// CHECK() and the trap sequence can be invoked from a constexpr function.
+// This could make compilation fail on GCC, as it forbids directly using inline
+// asm inside a constexpr function. However, it allows calling a lambda
+// expression including the same asm.
+// The side effect is that the top of the stacktrace will not point to the
+// calling function, but to this anonymous lambda. This is still useful as the
+// full name of the lambda will typically include the name of the function that
+// calls CHECK() and the debugger will still break at the right line of code.
+#if !defined(__clang__)
+#define WRAPPED_TRAP_SEQUENCE() \
+ do { \
+ [] { TRAP_SEQUENCE(); }(); \
+ } while (false)
+#else
+#define WRAPPED_TRAP_SEQUENCE() TRAP_SEQUENCE()
+#endif
+
#define IMMEDIATE_CRASH() \
({ \
- TRAP_SEQUENCE(); \
+ WRAPPED_TRAP_SEQUENCE(); \
__builtin_unreachable(); \
})
@@ -819,7 +836,7 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#if DCHECK_IS_ON()
-#if defined(SYZYASAN)
+#if DCHECK_IS_CONFIGURABLE
BASE_EXPORT extern LogSeverity LOG_DCHECK;
#else
const LogSeverity LOG_DCHECK = LOG_FATAL;
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 7482a9e24eb..9025aaf4e55 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -412,7 +412,7 @@ void DcheckEmptyFunction1() {
}
void DcheckEmptyFunction2() {}
-#if DCHECK_IS_ON() && defined(SYZYASAN)
+#if DCHECK_IS_CONFIGURABLE
class ScopedDcheckSeverity {
public:
ScopedDcheckSeverity(LogSeverity new_severity) : old_severity_(LOG_DCHECK) {
@@ -424,7 +424,7 @@ class ScopedDcheckSeverity {
private:
LogSeverity old_severity_;
};
-#endif // DCHECK_IS_ON() && defined(SYZYASAN)
+#endif // DCHECK_IS_CONFIGURABLE
// https://crbug.com/709067 tracks test flakiness on iOS.
#if defined(OS_IOS)
@@ -433,12 +433,12 @@ class ScopedDcheckSeverity {
#define MAYBE_Dcheck Dcheck
#endif
TEST_F(LoggingTest, MAYBE_Dcheck) {
-#if DCHECK_IS_ON() && defined(SYZYASAN)
- // When DCHECKs are enabled in SyzyASAN builds, LOG_DCHECK is mutable but
- // defaults to non-fatal. Set it to LOG_FATAL to get the expected behavior
- // from the rest of this test.
+#if DCHECK_IS_CONFIGURABLE
+ // DCHECKs are enabled, and LOG_DCHECK is mutable, but defaults to non-fatal.
+ // Set it to LOG_FATAL to get the expected behavior from the rest of this
+ // test.
ScopedDcheckSeverity dcheck_severity(LOG_FATAL);
-#endif // DCHECK_IS_ON() && defined(SYZYASAN)
+#endif // DCHECK_IS_CONFIGURABLE
#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
// Release build.
@@ -599,9 +599,9 @@ namespace nested_test {
}
} // namespace nested_test
-#if DCHECK_IS_ON() && defined(SYZYASAN)
-TEST_F(LoggingTest, AsanConditionalDCheck) {
- // Verify that DCHECKs default to non-fatal in SyzyASAN builds.
+#if DCHECK_IS_CONFIGURABLE
+TEST_F(LoggingTest, ConfigurableDCheck) {
+ // Verify that DCHECKs default to non-fatal in configurable-DCHECK builds.
// Note that we require only that DCHECK is non-fatal by default, rather
// than requiring that it be exactly INFO, ERROR, etc level.
EXPECT_LT(LOG_DCHECK, LOG_FATAL);
@@ -625,8 +625,7 @@ TEST_F(LoggingTest, AsanConditionalDCheck) {
}
}
-TEST_F(LoggingTest, AsanConditionalDCheckFeature) {
- // Enable fatal DCHECKs, so that preconditions in
+TEST_F(LoggingTest, ConfigurableDCheckFeature) {
// Initialize FeatureList with and without DcheckIsFatal, and verify the
// value of LOG_DCHECK. Note that we don't require that DCHECK take a
// specific value when the feature is off, only that it is non-fatal.
@@ -650,7 +649,7 @@ TEST_F(LoggingTest, AsanConditionalDCheckFeature) {
EXPECT_LT(LOG_DCHECK, LOG_FATAL);
}
}
-#endif // DCHECK_IS_ON() && defined(SYZYASAN)
+#endif // DCHECK_IS_CONFIGURABLE
#if defined(OS_FUCHSIA)
TEST_F(LoggingTest, FuchsiaLogging) {
diff --git a/chromium/base/mac/sdk_forward_declarations.h b/chromium/base/mac/sdk_forward_declarations.h
index af7b2978600..82fa08e058e 100644
--- a/chromium/base/mac/sdk_forward_declarations.h
+++ b/chromium/base/mac/sdk_forward_declarations.h
@@ -258,6 +258,53 @@ BASE_EXPORT extern NSString* const CIDetectorTypeText;
#endif // MAC_OS_X_VERSION_10_12_1
+// Once Chrome no longer supports OSX 10.12, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_13) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_13
+
+// VNRequest forward declarations.
+@class VNRequest;
+typedef void (^VNRequestCompletionHandler)(VNRequest* request, NSError* error);
+
+@interface VNRequest : NSObject<NSCopying>
+- (instancetype)initWithCompletionHandler:
+ (VNRequestCompletionHandler)completionHandler NS_DESIGNATED_INITIALIZER;
+@property(readonly, nonatomic, copy) NSArray* results;
+@end
+
+// VNDetectFaceLandmarksRequest forward declarations.
+@interface VNImageBasedRequest : VNRequest
+@end
+
+@protocol VNFaceObservationAccepting<NSObject>
+@end
+
+@interface VNDetectFaceLandmarksRequest
+ : VNImageBasedRequest<VNFaceObservationAccepting>
+@end
+
+// VNImageRequestHandler forward declarations.
+typedef NSString* VNImageOption NS_STRING_ENUM;
+
+@interface VNImageRequestHandler : NSObject
+- (instancetype)initWithCIImage:(CIImage*)image
+ options:(NSDictionary<VNImageOption, id>*)options;
+- (BOOL)performRequests:(NSArray<VNRequest*>*)requests error:(NSError**)error;
+@end
+
+// VNFaceObservation forward declarations.
+@interface VNObservation : NSObject<NSCopying, NSSecureCoding>
+@end
+
+@interface VNDetectedObjectObservation : VNObservation
+@property(readonly, nonatomic, assign) CGRect boundingBox;
+@end
+
+@interface VNFaceObservation : VNDetectedObjectObservation
+@end
+
+#endif // MAC_OS_X_VERSION_10_13
// ----------------------------------------------------------------------------
// The symbol for kCWSSIDDidChangeNotification is available in the
// CoreWLAN.framework for OSX versions 10.6 through 10.10. The symbol is not
diff --git a/chromium/base/macros.h b/chromium/base/macros.h
index ca5ed5f5264..91daf5b6d2e 100644
--- a/chromium/base/macros.h
+++ b/chromium/base/macros.h
@@ -83,6 +83,10 @@ namespace base {
#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
+// Workaround for MSVC, which expands __VA_ARGS__ as one macro argument. To
+// work around this bug, wrap the entire expression in this macro...
+#define CR_EXPAND_ARG(arg) arg
+
} // base
#endif // BASE_MACROS_H_
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index e9a2d6338b8..2c14053c746 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -112,11 +112,11 @@ size_t AlignToPageSize(size_t size) {
// base address at which |memory| is mapped, and that |offset| and |length|
// are page-aligned by the caller.
+#if defined(OS_ANDROID)
// Returns SUCCESS on platforms which do not support discardable pages.
DiscardableSharedMemory::LockResult LockPages(const SharedMemory& memory,
size_t offset,
size_t length) {
-#if defined(OS_ANDROID)
SharedMemoryHandle handle = memory.handle();
if (handle.IsValid()) {
int pin_result = ashmem_pin_region(handle.GetHandle(), offset, length);
@@ -125,9 +125,9 @@ DiscardableSharedMemory::LockResult LockPages(const SharedMemory& memory,
if (pin_result < 0)
return DiscardableSharedMemory::FAILED;
}
-#endif
return DiscardableSharedMemory::SUCCESS;
}
+#endif
// UnlockPages() is a no-op on platforms not supporting discardable pages.
void UnlockPages(const SharedMemory& memory, size_t offset, size_t length) {
@@ -265,9 +265,32 @@ DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
if (!length)
return PURGED;
+#if defined(OS_ANDROID)
// Ensure that the platform won't discard the required pages.
return LockPages(shared_memory_,
AlignToPageSize(sizeof(SharedState)) + offset, length);
+#elif defined(OS_MACOSX)
+ // On macOS, there is no mechanism to lock pages. However, we do need to call
+ // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
+ // footprint via task_info().
+ //
+ // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
+ // madvise(MADV_FREE_REUSABLE) called on them has no effect.
+ //
+ // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
+ // that's where the memory is actually released, rather than Unlock(), which
+ // is a no-op on macOS.
+ //
+ // For more information, see
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
+ if (madvise(reinterpret_cast<char*>(shared_memory_.memory()) +
+ AlignToPageSize(sizeof(SharedState)),
+ AlignToPageSize(mapped_size_), MADV_FREE_REUSE))
+ ;
+ return DiscardableSharedMemory::SUCCESS;
+#else
+ return DiscardableSharedMemory::SUCCESS;
+#endif
}
void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
diff --git a/chromium/base/memory/linked_ptr_unittest.cc b/chromium/base/memory/linked_ptr_unittest.cc
index 7e0c9e355d5..344ffa48d63 100644
--- a/chromium/base/memory/linked_ptr_unittest.cc
+++ b/chromium/base/memory/linked_ptr_unittest.cc
@@ -34,7 +34,7 @@ struct B: public A {
TEST(LinkedPtrTest, Test) {
{
linked_ptr<A> a0, a1, a2;
- a0 = a0;
+ a0 = *&a0; // The *& defeats Clang's -Wself-assign warning.
a1 = a2;
ASSERT_EQ(a0.get(), static_cast<A*>(nullptr));
ASSERT_EQ(a1.get(), static_cast<A*>(nullptr));
diff --git a/chromium/base/memory/platform_shared_memory_region.cc b/chromium/base/memory/platform_shared_memory_region.cc
new file mode 100644
index 00000000000..c145336ebdf
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region.cc
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+namespace subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
+ size_t size) {
+ return Create(Mode::kWritable, size);
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
+ size_t size) {
+ return Create(Mode::kUnsafe, size);
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+ PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
+ PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
+
+PlatformSharedMemoryRegion::ScopedPlatformHandle
+PlatformSharedMemoryRegion::PassPlatformHandle() {
+ return std::move(handle_);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region.h b/chromium/base/memory/platform_shared_memory_region.h
new file mode 100644
index 00000000000..3c9675dd2e8
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region.h
@@ -0,0 +1,222 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/mac/scoped_mach_port.h"
+#elif defined(OS_FUCHSIA)
+#include "base/fuchsia/scoped_zx_handle.h"
+#elif defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/scoped_file.h"
+#endif
+
+namespace base {
+namespace subtle {
+
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+ !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
+// Helper structs to keep two descriptors on POSIX. It's needed to support
+// ConvertToReadOnly().
+struct BASE_EXPORT FDPair {
+ int fd;
+ int readonly_fd;
+};
+
+struct BASE_EXPORT ScopedFDPair {
+ ScopedFDPair();
+ ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
+ ScopedFDPair(ScopedFDPair&&);
+ ScopedFDPair& operator=(ScopedFDPair&&);
+ ~ScopedFDPair();
+
+ FDPair get() const;
+
+ ScopedFD fd;
+ ScopedFD readonly_fd;
+};
+#endif
+
+// Implementation class for shared memory regions.
+//
+// This class does the following:
+//
+// - Wraps and owns a shared memory region platform handle.
+// - Provides a way to allocate a new region of platform shared memory of given
+// size.
+// - Provides a way to create mapping of the region in the current process'
+// address space, under special access-control constraints (see Mode).
+// - Provides methods to help transferring the handle across process boundaries.
+// - Holds a 128-bit unique identifier used to uniquely identify the same
+// kernel region resource across processes (used for memory tracking).
+// - Has a method to retrieve the region's size in bytes.
+//
+// IMPORTANT NOTE: Users should never use this directly, but
+// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
+// UnsafeSharedMemoryRegion since this is an implementation class.
+class BASE_EXPORT PlatformSharedMemoryRegion {
+ public:
+ // Permission mode of the platform handle. Each mode corresponds to one of the
+ // typed shared memory classes:
+ //
+ // * ReadOnlySharedMemoryRegion: A region that can only create read-only
+ // mappings.
+ //
+ // * WritableSharedMemoryRegion: A region that can only create writable
+ // mappings. The region can be demoted to ReadOnlySharedMemoryRegion without
+ // the possibility of promoting back to writable.
+ //
+ // * UnsafeSharedMemoryRegion: A region that can only create writable
+ // mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
+ enum class Mode {
+ kReadOnly, // ReadOnlySharedMemoryRegion
+ kWritable, // WritableSharedMemoryRegion
+ kUnsafe, // UnsafeSharedMemoryRegion
+ };
+
+// Platform-specific shared memory type used by this class.
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ using PlatformHandle = mach_port_t;
+ using ScopedPlatformHandle = mac::ScopedMachSendRight;
+#elif defined(OS_FUCHSIA)
+ using PlatformHandle = zx_handle_t;
+ using ScopedPlatformHandle = ScopedZxHandle;
+#elif defined(OS_WIN)
+ using PlatformHandle = HANDLE;
+ using ScopedPlatformHandle = win::ScopedHandle;
+#elif defined(OS_ANDROID)
+ using PlatformHandle = int;
+ using ScopedPlatformHandle = ScopedFD;
+#else
+ using PlatformHandle = FDPair;
+ using ScopedPlatformHandle = ScopedFDPair;
+#endif
+
+ // The minimum alignment in bytes that any mapped address produced by Map()
+ // and MapAt() is guaranteed to have.
+ enum { kMapMinimumAlignment = 32 };
+
+ // Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
+ // Creating in kReadOnly mode isn't supported because then there will be no
+ // way to modify memory content.
+ static PlatformSharedMemoryRegion CreateWritable(size_t size);
+ static PlatformSharedMemoryRegion CreateUnsafe(size_t size);
+
+ // Returns a new PlatformSharedMemoryRegion that takes ownership of the
+ // |handle|. All parameters must be taken from another valid
+ // PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
+ // actual region size as allocated by the kernel.
+ // Closes the |handle| and returns an invalid instance if passed parameters
+ // are invalid.
+ static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+
+ // Default constructor initializes an invalid instance, i.e. an instance that
+ // doesn't wrap any valid platform handle.
+ PlatformSharedMemoryRegion();
+
+ // Move operations are allowed.
+ PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
+ PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
+
+ // Destructor closes the platform handle. Does nothing if the handle is
+ // invalid.
+ ~PlatformSharedMemoryRegion();
+
+ // Passes ownership of the platform handle to the caller. The current instance
+ // becomes invalid. It's the responsibility of the caller to close the handle.
+ ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
+
+ // Returns the platform handle. The current instance keeps ownership of this
+ // handle.
+ PlatformHandle GetPlatformHandle() const;
+
+ // Whether the platform handle is valid.
+ bool IsValid() const;
+
+ // Duplicates the platform handle and creates a new PlatformSharedMemoryRegion
+ // with the same |mode_|, |size_| and |guid_| that owns this handle. Returns
+ // invalid region on failure, the current instance remains valid.
+ // Can be called only in kReadOnly and kUnsafe modes, CHECK-fails if is
+ // called in kWritable mode.
+ PlatformSharedMemoryRegion Duplicate() const;
+
+ // Converts the region to read-only. Returns whether the operation succeeded.
+ // Makes the current instance invalid on failure. Can be called only in
+ // kWritable mode, all other modes will CHECK-fail. The object will have
+ // kReadOnly mode after this call on success.
+ bool ConvertToReadOnly();
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // Same as above, but |mapped_addr| is used as a hint to avoid additional
+ // mapping of the memory object.
+ // |mapped_addr| must be mapped location of |memory_object_|. If the location
+ // is unknown, |mapped_addr| should be |nullptr|.
+ bool ConvertToReadOnly(void* mapped_addr);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+ // Maps |size| bytes of the shared memory region starting with the given
+ // |offset| into the caller's address space. |offset| must be aligned to value
+ // of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
+ // of the region limits.
+ // Returns true and sets |memory| and |mapped_size| on success, returns false
+ // and leaves output parameters in unspecified state otherwise. The mapped
+ // address is guaranteed to have an alignment of at least
+ // |kMapMinimumAlignment|.
+ bool MapAt(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const;
+
+ const UnguessableToken& GetGUID() const { return guid_; }
+
+ size_t GetSize() const { return size_; }
+
+ Mode GetMode() const { return mode_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+ CreateReadOnlyRegionDeathTest);
+ FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+ CheckPlatformHandlePermissionsCorrespondToMode);
+ static PlatformSharedMemoryRegion Create(Mode mode, size_t size);
+
+ static bool CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformHandle handle,
+ Mode mode,
+ size_t size);
+
+ PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+
+ ScopedPlatformHandle handle_;
+ Mode mode_ = Mode::kReadOnly;
+ size_t size_ = 0;
+ UnguessableToken guid_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
diff --git a/chromium/base/memory/platform_shared_memory_region_android.cc b/chromium/base/memory/platform_shared_memory_region_android.cc
new file mode 100644
index 00000000000..664d3d4b859
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region_android.cc
@@ -0,0 +1,190 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <sys/mman.h>
+
+#include "base/memory/shared_memory_tracker.h"
+#include "base/posix/eintr_wrapper.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+namespace subtle {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+namespace {
+
+// Returns the protection mask (PROT_* bits) of the ashmem region backing
+// |fd|, or -1 if the mask could not be queried.
+static int GetAshmemRegionProtectionMask(int fd) {
+  int prot = ashmem_get_prot_region(fd);
+  if (prot < 0) {
+    DPLOG(ERROR) << "ashmem_get_prot_region failed";
+    return -1;
+  }
+  return prot;
+}
+
+} // namespace
+
+// static
+// Wraps an existing ashmem descriptor into a PlatformSharedMemoryRegion.
+// Rejects invalid descriptors, zero-sized regions and sizes above INT_MAX,
+// and CHECKs that the region's protection bits are consistent with |mode|.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedFD fd,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!fd.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  // Sizes above INT_MAX are rejected across all platform backends.
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(CheckPlatformHandlePermissionsCorrespondToMode(fd.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+}
+
+// Returns the ashmem file descriptor; ownership stays with this region.
+int PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+// Duplicates the underlying descriptor with dup(). Writable regions must not
+// be duplicated (enforced below) so that ConvertToReadOnly() cannot be
+// bypassed by a second writable handle.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.get())));
+  if (!duped_fd.is_valid()) {
+    DPLOG(ERROR) << "dup(" << handle_.get() << ") failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(duped_fd), mode_, size_, guid_);
+}
+
+// Drops PROT_WRITE from the ashmem region's protection mask, making every
+// existing and future descriptor to the region read-only.
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  // Move the handle out of |handle_| so that, on any failure below, the
+  // ScopedFD destructor closes it and this region is left invalid.
+  ScopedFD handle_copy(handle_.release());
+
+  int prot = GetAshmemRegionProtectionMask(handle_copy.get());
+  if (prot < 0)
+    return false;
+
+  prot &= ~PROT_WRITE;
+  int ret = ashmem_set_prot_region(handle_copy.get(), prot);
+  if (ret != 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region failed";
+    return false;
+  }
+
+  handle_ = std::move(handle_copy);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+// Maps |size| bytes starting at |offset| with mmap(). PROT_WRITE is requested
+// only for non-read-only regions.
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  // Reject requests whose end would overflow or lie beyond the region.
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+                 MAP_SHARED, handle_.get(), offset);
+
+  // mmap() signals failure with MAP_FAILED, i.e. (void*)-1.
+  bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
+  if (!mmap_succeeded) {
+    DPLOG(ERROR) << "mmap " << handle_.get() << " failed";
+    return false;
+  }
+
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+// Creates a new ashmem-backed region of |size| bytes in |mode|. The region's
+// GUID doubles as its memory-tracing dump name.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  UnguessableToken guid = UnguessableToken::Create();
+
+  ScopedFD fd(ashmem_create_region(
+      SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), size));
+  if (!fd.is_valid()) {
+    DPLOG(ERROR) << "ashmem_create_region failed";
+    return {};
+  }
+
+  // Regions start out readable and writable; ConvertToReadOnly() narrows
+  // this mask later if needed.
+  int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
+  if (err < 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+}
+
+// Verifies that the ashmem protection mask agrees with |mode|: read-only
+// regions must lack PROT_WRITE and all other modes must have it.
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  int prot = GetAshmemRegionProtectionMask(handle);
+  if (prot < 0)
+    return false;
+
+  bool is_read_only = (prot & PROT_WRITE) == 0;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "Ashmem region has a wrong protection mask: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+// Private constructor; callers go through Create() or Take().
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedFD fd,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(fd)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
new file mode 100644
index 00000000000..22a43b0ea79
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
@@ -0,0 +1,172 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <zircon/process.h>
+#include <zircon/rights.h>
+#include <zircon/syscalls.h>
+
+#include "base/bits.h"
+#include "base/numerics/checked_math.h"
+#include "base/process/process_metrics.h"
+
+namespace base {
+namespace subtle {
+
+// static
+// Wraps an existing VMO handle into a PlatformSharedMemoryRegion, rejecting
+// invalid handles, zero sizes and sizes above INT_MAX.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedZxHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// Returns the VMO handle; ownership stays with this region.
+zx_handle_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+// Duplicates the VMO handle with identical rights. Writable regions must not
+// be duplicated (enforced below).
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedZxHandle duped_handle;
+  zx_status_t status = zx_handle_duplicate(handle_.get(), ZX_RIGHT_SAME_RIGHTS,
+                                           duped_handle.receive());
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_handle_duplicate failed: "
+                << zx_status_get_string(status);
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(duped_handle), mode_, size_,
+                                    guid_);
+}
+
+// Replaces the handle with one stripped of write/execute/set-property rights,
+// so no writable handle to the VMO can be minted from it afterwards.
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  ScopedZxHandle old_handle(handle_.release());
+  ScopedZxHandle new_handle;
+  const int kNoWriteOrExec =
+      ZX_DEFAULT_VMO_RIGHTS &
+      ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
+  zx_status_t status =
+      zx_handle_replace(old_handle.get(), kNoWriteOrExec, new_handle.receive());
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_handle_replace failed: " << zx_status_get_string(status);
+    return false;
+  }
+  // zx_handle_replace consumed the old handle; release it so the ScopedZxHandle
+  // does not try to close the now-invalid value.
+  ignore_result(old_handle.release());
+
+  handle_ = std::move(new_handle);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+// Maps |size| bytes starting at |offset| into the root VMAR. Write permission
+// is requested only for non-read-only regions.
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  // Reject requests whose end would overflow or lie beyond the region.
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  uintptr_t addr;
+  zx_status_t status = zx_vmar_map(
+      zx_vmar_root_self(), 0, handle_.get(), offset, size,
+      ZX_VM_FLAG_PERM_READ | (write_allowed ? ZX_VM_FLAG_PERM_WRITE : 0),
+      &addr);
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_vmar_map failed: " << zx_status_get_string(status);
+    return false;
+  }
+
+  *memory = reinterpret_cast<void*>(addr);
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+// Creates a new VMO-backed region. The VMO is allocated in whole pages, but
+// GetSize() keeps reporting the requested |size|.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  size_t rounded_size = bits::Align(size, GetPageSize());
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  ScopedZxHandle vmo;
+  zx_status_t status = zx_vmo_create(rounded_size, 0, vmo.receive());
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_vmo_create failed: " << zx_status_get_string(status);
+    return {};
+  }
+
+  // Strip the execute right up front; shared memory should never be mapped
+  // executable from this handle.
+  const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
+  ScopedZxHandle old_vmo(std::move(vmo));
+  status = zx_handle_replace(old_vmo.get(), kNoExecFlags, vmo.receive());
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_handle_replace failed: " << zx_status_get_string(status);
+    return {};
+  }
+  // The old handle was consumed by zx_handle_replace; do not close it again.
+  ignore_result(old_vmo.release());
+
+  return PlatformSharedMemoryRegion(std::move(vmo), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  // TODO(https://crbug.com/825177): implement this.
+  return true;
+}
+
+// Private constructor; callers go through Create() or Take().
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedZxHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region_mac.cc b/chromium/base/memory/platform_shared_memory_region_mac.cc
new file mode 100644
index 00000000000..a38d0c1fef6
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region_mac.cc
@@ -0,0 +1,198 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <mach/mach_vm.h>
+
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/numerics/checked_math.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#error "MacOS only - iOS uses platform_shared_memory_region_posix.cc"
+#endif
+
+namespace base {
+namespace subtle {
+
+// static
+// Wraps an existing Mach send right into a PlatformSharedMemoryRegion,
+// rejecting invalid rights, zero sizes and sizes above INT_MAX.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    mac::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// Returns the memory-object send right; ownership stays with this region.
+mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+// "Duplicates" the region by taking an extra reference on the same send
+// right. Writable regions must not be duplicated (enforced below).
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
+                                        MACH_PORT_RIGHT_SEND, 1);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
+    return {};
+  }
+
+  // The new ScopedMachSendRight owns the reference added above.
+  return PlatformSharedMemoryRegion(mac::ScopedMachSendRight(handle_.get()),
+                                    mode_, size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  return ConvertToReadOnly(nullptr);
+}
+
+// Converts to read-only by minting a fresh memory-object named right whose
+// maximum protection is VM_PROT_READ. |mapped_addr| may supply an existing
+// mapping of the region; otherwise a temporary mapping is created here.
+bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  // Move the right out of |handle_| so that, on failure, the destructor
+  // releases it and this region is left invalid.
+  mac::ScopedMachSendRight handle_copy(handle_.release());
+
+  void* temp_addr = mapped_addr;
+  mac::ScopedMachVM scoped_memory;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE, VM_PROT_READ,
+        VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS) {
+      MACH_DLOG(ERROR, kr) << "mach_vm_map";
+      return false;
+    }
+    // |scoped_memory| unmaps the temporary mapping when we return.
+    scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                        mach_vm_round_page(size_));
+  }
+
+  // Make new memory object.
+  memory_object_size_t allocation_size = size_;
+  mac::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &allocation_size,
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      named_right.receive(), MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
+    return false;
+  }
+  DCHECK_GE(allocation_size, size_);
+
+  handle_ = std::move(named_right);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+// Maps |size| bytes starting at |offset| with mach_vm_map(). Both current and
+// maximum protection include VM_PROT_WRITE only for non-read-only regions.
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  // Reject requests whose end would overflow or lie beyond the region.
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
+  kern_return_t kr = mach_vm_map(
+      mach_task_self(),
+      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
+      size,
+      0,  // Alignment mask
+      VM_FLAGS_ANYWHERE, handle_.get(), offset,
+      FALSE,  // Copy
+      VM_PROT_READ | vm_prot_write,  // Current protection
+      VM_PROT_READ | vm_prot_write,  // Maximum protection
+      VM_INHERIT_NONE);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_vm_map";
+    return false;
+  }
+
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+// Creates a new anonymous memory-object named right of |size| bytes. The
+// kernel may round the allocation up (DCHECKed below); GetSize() keeps
+// reporting the requested |size|.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  mach_vm_size_t vm_size = size;
+  mac::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &vm_size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      named_right.receive(),
+      MACH_PORT_NULL);  // Parent handle.
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
+    return {};
+  }
+  DCHECK_GE(vm_size, size);
+
+  return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  // TODO(https://crbug.com/825177): implement this.
+  return true;
+}
+
+// Private constructor; callers go through Create() or Take().
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    mac::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region_posix.cc b/chromium/base/memory/platform_shared_memory_region_posix.cc
new file mode 100644
index 00000000000..8453c12eb5c
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region_posix.cc
@@ -0,0 +1,291 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include "base/files/file_util.h"
+#include "base/numerics/checked_math.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+  static const FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(const FilePath* path) {
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+
+// Returns true iff |fd|'s O_ACCMODE bits equal |expected_mode|
+// (O_RDONLY / O_WRONLY / O_RDWR).
+bool CheckFDAccessMode(int fd, int expected_mode) {
+  int fd_status = fcntl(fd, F_GETFL);
+  if (fd_status == -1) {
+    DPLOG(ERROR) << "fcntl(" << fd << ", F_GETFL) failed";
+    return false;
+  }
+
+  int mode = fd_status & O_ACCMODE;
+  if (mode != expected_mode) {
+    DLOG(ERROR) << "Descriptor access mode (" << mode
+                << ") differs from expected (" << expected_mode << ")";
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace
+
+// ScopedFDPair owns the writable fd and, for kWritable regions, a second
+// read-only fd used by ConvertToReadOnly(). Move-only.
+ScopedFDPair::ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;
+
+ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;
+
+ScopedFDPair::~ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
+    : fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}
+
+// Returns both descriptors as raw fds without transferring ownership.
+FDPair ScopedFDPair::get() const {
+  return {fd.get(), readonly_fd.get()};
+}
+
+// static
+// Wraps an existing fd pair into a PlatformSharedMemoryRegion. In addition to
+// the usual validity/size checks, enforces the mode-specific invariant on the
+// read-only fd: present for kWritable, absent for kReadOnly/kUnsafe.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedFDPair handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.fd.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  switch (mode) {
+    case Mode::kReadOnly:
+    case Mode::kUnsafe:
+      // A stray read-only fd is tolerated but closed.
+      if (handle.readonly_fd.is_valid()) {
+        handle.readonly_fd.reset();
+        DLOG(WARNING) << "Readonly handle shouldn't be valid for a "
+                         "non-writable memory region; closing";
+      }
+      break;
+    case Mode::kWritable:
+      // Writable regions need the second fd to support ConvertToReadOnly().
+      if (!handle.readonly_fd.is_valid()) {
+        DLOG(ERROR)
+            << "Readonly handle must be valid for writable memory region";
+        return {};
+      }
+      break;
+    default:
+      DLOG(ERROR) << "Invalid permission mode: " << static_cast<int>(mode);
+      return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// Returns both fds; ownership stays with this region.
+FDPair PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+// A kWritable region is valid only if both fds are valid.
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.fd.is_valid() &&
+         (mode_ == Mode::kWritable ? handle_.readonly_fd.is_valid() : true);
+}
+
+// Duplicates the main descriptor with dup(); the duplicate carries no
+// read-only fd. Writable regions must not be duplicated (enforced below).
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.fd.get())));
+  if (!duped_fd.is_valid()) {
+    DPLOG(ERROR) << "dup(" << handle_.fd.get() << ") failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion({std::move(duped_fd), ScopedFD()}, mode_,
+                                    size_, guid_);
+}
+
+// Converts to read-only by discarding the writable fd and promoting the
+// pre-opened read-only fd to be the region's main descriptor.
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  handle_.fd.reset(handle_.readonly_fd.release());
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+// Maps |size| bytes starting at |offset| with mmap(). PROT_WRITE is requested
+// only for non-read-only regions.
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  // Reject requests whose end would overflow or lie beyond the region.
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+                 MAP_SHARED, handle_.fd.get(), offset);
+
+  // mmap() signals failure with MAP_FAILED, i.e. (void*)-1.
+  bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
+  if (!mmap_succeeded) {
+    DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
+    return false;
+  }
+
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+// Creates a new file-backed region of |size| bytes in a shmem temp dir. The
+// file is unlinked immediately so the memory is private and reclaimed when
+// the last fd closes. For kWritable, a second read-only fd is opened up front
+// so ConvertToReadOnly() never has to touch the filesystem.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+#if defined(OS_NACL)
+  // Untrusted code can't create descriptors or handles.
+  return {};
+#else
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // We don't use shm_open() API in order to support the --disable-dev-shm-usage
+  // flag.
+  FilePath directory;
+  if (!GetShmemTempDir(false /* executable */, &directory))
+    return {};
+
+  ScopedFD fd;
+  FilePath path;
+  fd.reset(CreateAndOpenFdForTemporaryFileInDir(directory, &path));
+
+  if (!fd.is_valid()) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm. Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return {};
+  }
+
+  // Deleting the file prevents anyone else from mapping it in (making it
+  // private), and prevents the need for cleanup (once the last fd is
+  // closed, it is truly freed).
+  ScopedPathUnlinker path_unlinker(&path);
+
+  ScopedFD readonly_fd;
+  if (mode == Mode::kWritable) {
+    // Also open as readonly so that we can ConvertToReadOnly().
+    readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+    if (!readonly_fd.is_valid()) {
+      DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+      return {};
+    }
+  }
+
+  // Get current size.
+  struct stat stat = {};
+  if (fstat(fd.get(), &stat) != 0)
+    return {};
+  const size_t current_size = stat.st_size;
+  // Resize the file to the requested size if needed.
+  if (current_size != size) {
+    if (HANDLE_EINTR(ftruncate(fd.get(), size)) != 0)
+      return {};
+  }
+
+  // Verify both fds refer to the same inode; a mismatch means the path was
+  // swapped out between creating the file and opening it read-only.
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_stat = {};
+    if (fstat(readonly_fd.get(), &readonly_stat))
+      NOTREACHED();
+
+    if (stat.st_dev != readonly_stat.st_dev ||
+        stat.st_ino != readonly_stat.st_ino) {
+      LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
+      return {};
+    }
+  }
+
+  return PlatformSharedMemoryRegion({std::move(fd), std::move(readonly_fd)},
+                                    mode, size, UnguessableToken::Create());
+#endif // !defined(OS_NACL)
+}
+
+// Verifies fd access modes against |mode|: main fd must be O_RDONLY for
+// kReadOnly and O_RDWR otherwise; the read-only fd must be O_RDONLY for
+// kWritable and absent for the other modes.
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  if (!CheckFDAccessMode(handle.fd,
+                         mode == Mode::kReadOnly ? O_RDONLY : O_RDWR)) {
+    return false;
+  }
+
+  if (mode == Mode::kWritable)
+    return CheckFDAccessMode(handle.readonly_fd, O_RDONLY);
+
+  // The second descriptor must be invalid in kReadOnly and kUnsafe modes.
+  if (handle.readonly_fd != -1) {
+    DLOG(ERROR) << "The second descriptor must be invalid";
+    return false;
+  }
+
+  return true;
+}
+
+// Private constructor; callers go through Create() or Take().
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedFDPair handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region_unittest.cc b/chromium/base/memory/platform_shared_memory_region_unittest.cc
new file mode 100644
index 00000000000..f3c3d778e37
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region_unittest.cc
@@ -0,0 +1,302 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+#include "base/process/process_metrics.h"
+#include "base/sys_info.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+const size_t kRegionSize = 1024;
+
+// Test fixture providing helpers to map a region and to reach the raw
+// memory pointer of a mapping.
+class PlatformSharedMemoryRegionTest : public ::testing::Test {
+ public:
+  // Maps |bytes| of |region| at |offset|; returns an invalid mapping if
+  // PlatformSharedMemoryRegion::MapAt() fails.
+  SharedMemoryMapping MapAt(PlatformSharedMemoryRegion* region,
+                            off_t offset,
+                            size_t bytes) {
+    void* memory = nullptr;
+    size_t mapped_size = 0;
+    if (!region->MapAt(offset, bytes, &memory, &mapped_size))
+      return {};
+
+    return SharedMemoryMapping(memory, bytes, mapped_size, region->GetGUID());
+  }
+
+  void* GetMemory(SharedMemoryMapping* mapping) {
+    return mapping->raw_memory_ptr();
+  }
+};
+
+// Tests that a default constructed region is invalid and produces invalid
+// mappings.
+TEST_F(PlatformSharedMemoryRegionTest, DefaultConstructedRegionIsInvalid) {
+  PlatformSharedMemoryRegion region;
+  EXPECT_FALSE(region.IsValid());
+  // Every operation on an invalid region must fail gracefully.
+  SharedMemoryMapping mapping = MapAt(&region, 0, kRegionSize);
+  EXPECT_FALSE(mapping.IsValid());
+  PlatformSharedMemoryRegion duplicate = region.Duplicate();
+  EXPECT_FALSE(duplicate.IsValid());
+  EXPECT_FALSE(region.ConvertToReadOnly());
+}
+
+// Tests that creating a region of 0 size returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateRegionOfZeroSizeIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(0);
+  EXPECT_FALSE(region.IsValid());
+
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(0);
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that creating a region of size bigger than the integer max value
+// returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateTooLargeRegionIsInvalid) {
+  size_t too_large_region_size =
+      static_cast<size_t>(std::numeric_limits<int>::max()) + 1;
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(too_large_region_size);
+  EXPECT_FALSE(region.IsValid());
+
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(too_large_region_size);
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that regions consistently report their size as the size requested at
+// creation time even if their allocation size is larger due to platform
+// constraints.
+TEST_F(PlatformSharedMemoryRegionTest, ReportedSizeIsRequestedSize) {
+  constexpr size_t kTestSizes[] = {1, 2, 3, 64, 4096, 1024 * 1024};
+  for (size_t size : kTestSizes) {
+    PlatformSharedMemoryRegion region =
+        PlatformSharedMemoryRegion::CreateWritable(size);
+    EXPECT_EQ(region.GetSize(), size);
+
+    // Conversion to read-only must not change the reported size either.
+    region.ConvertToReadOnly();
+    EXPECT_EQ(region.GetSize(), size);
+  }
+}
+
+// Tests that the platform-specific handle converted to read-only cannot be used
+// to perform a writable mapping with low-level system APIs like mmap().
+TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyHandleIsNotWritable) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(region.ConvertToReadOnly());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kReadOnly);
+  // Platform-specific helper attempts a writable low-level mapping.
+  EXPECT_TRUE(
+      CheckReadOnlyPlatformSharedMemoryRegionForTesting(std::move(region)));
+}
+
+// Tests that the PassPlatformHandle() call invalidates the region.
+TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterPass) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ignore_result(region.PassPlatformHandle());
+  EXPECT_FALSE(region.IsValid());
+}
+
+// Tests that the region is invalid after move.
+TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterMove) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion moved_region = std::move(region);
+  EXPECT_FALSE(region.IsValid());
+  EXPECT_TRUE(moved_region.IsValid());
+}
+
+// Tests that calling Take() with the size parameter equal to zero returns an
+// invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, TakeRegionOfZeroSizeIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
+      region.PassPlatformHandle(), region.GetMode(), 0, region.GetGUID());
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that calling Take() with the size parameter bigger than the integer max
+// value returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, TakeTooLargeRegionIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
+      region.PassPlatformHandle(), region.GetMode(),
+      static_cast<size_t>(std::numeric_limits<int>::max()) + 1,
+      region.GetGUID());
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that mapping bytes out of the region limits fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtOutOfTheRegionLimitsTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  SharedMemoryMapping mapping = MapAt(&region, 0, region.GetSize() + 1);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+// Tests that mapping with a size and offset causing overflow fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtWithOverflowTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(
+          SysInfo::VMAllocationGranularity() * 2);
+  ASSERT_TRUE(region.IsValid());
+  size_t size = std::numeric_limits<size_t>::max();
+  size_t offset = SysInfo::VMAllocationGranularity();
+  // |size| + |offset| should be below the region size due to overflow but
+  // mapping a region with these parameters should be invalid.
+  EXPECT_LT(size + offset, region.GetSize());
+  SharedMemoryMapping mapping = MapAt(&region, offset, size);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA) && \
+    !defined(OS_MACOSX)
+// Tests that the second handle is closed after a conversion to read-only on
+// POSIX.
+TEST_F(PlatformSharedMemoryRegionTest,
+       ConvertToReadOnlyInvalidatesSecondHandle) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  FDPair fds = region.GetPlatformHandle();
+  // A negative fd means the read-only descriptor was released.
+  EXPECT_LT(fds.readonly_fd, 0);
+}
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// Tests that protection bits are set correctly for read-only region on MacOS.
+TEST_F(PlatformSharedMemoryRegionTest, MapCurrentAndMaxProtectionSetCorrectly) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  SharedMemoryMapping ro_mapping = MapAt(&region, 0, kRegionSize);
+  ASSERT_TRUE(ro_mapping.IsValid());
+
+  // Query the kernel's view of the mapping's protections.
+  vm_region_basic_info_64 basic_info;
+  mach_vm_size_t dummy_size = 0;
+  void* temp_addr = GetMemory(&ro_mapping);
+  MachVMRegionResult result = GetBasicInfo(
+      mach_task_self(), &dummy_size,
+      reinterpret_cast<mach_vm_address_t*>(&temp_addr), &basic_info);
+  EXPECT_EQ(result, MachVMRegionResult::Success);
+  EXPECT_EQ(basic_info.protection & VM_PROT_ALL, VM_PROT_READ);
+  EXPECT_EQ(basic_info.max_protection & VM_PROT_ALL, VM_PROT_READ);
+}
+#endif
+
+// Tests that platform handle permissions are checked correctly.
+TEST_F(PlatformSharedMemoryRegionTest,
+       CheckPlatformHandlePermissionsCorrespondToMode) {
+  using Mode = PlatformSharedMemoryRegion::Mode;
+  // Helper running the static permission check against an existing region.
+  auto check = [](const PlatformSharedMemoryRegion& region,
+                  PlatformSharedMemoryRegion::Mode mode) {
+    return PlatformSharedMemoryRegion::
+        CheckPlatformHandlePermissionsCorrespondToMode(
+            region.GetPlatformHandle(), mode, region.GetSize());
+  };
+
+  // Check kWritable region.
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(check(region, Mode::kWritable));
+
+  // Check kReadOnly region.
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  EXPECT_TRUE(check(region, Mode::kReadOnly));
+
+  // Check kUnsafe region.
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+  ASSERT_TRUE(region2.IsValid());
+  EXPECT_TRUE(check(region2, Mode::kUnsafe));
+
+  // TODO(https://crbug.com/825177): add negative expectations once all
+  // platforms implement this check.
+}
+
+// Tests that it's impossible to create read-only platform shared memory region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateReadOnlyRegionDeathTest) {
+#ifdef OFFICIAL_BUILD
+ // The official build does not print the reason a CHECK failed.
+ const char kErrorRegex[] = "";
+#else
+ const char kErrorRegex[] =
+ "Creating a region in read-only mode will lead to this region being "
+ "non-modifiable";
+#endif
+ EXPECT_DEATH_IF_SUPPORTED(
+ PlatformSharedMemoryRegion::Create(
+ PlatformSharedMemoryRegion::Mode::kReadOnly, kRegionSize),
+ kErrorRegex);
+}
+
+// Tests that it's prohibited to duplicate a writable region.
+TEST_F(PlatformSharedMemoryRegionTest, DuplicateWritableRegionDeathTest) {
+#ifdef OFFICIAL_BUILD
+ const char kErrorRegex[] = "";
+#else
+ const char kErrorRegex[] =
+ "Duplicating a writable shared memory region is prohibited";
+#endif
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ EXPECT_DEATH_IF_SUPPORTED(region.Duplicate(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert an unsafe region to read-only.
+TEST_F(PlatformSharedMemoryRegionTest, UnsafeRegionConvertToReadOnlyDeathTest) {
+#ifdef OFFICIAL_BUILD
+ const char kErrorRegex[] = "";
+#else
+ const char kErrorRegex[] =
+ "Only writable shared memory region can be converted to read-only";
+#endif
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert a read-only region to read-only.
+TEST_F(PlatformSharedMemoryRegionTest,
+ ReadOnlyRegionConvertToReadOnlyDeathTest) {
+#ifdef OFFICIAL_BUILD
+ const char kErrorRegex[] = "";
+#else
+ const char kErrorRegex[] =
+ "Only writable shared memory region can be converted to read-only";
+#endif
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ EXPECT_TRUE(region.ConvertToReadOnly());
+ EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/platform_shared_memory_region_win.cc b/chromium/base/memory/platform_shared_memory_region_win.cc
new file mode 100644
index 00000000000..b6608da02f0
--- /dev/null
+++ b/chromium/base/memory/platform_shared_memory_region_win.cc
@@ -0,0 +1,345 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <aclapi.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/numerics/checked_math.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+// Errors that can occur during Shared Memory construction.
+// These match tools/metrics/histograms/histograms.xml.
+// This enum is append-only.
+enum CreateError {
+ SUCCESS = 0,
+ SIZE_ZERO = 1,
+ SIZE_TOO_LARGE = 2,
+ INITIALIZE_ACL_FAILURE = 3,
+ INITIALIZE_SECURITY_DESC_FAILURE = 4,
+ SET_SECURITY_DESC_FAILURE = 5,
+ CREATE_FILE_MAPPING_FAILURE = 6,
+ REDUCE_PERMISSIONS_FAILURE = 7,
+ ALREADY_EXISTS = 8,
+ CREATE_ERROR_LAST = ALREADY_EXISTS
+};
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(CreateError error, DWORD winerror) {
+ UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
+ CREATE_ERROR_LAST + 1);
+ static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+ if (winerror != ERROR_SUCCESS)
+ UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+typedef enum _SECTION_INFORMATION_CLASS {
+ SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+ PVOID BaseAddress;
+ ULONG Attributes;
+ LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+ HANDLE SectionHandle,
+ SECTION_INFORMATION_CLASS SectionInformationClass,
+ PVOID SectionInformation,
+ ULONG SectionInformationLength,
+ PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+ MEMORY_BASIC_INFORMATION memory_info;
+ if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+ return 0;
+ return memory_info.RegionSize -
+ (static_cast<char*>(address) -
+ static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+ static NtQuerySectionType nt_query_section_func =
+ reinterpret_cast<NtQuerySectionType>(
+ ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+ DCHECK(nt_query_section_func);
+
+ // The handle must have SECTION_QUERY access for this to succeed.
+ SECTION_BASIC_INFORMATION basic_information = {};
+ ULONG status =
+ nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+ sizeof(basic_information), nullptr);
+ if (status)
+ return false;
+ return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). The Chrome sandbox
+// prevents HANDLEs with the WRITE_DAC permission from being duplicated into
+// unprivileged processes.
+//
+// In order to remove the access control permissions, after being created the
+// handle is duplicated with only the file access permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+ size_t rounded_size,
+ LPCWSTR name) {
+ HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+ static_cast<DWORD>(rounded_size), name);
+ if (!h) {
+ LogError(CREATE_FILE_MAPPING_FAILURE, GetLastError());
+ return nullptr;
+ }
+
+ HANDLE h2;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success = ::DuplicateHandle(
+ process, h, process, &h2, FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY,
+ FALSE, 0);
+ BOOL rv = ::CloseHandle(h);
+ DCHECK(rv);
+
+ if (!success) {
+ LogError(REDUCE_PERMISSIONS_FAILURE, GetLastError());
+ return nullptr;
+ }
+
+ return h2;
+}
+
+} // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+ win::ScopedHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid) {
+ if (!handle.IsValid())
+ return {};
+
+ if (size == 0)
+ return {};
+
+ if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return {};
+
+ if (!IsSectionSafeToMap(handle.Get()))
+ return {};
+
+ CHECK(
+ CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
+
+ return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
+ return handle_.Get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+ if (!IsValid())
+ return {};
+
+ CHECK_NE(mode_, Mode::kWritable)
+ << "Duplicating a writable shared memory region is prohibited";
+
+ HANDLE duped_handle;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success =
+ ::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
+ if (!success)
+ return {};
+
+ return PlatformSharedMemoryRegion(win::ScopedHandle(duped_handle), mode_,
+ size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+ if (!IsValid())
+ return false;
+
+ CHECK_EQ(mode_, Mode::kWritable)
+ << "Only writable shared memory region can be converted to read-only";
+
+ win::ScopedHandle handle_copy(handle_.Take());
+
+ HANDLE duped_handle;
+ ProcessHandle process = GetCurrentProcess();
+ BOOL success =
+ ::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
+ FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+ if (!success)
+ return false;
+
+ handle_.Set(duped_handle);
+ mode_ = Mode::kReadOnly;
+ return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+ size_t size,
+ void** memory,
+ size_t* mapped_size) const {
+ if (!IsValid())
+ return false;
+
+ size_t end_byte;
+ if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+ return false;
+ }
+
+ bool write_allowed = mode_ != Mode::kReadOnly;
+ // Try to map the shared memory. On the first failure, release any reserved
+ // address space for a single entry.
+ for (int i = 0; i < 2; ++i) {
+ *memory = MapViewOfFile(
+ handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
+ static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
+ if (*memory)
+ break;
+ ReleaseReservation();
+ }
+ if (!*memory) {
+ DPLOG(ERROR) << "Failed executing MapViewOfFile";
+ return false;
+ }
+
+ *mapped_size = GetMemorySectionSize(*memory);
+ DCHECK_EQ(0U,
+ reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+ return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+ size_t size) {
+ // TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
+ // per mapping on average.
+ static const size_t kSectionSize = 65536;
+ if (size == 0) {
+ LogError(SIZE_ZERO, 0);
+ return {};
+ }
+
+ size_t rounded_size = bits::Align(size, kSectionSize);
+ if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ LogError(SIZE_TOO_LARGE, 0);
+ return {};
+ }
+
+ CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+ "lead to this region being non-modifiable";
+
+ // Add an empty DACL to enforce anonymous read-only sections.
+ ACL dacl;
+ SECURITY_DESCRIPTOR sd;
+ if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+ LogError(INITIALIZE_ACL_FAILURE, GetLastError());
+ return {};
+ }
+ if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+ LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+ return {};
+ }
+ if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+ LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
+ return {};
+ }
+
+  // Windows ignores DACLs on unnamed shared sections. Generate a random name
+  // in order to be able to enforce read-only.
+ uint64_t rand_values[4];
+ RandBytes(&rand_values, sizeof(rand_values));
+ string16 name =
+ StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx", rand_values[0],
+ rand_values[1], rand_values[2], rand_values[3]);
+
+ SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
+  // Ask for the file mapping with reduced permissions to avoid passing the
+  // access control permissions granted by default into unprivileged processes.
+ HANDLE h =
+ CreateFileMappingWithReducedPermissions(&sa, rounded_size, name.c_str());
+ if (h == nullptr) {
+ // The error is logged within CreateFileMappingWithReducedPermissions().
+ return {};
+ }
+
+ win::ScopedHandle scoped_h(h);
+ // Check if the shared memory pre-exists.
+ if (GetLastError() == ERROR_ALREADY_EXISTS) {
+ LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+ return {};
+ }
+
+ return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
+ UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformHandle handle,
+ Mode mode,
+ size_t size) {
+ // Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
+ // if the |handle| has a write access.
+ ProcessHandle process = GetCurrentProcess();
+ HANDLE duped_handle;
+ BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle,
+ FILE_MAP_WRITE, FALSE, 0);
+ if (success) {
+ BOOL rv = ::CloseHandle(duped_handle);
+ DCHECK(rv);
+ }
+
+ bool is_read_only = !success;
+ bool expected_read_only = mode == Mode::kReadOnly;
+
+ if (is_read_only != expected_read_only) {
+ DLOG(ERROR) << "File mapping handle has wrong access rights: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
+ return false;
+ }
+
+ return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+ win::ScopedHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid)
+ : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/memory/protected_memory.cc b/chromium/base/memory/protected_memory.cc
new file mode 100644
index 00000000000..157a677e046
--- /dev/null
+++ b/chromium/base/memory/protected_memory.cc
@@ -0,0 +1,17 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+#if !defined(COMPONENT_BUILD)
+PROTECTED_MEMORY_SECTION int AutoWritableMemory::writers = 0;
+#endif // !defined(COMPONENT_BUILD)
+
+base::LazyInstance<Lock>::Leaky AutoWritableMemory::writers_lock =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace base
diff --git a/chromium/base/memory/protected_memory.h b/chromium/base/memory/protected_memory.h
index edf5555819a..3cb2ec3c836 100644
--- a/chromium/base/memory/protected_memory.h
+++ b/chromium/base/memory/protected_memory.h
@@ -8,8 +8,47 @@
// APIs to set memory read-write and read-only when required. Protected memory
// should be set read-write for the minimum amount of time required.
-// Variables stored in protected memory must be global variables declared in the
-// PROTECTED_MEMORY_SECTION so they are set to read-only upon start-up.
+// Normally mutable variables are held in read-write memory and constant data
+// is held in read-only memory to ensure it is not accidentally overwritten.
+// In some cases we want to hold mutable variables in read-only memory, except
+// when they are being written to, to ensure that they are not tampered with.
+//
+// ProtectedMemory is a container class intended to hold a single variable in
+// read-only memory, except when explicitly set read-write. The variable can be
+// set read-write by creating a scoped AutoWritableMemory object by calling
+// AutoWritableMemory::Create(), the memory stays writable until the returned
+// object goes out of scope and is destructed. The wrapped variable can be
+// accessed using operator* and operator->.
+//
+// Instances of ProtectedMemory must be declared in the PROTECTED_MEMORY_SECTION
+// and as global variables. Because protected memory variables are globals,
+// the same rules apply disallowing non-trivial constructors and destructors.
+// Global definitions are required to avoid the linker placing statics in
+// inlinable functions into a comdat section and setting the protected memory
+// section read-write when they are merged.
+//
+// EXAMPLE:
+//
+// struct Items { void* item1; };
+// static PROTECTED_MEMORY_SECTION base::ProtectedMemory<Items> items;
+// void InitializeItems() {
+// // Explicitly set items read-write before writing to it.
+// auto writer = base::AutoWritableMemory::Create(items);
+// items->item1 = /* ... */;
+// assert(items->item1 != nullptr);
+// // items is set back to read-only on the destruction of writer
+// }
+//
+// using FnPtr = void (*)(void);
+// PROTECTED_MEMORY_SECTION base::ProtectedMemory<FnPtr> fnPtr;
+// FnPtr ResolveFnPtr(void) {
+// // The Initializer nested class is a helper class for creating a static
+// // initializer for a ProtectedMemory variable. It implicitly sets the
+// // variable read-write during initialization.
+// static base::ProtectedMemory<FnPtr>::Initializer I(&fnPtr,
+// reinterpret_cast<FnPtr>(dlsym(/* ... */)));
+// return *fnPtr;
+// }
#ifndef BASE_MEMORY_PROTECTED_MEMORY_H_
#define BASE_MEMORY_PROTECTED_MEMORY_H_
@@ -47,6 +86,30 @@ extern char __start_protected_memory __asm(
extern char __stop_protected_memory __asm(
"section$end$PROTECTED_MEMORY$protected_memory");
+#elif defined(OS_WIN)
+// Define a read-write prot section. The $a, $mem, and $z 'sub-sections' are
+// merged alphabetically so $a and $z are used to define the start and end of
+// the protected memory section, and $mem holds protected variables.
+// (Note: Sections in Portable Executables are equivalent to segments in other
+// executable formats, so this section is mapped into its own pages.)
+#pragma section("prot$a", read, write)
+#pragma section("prot$mem", read, write)
+#pragma section("prot$z", read, write)
+
+// We want the protected memory section to be read-only, not read-write so we
+// instruct the linker to set the section read-only at link time. We do this
+// at link time instead of compile time, because defining the prot section
+// read-only would cause mis-compiles due to optimizations assuming that the
+// section contents are constant.
+#pragma comment(linker, "/SECTION:prot,R")
+
+__declspec(allocate("prot$a")) __declspec(selectany)
+char __start_protected_memory;
+__declspec(allocate("prot$z")) __declspec(selectany)
+char __stop_protected_memory;
+
+#define PROTECTED_MEMORY_SECTION __declspec(allocate("prot$mem"))
+
#else
#undef PROTECTED_MEMORY_ENABLED
#define PROTECTED_MEMORY_ENABLED 0
@@ -55,48 +118,6 @@ extern char __stop_protected_memory __asm(
namespace base {
-// Normally mutable variables are held in read-write memory and constant data
-// is held in read-only memory to ensure it is not accidentally overwritten.
-// In some cases we want to hold mutable variables in read-only memory, except
-// when they are being written to, to ensure that they are not tampered with.
-//
-// ProtectedMemory is a container class intended to hold a single variable in
-// read-only memory, except when explicitly set read-write. The variable can be
-// set read-write by creating a scoped AutoWritableMemory object by calling
-// AutoWritableMemory::Create(), the memory stays writable until the returned
-// object goes out of scope and is destructed. The wrapped variable can be
-// accessed using operator* and operator->.
-//
-// Instances of ProtectedMemory must be declared in the PROTECTED_MEMORY_SECTION
-// and as global variables. Because protected memory variables are globals, the
-// the same rules apply disallowing non-trivial constructors and destructors.
-// Global definitions are required to avoid the linker placing statics in
-// inlinable functions into a comdat section and setting the protected memory
-// section read-write when they are merged.
-//
-// EXAMPLE:
-//
-// struct Items { void* item1; };
-// static PROTECTED_MEMORY_SECTION ProtectedMemory<Items> items;
-// void InitializeItems() {
-// // Explicitly set items read-write before writing to it.
-// auto writer = AutoWritableMemory::Create(items);
-// items->item1 = /* ... */;
-// assert(items->item1 != nullptr);
-// // items is set back to read-only on the destruction of writer
-// }
-//
-// using FnPtr = void (*)(void);
-// PROTECTED_MEMORY_SECTION ProtectedMemory<FnPtr> fnPtr;
-// FnPtr ResolveFnPtr(void) {
-// // The Initializer nested class is a helper class for creating a static
-// // initializer for a ProtectedMemory variable. It implicitly sets the
-// // variable read-write during initialization.
-// static ProtectedMemory<FnPtr>::Initializer(&fnPtr,
-// reinterpret_cast<FnPtr>(dlsym(/* ... */)));
-// return *fnPtr;
-// }
-
template <typename T>
class ProtectedMemory {
public:
diff --git a/chromium/base/memory/protected_memory_cfi.h b/chromium/base/memory/protected_memory_cfi.h
index 955a4488ae1..a90023bc81f 100644
--- a/chromium/base/memory/protected_memory_cfi.h
+++ b/chromium/base/memory/protected_memory_cfi.h
@@ -14,6 +14,7 @@
#include <utility>
#include "base/cfi_buildflags.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/protected_memory.h"
#include "build/build_config.h"
@@ -37,9 +38,7 @@ class UnsanitizedCfiCall {
UnsanitizedCfiCall(UnsanitizedCfiCall&&) = default;
template <typename... Args>
-#if !defined(COMPILER_MSVC)
- __attribute__((no_sanitize("cfi-icall")))
-#endif // !defined(COMPILER_MSVC)
+ NO_SANITIZE("cfi-icall")
auto operator()(Args&&... args) {
return function_(std::forward<Args>(args)...);
}
diff --git a/chromium/base/memory/protected_memory_posix.cc b/chromium/base/memory/protected_memory_posix.cc
index 1f5fd9002ea..d003d79bdbd 100644
--- a/chromium/base/memory/protected_memory_posix.cc
+++ b/chromium/base/memory/protected_memory_posix.cc
@@ -18,28 +18,24 @@
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
namespace base {
-#if !defined(COMPONENT_BUILD)
-PROTECTED_MEMORY_SECTION int AutoWritableMemory::writers = 0;
-#endif // !defined(COMPONENT_BUILD)
+namespace {
-base::LazyInstance<Lock>::Leaky AutoWritableMemory::writers_lock =
- LAZY_INSTANCE_INITIALIZER;
-
-static uintptr_t page_mask() {
- return ~(static_cast<uintptr_t>(getpagesize()) - 1);
-}
-
-static bool SetMemory(void* start, void* end, int prot) {
- const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask();
+bool SetMemory(void* start, void* end, int prot) {
+ DCHECK(end > start);
+ const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
return mprotect(reinterpret_cast<void*>(page_start),
reinterpret_cast<uintptr_t>(end) - page_start, prot) == 0;
}
+} // namespace
+
bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
return SetMemory(start, end, PROT_READ | PROT_WRITE);
}
@@ -51,7 +47,8 @@ bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
#if defined(OS_LINUX)
void AssertMemoryIsReadOnly(const void* ptr) {
#if DCHECK_IS_ON()
- const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask();
+ const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
// Note: We've casted away const here, which should not be meaningful since
// if the memory is written to we will abort immediately.
diff --git a/chromium/base/memory/protected_memory_win.cc b/chromium/base/memory/protected_memory_win.cc
new file mode 100644
index 00000000000..cf3da78dde1
--- /dev/null
+++ b/chromium/base/memory/protected_memory_win.cc
@@ -0,0 +1,52 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+
+#include <windows.h>
+
+#include <stdint.h>
+
+#include "base/process/process_metrics.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+bool SetMemory(void* start, void* end, DWORD prot) {
+ DCHECK(end > start);
+ const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
+ DWORD old_prot;
+ return VirtualProtect(reinterpret_cast<void*>(page_start),
+ reinterpret_cast<uintptr_t>(end) - page_start, prot,
+ &old_prot) != 0;
+}
+
+} // namespace
+
+bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
+ return SetMemory(start, end, PAGE_READWRITE);
+}
+
+bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
+ return SetMemory(start, end, PAGE_READONLY);
+}
+
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+ const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
+
+ MEMORY_BASIC_INFORMATION info;
+ SIZE_T result =
+ VirtualQuery(reinterpret_cast<LPCVOID>(page_start), &info, sizeof(info));
+ DCHECK_GT(result, 0U);
+ DCHECK(info.Protect == PAGE_READONLY);
+#endif // DCHECK_IS_ON()
+}
+
+} // namespace base
diff --git a/chromium/base/memory/ptr_util.h b/chromium/base/memory/ptr_util.h
index 8a2d4877083..42f4f49eebd 100644
--- a/chromium/base/memory/ptr_util.h
+++ b/chromium/base/memory/ptr_util.h
@@ -18,13 +18,6 @@ std::unique_ptr<T> WrapUnique(T* ptr) {
return std::unique_ptr<T>(ptr);
}
-// TODO(crbug.com/755727): Inline all uses.
-template <typename T, typename... Args>
-auto MakeUnique(Args&&... args)
- -> decltype(std::make_unique<T>(std::forward<Args>(args)...)) {
- return std::make_unique<T>(std::forward<Args>(args)...);
-}
-
} // namespace base
#endif // BASE_MEMORY_PTR_UTIL_H_
diff --git a/chromium/base/memory/ptr_util_unittest.cc b/chromium/base/memory/ptr_util_unittest.cc
index 6b83b91442f..3fa40d8098d 100644
--- a/chromium/base/memory/ptr_util_unittest.cc
+++ b/chromium/base/memory/ptr_util_unittest.cc
@@ -37,34 +37,4 @@ TEST(PtrUtilTest, WrapUnique) {
EXPECT_EQ(0u, DeleteCounter::count());
}
-TEST(PtrUtilTest, MakeUniqueScalar) {
- auto s = std::make_unique<std::string>();
- EXPECT_EQ("", *s);
-
- auto s2 = std::make_unique<std::string>("test");
- EXPECT_EQ("test", *s2);
-}
-
-TEST(PtrUtilTest, MakeUniqueScalarWithMoveOnlyType) {
- using MoveOnly = std::unique_ptr<std::string>;
- auto p = std::make_unique<MoveOnly>(std::make_unique<std::string>("test"));
- EXPECT_EQ("test", **p);
-}
-
-TEST(PtrUtilTest, MakeUniqueArray) {
- EXPECT_EQ(0u, DeleteCounter::count());
- auto a = std::make_unique<DeleteCounter[]>(5);
- EXPECT_EQ(5u, DeleteCounter::count());
- a.reset();
- EXPECT_EQ(0u, DeleteCounter::count());
-}
-
-#if 0
-// TODO(dcheng): Move this into a nocompile test.
-TEST(PtrUtilTest, MakeUniqueArrayWithKnownBounds) {
- auto a = std::make_unique<DeleteCounter[1]>();
- auto b = std::make_unique<DeleteCounter[1]>(1);
-}
-#endif
-
} // namespace base
diff --git a/chromium/base/memory/read_only_shared_memory_region.cc b/chromium/base/memory/read_only_shared_memory_region.cc
new file mode 100644
index 00000000000..f39837afda1
--- /dev/null
+++ b/chromium/base/memory/read_only_shared_memory_region.cc
@@ -0,0 +1,95 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/read_only_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(size_t size) {
+ subtle::PlatformSharedMemoryRegion handle =
+ subtle::PlatformSharedMemoryRegion::CreateWritable(size);
+ if (!handle.IsValid())
+ return {};
+
+ void* memory_ptr = nullptr;
+ size_t mapped_size = 0;
+ if (!handle.MapAt(0, handle.GetSize(), &memory_ptr, &mapped_size))
+ return {};
+
+ WritableSharedMemoryMapping mapping(memory_ptr, size, mapped_size,
+ handle.GetGUID());
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ handle.ConvertToReadOnly(memory_ptr);
+#else
+ handle.ConvertToReadOnly();
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+ ReadOnlySharedMemoryRegion region(std::move(handle));
+
+ if (!region.IsValid())
+ return {};
+
+ return {std::move(region), std::move(mapping)};
+}
+
+// static
+ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Deserialize(
+ subtle::PlatformSharedMemoryRegion handle) {
+ return ReadOnlySharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ ReadOnlySharedMemoryRegion region) {
+ return std::move(region.handle_);
+}
+
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion() = default;
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
+ ReadOnlySharedMemoryRegion&& region) = default;
+ReadOnlySharedMemoryRegion& ReadOnlySharedMemoryRegion::operator=(
+ ReadOnlySharedMemoryRegion&& region) = default;
+ReadOnlySharedMemoryRegion::~ReadOnlySharedMemoryRegion() = default;
+
+ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Duplicate() const {
+ return ReadOnlySharedMemoryRegion(handle_.Duplicate());
+}
+
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map() const {
+ return MapAt(0, handle_.GetSize());
+}
+
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(
+ off_t offset,
+ size_t size) const {
+ if (!IsValid())
+ return {};
+
+ void* memory = nullptr;
+ size_t mapped_size = 0;
+ if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ return {};
+
+ return ReadOnlySharedMemoryMapping(memory, size, mapped_size,
+ handle_.GetGUID());
+}
+
+bool ReadOnlySharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle)
+ : handle_(std::move(handle)) {
+ CHECK_EQ(handle_.GetMode(),
+ subtle::PlatformSharedMemoryRegion::Mode::kReadOnly);
+}
+
+} // namespace base
diff --git a/chromium/base/memory/read_only_shared_memory_region.h b/chromium/base/memory/read_only_shared_memory_region.h
new file mode 100644
index 00000000000..d9e47710569
--- /dev/null
+++ b/chromium/base/memory/read_only_shared_memory_region.h
@@ -0,0 +1,106 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+struct MappedReadOnlyRegion;
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// read-only. These mappings remain valid even after the region handle is moved
+// or destroyed.
+class BASE_EXPORT ReadOnlySharedMemoryRegion {
+ public:
+ using MappingType = ReadOnlySharedMemoryMapping;
+ // Creates a new ReadOnlySharedMemoryRegion instance of a given size along
+ // with the WritableSharedMemoryMapping which provides the only way to modify
+ // the content of the newly created region.
+ //
+ // This means that the caller's process is the only process that can modify
+ // the region content. If you need to pass write access to another process,
+ // consider using WritableSharedMemoryRegion or UnsafeSharedMemoryRegion.
+ static MappedReadOnlyRegion Create(size_t size);
+
+ // Returns a ReadOnlySharedMemoryRegion built from a platform-specific handle
+ // that was taken from another ReadOnlySharedMemoryRegion instance. Returns an
+ // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+ // isn't read-only.
+ // This should be used only by the code passing handles across process
+ // boundaries.
+ static ReadOnlySharedMemoryRegion Deserialize(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ // Extracts a platform handle from the region. Ownership is transferred to the
+ // returned region object.
+ // This should be used only for sending the handle from the current process to
+ // another.
+ static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+ ReadOnlySharedMemoryRegion region);
+
+ // Default constructor initializes an invalid instance.
+ ReadOnlySharedMemoryRegion();
+
+ // Move operations are allowed.
+ ReadOnlySharedMemoryRegion(ReadOnlySharedMemoryRegion&&);
+ ReadOnlySharedMemoryRegion& operator=(ReadOnlySharedMemoryRegion&&);
+
+ // Destructor closes shared memory region if valid.
+ // All created mappings will remain valid.
+ ~ReadOnlySharedMemoryRegion();
+
+ // Duplicates the underlying platform handle and creates a new
+ // ReadOnlySharedMemoryRegion instance that owns this handle. Returns a valid
+ // ReadOnlySharedMemoryRegion on success, invalid otherwise. The current
+ // region instance remains valid in any case.
+ ReadOnlySharedMemoryRegion Duplicate() const;
+
+ // Maps the shared memory region into the caller's address space with
+ // read-only access. The mapped address is guaranteed to have an alignment of
+ // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+ // Returns a valid ReadOnlySharedMemoryMapping instance on success, invalid
+ // otherwise.
+ ReadOnlySharedMemoryMapping Map() const;
+
+ // Same as above, but maps only |size| bytes of the shared memory region
+ // starting with the given |offset|. |offset| must be aligned to value of
+ // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+ // requested bytes are out of the region limits.
+ ReadOnlySharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+ // Whether the underlying platform handle is valid.
+ bool IsValid() const;
+
+ // Returns the maximum mapping size that can be created from this region.
+ size_t GetSize() const {
+ DCHECK(IsValid());
+ return handle_.GetSize();
+ }
+
+ private:
+ explicit ReadOnlySharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ subtle::PlatformSharedMemoryRegion handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryRegion);
+};
+
+// Helper struct for return value of ReadOnlySharedMemoryRegion::Create().
+struct MappedReadOnlyRegion {
+ ReadOnlySharedMemoryRegion region;
+ WritableSharedMemoryMapping mapping;
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
diff --git a/chromium/base/memory/ref_counted_memory.cc b/chromium/base/memory/ref_counted_memory.cc
index c46bb9abbbf..7999d906c4e 100644
--- a/chromium/base/memory/ref_counted_memory.cc
+++ b/chromium/base/memory/ref_counted_memory.cc
@@ -4,6 +4,8 @@
#include "base/memory/ref_counted_memory.h"
+#include <utility>
+
#include "base/logging.h"
namespace base {
@@ -42,7 +44,7 @@ RefCountedBytes::RefCountedBytes(size_t size) : data_(size, 0) {}
scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
std::vector<unsigned char>* to_destroy) {
- scoped_refptr<RefCountedBytes> bytes(new RefCountedBytes);
+ auto bytes = MakeRefCounted<RefCountedBytes>();
bytes->data_.swap(*to_destroy);
return bytes;
}
@@ -66,7 +68,7 @@ RefCountedString::~RefCountedString() = default;
// static
scoped_refptr<RefCountedString> RefCountedString::TakeString(
std::string* to_destroy) {
- scoped_refptr<RefCountedString> self(new RefCountedString);
+ auto self = MakeRefCounted<RefCountedString>();
to_destroy->swap(self->data_);
return self;
}
@@ -80,4 +82,24 @@ size_t RefCountedString::size() const {
return data_.size();
}
+RefCountedSharedMemory::RefCountedSharedMemory(
+ std::unique_ptr<SharedMemory> shm,
+ size_t size)
+ : shm_(std::move(shm)), size_(size) {
+ DCHECK(shm_);
+ DCHECK(shm_->memory());
+ DCHECK_GT(size_, 0U);
+ DCHECK_LE(size_, shm_->mapped_size());
+}
+
+RefCountedSharedMemory::~RefCountedSharedMemory() = default;
+
+const unsigned char* RefCountedSharedMemory::front() const {
+ return reinterpret_cast<const unsigned char*>(shm_->memory());
+}
+
+size_t RefCountedSharedMemory::size() const {
+ return size_;
+}
+
} // namespace base
diff --git a/chromium/base/memory/ref_counted_memory.h b/chromium/base/memory/ref_counted_memory.h
index ca7c371681f..82a3eeb1ed1 100644
--- a/chromium/base/memory/ref_counted_memory.h
+++ b/chromium/base/memory/ref_counted_memory.h
@@ -7,21 +7,22 @@
#include <stddef.h>
+#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
namespace base {
-// A generic interface to memory. This object is reference counted because one
-// of its two subclasses own the data they carry, and we need to have
-// heterogeneous containers of these two types of memory.
+// A generic interface to memory. This object is reference counted because most
+// of its subclasses own the data they carry, and this interface needs to
+// support heterogeneous containers of these different types of memory.
class BASE_EXPORT RefCountedMemory
- : public base::RefCountedThreadSafe<RefCountedMemory> {
+ : public RefCountedThreadSafe<RefCountedMemory> {
public:
// Retrieves a pointer to the beginning of the data we point to. If the data
// is empty, this will return NULL.
@@ -39,7 +40,7 @@ class BASE_EXPORT RefCountedMemory
}
protected:
- friend class base::RefCountedThreadSafe<RefCountedMemory>;
+ friend class RefCountedThreadSafe<RefCountedMemory>;
RefCountedMemory();
virtual ~RefCountedMemory();
};
@@ -48,13 +49,12 @@ class BASE_EXPORT RefCountedMemory
// matter.
class BASE_EXPORT RefCountedStaticMemory : public RefCountedMemory {
public:
- RefCountedStaticMemory()
- : data_(NULL), length_(0) {}
+ RefCountedStaticMemory() : data_(nullptr), length_(0) {}
RefCountedStaticMemory(const void* data, size_t length)
- : data_(static_cast<const unsigned char*>(length ? data : NULL)),
+ : data_(static_cast<const unsigned char*>(length ? data : nullptr)),
length_(length) {}
- // Overridden from RefCountedMemory:
+ // RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
@@ -67,12 +67,13 @@ class BASE_EXPORT RefCountedStaticMemory : public RefCountedMemory {
DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory);
};
-// An implementation of RefCountedMemory, where we own the data in a vector.
+// An implementation of RefCountedMemory, where the data is stored in a STL
+// vector.
class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
public:
RefCountedBytes();
- // Constructs a RefCountedBytes object by _copying_ from |initializer|.
+ // Constructs a RefCountedBytes object by copying from |initializer|.
explicit RefCountedBytes(const std::vector<unsigned char>& initializer);
// Constructs a RefCountedBytes object by copying |size| bytes from |p|.
@@ -88,7 +89,7 @@ class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
static scoped_refptr<RefCountedBytes> TakeVector(
std::vector<unsigned char>* to_destroy);
- // Overridden from RefCountedMemory:
+ // RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
@@ -111,7 +112,7 @@ class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
DISALLOW_COPY_AND_ASSIGN(RefCountedBytes);
};
-// An implementation of RefCountedMemory, where the bytes are stored in an STL
+// An implementation of RefCountedMemory, where the bytes are stored in a STL
// string. Use this if your data naturally arrives in that format.
class BASE_EXPORT RefCountedString : public RefCountedMemory {
public:
@@ -122,7 +123,7 @@ class BASE_EXPORT RefCountedString : public RefCountedMemory {
// copy into object->data()).
static scoped_refptr<RefCountedString> TakeString(std::string* to_destroy);
- // Overridden from RefCountedMemory:
+ // RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
@@ -137,6 +138,27 @@ class BASE_EXPORT RefCountedString : public RefCountedMemory {
DISALLOW_COPY_AND_ASSIGN(RefCountedString);
};
+// An implementation of RefCountedMemory, where the bytes are stored in shared
+// memory.
+class BASE_EXPORT RefCountedSharedMemory : public RefCountedMemory {
+ public:
+ // Constructs a RefCountedMemory object by taking ownership of an already
+ // mapped SharedMemory object.
+ RefCountedSharedMemory(std::unique_ptr<SharedMemory> shm, size_t size);
+
+ // RefCountedMemory:
+ const unsigned char* front() const override;
+ size_t size() const override;
+
+ private:
+ ~RefCountedSharedMemory() override;
+
+ const std::unique_ptr<SharedMemory> shm_;
+ const size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemory);
+};
+
} // namespace base
#endif // BASE_MEMORY_REF_COUNTED_MEMORY_H_
diff --git a/chromium/base/memory/ref_counted_memory_unittest.cc b/chromium/base/memory/ref_counted_memory_unittest.cc
index 72046e52eb9..43f7bbeb57a 100644
--- a/chromium/base/memory/ref_counted_memory_unittest.cc
+++ b/chromium/base/memory/ref_counted_memory_unittest.cc
@@ -6,6 +6,8 @@
#include <stdint.h>
+#include <utility>
+
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -70,6 +72,20 @@ TEST(RefCountedMemoryUnitTest, RefCountedString) {
EXPECT_EQ('e', mem->front()[9]);
}
+TEST(RefCountedMemoryUnitTest, RefCountedSharedMemory) {
+ static const char kData[] = "shm_dummy_data";
+ auto shm = std::make_unique<SharedMemory>();
+ ASSERT_TRUE(shm->CreateAndMapAnonymous(sizeof(kData)));
+ memcpy(shm->memory(), kData, sizeof(kData));
+
+ auto mem =
+ MakeRefCounted<RefCountedSharedMemory>(std::move(shm), sizeof(kData));
+ ASSERT_EQ(sizeof(kData), mem->size());
+ EXPECT_EQ('s', mem->front()[0]);
+ EXPECT_EQ('h', mem->front()[1]);
+ EXPECT_EQ('_', mem->front()[9]);
+}
+
TEST(RefCountedMemoryUnitTest, Equals) {
std::string s1("same");
scoped_refptr<RefCountedMemory> mem1 = RefCountedString::TakeString(&s1);
diff --git a/chromium/base/memory/ref_counted_unittest.cc b/chromium/base/memory/ref_counted_unittest.cc
index 71e75bce8e3..d88fc5409a8 100644
--- a/chromium/base/memory/ref_counted_unittest.cc
+++ b/chromium/base/memory/ref_counted_unittest.cc
@@ -159,7 +159,7 @@ class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
TEST(RefCountedUnitTest, TestSelfAssignment) {
SelfAssign* p = new SelfAssign;
scoped_refptr<SelfAssign> var(p);
- var = var;
+ var = *&var; // The *& defeats Clang's -Wself-assign warning.
EXPECT_EQ(var.get(), p);
var = std::move(var);
EXPECT_EQ(var.get(), p);
diff --git a/chromium/base/memory/scoped_refptr.h b/chromium/base/memory/scoped_refptr.h
index d3803b1ce5d..a2576170bf9 100644
--- a/chromium/base/memory/scoped_refptr.h
+++ b/chromium/base/memory/scoped_refptr.h
@@ -115,13 +115,13 @@ scoped_refptr<T> WrapRefCounted(T* t) {
// };
//
// void some_function() {
-// scoped_refptr<MyFoo> foo = new MyFoo();
+// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// foo->Method(param);
// // |foo| is released when this function returns
// }
//
// void some_other_function() {
-// scoped_refptr<MyFoo> foo = new MyFoo();
+// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// ...
// foo = nullptr; // explicitly releases |foo|
// ...
@@ -134,7 +134,7 @@ scoped_refptr<T> WrapRefCounted(T* t) {
// references between the two objects, like so:
//
// {
-// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
@@ -145,7 +145,7 @@ scoped_refptr<T> WrapRefCounted(T* t) {
// object, simply use the assignment operator:
//
// {
-// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b = a;
diff --git a/chromium/base/memory/shared_memory_mapping.cc b/chromium/base/memory/shared_memory_mapping.cc
new file mode 100644
index 00000000000..005e3fcc31a
--- /dev/null
+++ b/chromium/base/memory/shared_memory_mapping.cc
@@ -0,0 +1,115 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapping.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <aclapi.h>
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#include "base/mac/mach_logging.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+#endif
+
+namespace base {
+
+SharedMemoryMapping::SharedMemoryMapping() = default;
+
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
+ : memory_(mapping.memory_),
+ size_(mapping.size_),
+ mapped_size_(mapping.mapped_size_),
+ guid_(mapping.guid_) {
+ mapping.memory_ = nullptr;
+}
+
+SharedMemoryMapping& SharedMemoryMapping::operator=(
+ SharedMemoryMapping&& mapping) {
+ Unmap();
+ memory_ = mapping.memory_;
+ size_ = mapping.size_;
+ mapped_size_ = mapping.mapped_size_;
+ guid_ = mapping.guid_;
+ mapping.memory_ = nullptr;
+ return *this;
+}
+
+SharedMemoryMapping::~SharedMemoryMapping() {
+ Unmap();
+}
+
+SharedMemoryMapping::SharedMemoryMapping(void* memory,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
+ SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+}
+
+void SharedMemoryMapping::Unmap() {
+ if (!IsValid())
+ return;
+
+ SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+#if defined(OS_WIN)
+ if (!UnmapViewOfFile(memory_))
+ DPLOG(ERROR) << "UnmapViewOfFile";
+#elif defined(OS_FUCHSIA)
+ uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+ zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(), addr, size_);
+ DLOG_IF(ERROR, status != ZX_OK)
+ << "zx_vmar_unmap failed: " << zx_status_get_string(status);
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ kern_return_t kr = mach_vm_deallocate(
+ mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_), size_);
+ MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
+#else
+ if (munmap(memory_, size_) < 0)
+ DPLOG(ERROR) << "munmap";
+#endif
+}
+
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+ ReadOnlySharedMemoryMapping&&) = default;
+ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
+ ReadOnlySharedMemoryMapping&&) = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+ void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+ WritableSharedMemoryMapping&&) = default;
+WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
+ WritableSharedMemoryMapping&&) = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+ void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid)
+ : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+} // namespace base
diff --git a/chromium/base/memory/shared_memory_mapping.h b/chromium/base/memory/shared_memory_mapping.h
new file mode 100644
index 00000000000..434a5871965
--- /dev/null
+++ b/chromium/base/memory/shared_memory_mapping.h
@@ -0,0 +1,143 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+
+#include <cstddef>
+
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+namespace subtle {
+class PlatformSharedMemoryRegion;
+class PlatformSharedMemoryRegionTest;
+} // namespace subtle
+
+// Base class for scoped handles to a shared memory mapping created from a
+// shared memory region. Created shared memory mappings remain valid even if the
+// creator region is transferred or destroyed.
+//
+// Each mapping has an UnguessableToken that identifies the shared memory region
+// it was created from. This is used for memory metrics, to avoid overcounting
+// shared memory.
+class BASE_EXPORT SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ SharedMemoryMapping();
+
+ // Move operations are allowed.
+ SharedMemoryMapping(SharedMemoryMapping&& mapping);
+ SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping);
+
+ // Unmaps the region if the mapping is valid.
+ virtual ~SharedMemoryMapping();
+
+ // Returns true iff the mapping is valid. False means there is no
+ // corresponding area of memory.
+ bool IsValid() const { return memory_ != nullptr; }
+
+ // Returns the logical size of the mapping in bytes. This is precisely the
+ // size requested by whoever created the mapping, and it is always less than
+ // or equal to |mapped_size()|. This is undefined for invalid instances.
+ size_t size() const {
+ DCHECK(IsValid());
+ return size_;
+ }
+
+ // Returns the actual size of the mapping in bytes. This is always at least
+ // as large as |size()| but may be larger due to platform mapping alignment
+ // constraints. This is undefined for invalid instances.
+ size_t mapped_size() const {
+ DCHECK(IsValid());
+ return mapped_size_;
+ }
+
+ // Returns 128-bit GUID of the region this mapping belongs to.
+ const UnguessableToken& guid() const {
+ DCHECK(IsValid());
+ return guid_;
+ }
+
+ protected:
+ SharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+ void* raw_memory_ptr() const { return memory_; }
+
+ private:
+ friend class subtle::PlatformSharedMemoryRegionTest;
+ friend class SharedMemoryTracker;
+
+ void Unmap();
+
+ void* memory_ = nullptr;
+ size_t size_ = 0;
+ size_t mapped_size_ = 0;
+ UnguessableToken guid_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
+};
+
+// Class modeling a read-only mapping of a shared memory region into the
+// current process' address space. This is created by ReadOnlySharedMemoryRegion
+// instances.
+class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ ReadOnlySharedMemoryMapping();
+
+ // Move operations are allowed.
+ ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&);
+ ReadOnlySharedMemoryMapping& operator=(ReadOnlySharedMemoryMapping&&);
+
+ // Returns the base address of the mapping. This is read-only memory. This is
+ // page-aligned. This is nullptr for invalid instances.
+ const void* memory() const { return raw_memory_ptr(); }
+
+ private:
+ friend class ReadOnlySharedMemoryRegion;
+ ReadOnlySharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+
+ DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
+};
+
+// Class modeling a writable mapping of a shared memory region into the
+// current process' address space. This is created by *SharedMemoryRegion
+// instances.
+class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
+ public:
+ // Default constructor initializes an invalid instance.
+ WritableSharedMemoryMapping();
+
+ // Move operations are allowed.
+ WritableSharedMemoryMapping(WritableSharedMemoryMapping&&);
+ WritableSharedMemoryMapping& operator=(WritableSharedMemoryMapping&&);
+
+ // Returns the base address of the mapping. This is writable memory. This is
+ // page-aligned. This is nullptr for invalid instances.
+ void* memory() const { return raw_memory_ptr(); }
+
+ private:
+ friend class subtle::PlatformSharedMemoryRegion;
+ friend class ReadOnlySharedMemoryRegion;
+ friend class WritableSharedMemoryRegion;
+ friend class UnsafeSharedMemoryRegion;
+ WritableSharedMemoryMapping(void* address,
+ size_t size,
+ size_t mapped_size,
+ const UnguessableToken& guid);
+
+ DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
diff --git a/chromium/base/memory/shared_memory_region_unittest.cc b/chromium/base/memory/shared_memory_region_unittest.cc
new file mode 100644
index 00000000000..16b7d4e70bf
--- /dev/null
+++ b/chromium/base/memory/shared_memory_region_unittest.cc
@@ -0,0 +1,296 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <utility>
+
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/memory/writable_shared_memory_region.h"
+#include "base/sys_info.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+const size_t kRegionSize = 1024;
+
+bool IsMemoryFilledWithByte(const void* memory, size_t size, char byte) {
+ const char* start_ptr = static_cast<const char*>(memory);
+ const char* end_ptr = start_ptr + size;
+ for (const char* ptr = start_ptr; ptr < end_ptr; ++ptr) {
+ if (*ptr != byte)
+ return false;
+ }
+
+ return true;
+}
+
+template <typename SharedMemoryRegionType>
+class SharedMemoryRegionTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ std::tie(region_, rw_mapping_) = CreateWithMapping(kRegionSize);
+ ASSERT_TRUE(region_.IsValid());
+ ASSERT_TRUE(rw_mapping_.IsValid());
+ memset(rw_mapping_.memory(), 'G', kRegionSize);
+ EXPECT_TRUE(IsMemoryFilledWithByte(rw_mapping_.memory(), kRegionSize, 'G'));
+ }
+
+ static std::pair<SharedMemoryRegionType, WritableSharedMemoryMapping>
+ CreateWithMapping(size_t size) {
+ SharedMemoryRegionType region = SharedMemoryRegionType::Create(size);
+ WritableSharedMemoryMapping mapping = region.Map();
+ return {std::move(region), std::move(mapping)};
+ }
+
+ protected:
+ SharedMemoryRegionType region_;
+ WritableSharedMemoryMapping rw_mapping_;
+};
+
+// Template specialization of SharedMemoryRegionTest<>::CreateWithMapping() for
+// the ReadOnlySharedMemoryRegion. We need this because
+// ReadOnlySharedMemoryRegion::Create() has a different return type.
+template <>
+std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping>
+SharedMemoryRegionTest<ReadOnlySharedMemoryRegion>::CreateWithMapping(
+ size_t size) {
+ MappedReadOnlyRegion mapped_region = ReadOnlySharedMemoryRegion::Create(size);
+ return {std::move(mapped_region.region), std::move(mapped_region.mapping)};
+}
+
+typedef ::testing::Types<WritableSharedMemoryRegion,
+ UnsafeSharedMemoryRegion,
+ ReadOnlySharedMemoryRegion>
+ AllRegionTypes;
+TYPED_TEST_CASE(SharedMemoryRegionTest, AllRegionTypes);
+
+TYPED_TEST(SharedMemoryRegionTest, NonValidRegion) {
+ TypeParam region;
+ EXPECT_FALSE(region.IsValid());
+ // We shouldn't crash on Map but should return an invalid mapping.
+ typename TypeParam::MappingType mapping = region.Map();
+ EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MoveRegion) {
+ TypeParam moved_region = std::move(this->region_);
+ EXPECT_FALSE(this->region_.IsValid());
+ ASSERT_TRUE(moved_region.IsValid());
+
+ // Check that moved region maps correctly.
+ typename TypeParam::MappingType mapping = moved_region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+ 0);
+
+ // Verify that the second mapping reflects changes in the first.
+ memset(this->rw_mapping_.memory(), '#', kRegionSize);
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+ 0);
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MappingValidAfterClose) {
+ // Check the mapping is still valid after the region is closed.
+ this->region_ = TypeParam();
+ EXPECT_FALSE(this->region_.IsValid());
+ ASSERT_TRUE(this->rw_mapping_.IsValid());
+ EXPECT_TRUE(
+ IsMemoryFilledWithByte(this->rw_mapping_.memory(), kRegionSize, 'G'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapTwice) {
+ // The second mapping is either writable or read-only.
+ typename TypeParam::MappingType mapping = this->region_.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+ 0);
+
+ // Verify that the second mapping reflects changes in the first.
+ memset(this->rw_mapping_.memory(), '#', kRegionSize);
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+ 0);
+
+ // Close the region and unmap the first memory segment, verify the second
+ // still has the right data.
+ this->region_ = TypeParam();
+ this->rw_mapping_ = WritableSharedMemoryMapping();
+ EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, '#'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapUnmapMap) {
+ this->rw_mapping_ = WritableSharedMemoryMapping();
+
+ typename TypeParam::MappingType mapping = this->region_.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, SerializeAndDeserialize) {
+ subtle::PlatformSharedMemoryRegion platform_region =
+ TypeParam::TakeHandleForSerialization(std::move(this->region_));
+ EXPECT_EQ(platform_region.GetGUID(), this->rw_mapping_.guid());
+ TypeParam region = TypeParam::Deserialize(std::move(platform_region));
+ EXPECT_TRUE(region.IsValid());
+ EXPECT_FALSE(this->region_.IsValid());
+ typename TypeParam::MappingType mapping = region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+
+ // Verify that the second mapping reflects changes in the first.
+ memset(this->rw_mapping_.memory(), '#', kRegionSize);
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+ 0);
+}
+
+// Map() will return addresses which are aligned to the platform page size, this
+// varies from platform to platform though. Since we'd like to advertise a
+// minimum alignment that callers can count on, test for it here.
+TYPED_TEST(SharedMemoryRegionTest, MapMinimumAlignment) {
+ EXPECT_EQ(0U,
+ reinterpret_cast<uintptr_t>(this->rw_mapping_.memory()) &
+ (subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment - 1));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapSize) {
+ EXPECT_EQ(this->rw_mapping_.size(), kRegionSize);
+ EXPECT_GE(this->rw_mapping_.mapped_size(), kRegionSize);
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapGranularity) {
+ EXPECT_LT(this->rw_mapping_.mapped_size(),
+ kRegionSize + SysInfo::VMAllocationGranularity());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapAt) {
+ const size_t kPageSize = SysInfo::VMAllocationGranularity();
+ ASSERT_TRUE(kPageSize >= sizeof(uint32_t));
+ ASSERT_EQ(kPageSize % sizeof(uint32_t), 0U);
+ const size_t kDataSize = kPageSize * 2;
+ const size_t kCount = kDataSize / sizeof(uint32_t);
+
+ TypeParam region;
+ WritableSharedMemoryMapping rw_mapping;
+ std::tie(region, rw_mapping) = TestFixture::CreateWithMapping(kDataSize);
+ ASSERT_TRUE(region.IsValid());
+ ASSERT_TRUE(rw_mapping.IsValid());
+ uint32_t* ptr = static_cast<uint32_t*>(rw_mapping.memory());
+
+ for (size_t i = 0; i < kCount; ++i)
+ ptr[i] = i;
+
+ rw_mapping = WritableSharedMemoryMapping();
+ off_t bytes_offset = kPageSize;
+ typename TypeParam::MappingType mapping =
+ region.MapAt(bytes_offset, kDataSize - bytes_offset);
+ ASSERT_TRUE(mapping.IsValid());
+
+ off_t int_offset = bytes_offset / sizeof(uint32_t);
+ const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
+ for (size_t i = int_offset; i < kCount; ++i) {
+ EXPECT_EQ(ptr2[i - int_offset], i);
+ }
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapAtNotAlignedOffsetFails) {
+ const size_t kDataSize = SysInfo::VMAllocationGranularity();
+
+ TypeParam region;
+ WritableSharedMemoryMapping rw_mapping;
+ std::tie(region, rw_mapping) = TestFixture::CreateWithMapping(kDataSize);
+ ASSERT_TRUE(region.IsValid());
+ ASSERT_TRUE(rw_mapping.IsValid());
+ off_t offset = kDataSize / 2;
+ typename TypeParam::MappingType mapping =
+ region.MapAt(offset, kDataSize - offset);
+ EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapMoreBytesThanRegionSizeFails) {
+ size_t region_real_size = this->region_.GetSize();
+ typename TypeParam::MappingType mapping =
+ this->region_.MapAt(0, region_real_size + 1);
+ EXPECT_FALSE(mapping.IsValid());
+}
+
+template <typename DuplicatableSharedMemoryRegion>
+class DuplicatableSharedMemoryRegionTest
+ : public SharedMemoryRegionTest<DuplicatableSharedMemoryRegion> {};
+
+typedef ::testing::Types<UnsafeSharedMemoryRegion, ReadOnlySharedMemoryRegion>
+ DuplicatableRegionTypes;
+TYPED_TEST_CASE(DuplicatableSharedMemoryRegionTest, DuplicatableRegionTypes);
+
+TYPED_TEST(DuplicatableSharedMemoryRegionTest, Duplicate) {
+ TypeParam dup_region = this->region_.Duplicate();
+ typename TypeParam::MappingType mapping = dup_region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+ EXPECT_EQ(this->rw_mapping_.guid(), mapping.guid());
+ EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+}
+
+class ReadOnlySharedMemoryRegionTest : public ::testing::Test {
+ public:
+ ReadOnlySharedMemoryRegion GetInitiallyReadOnlyRegion(size_t size) {
+ MappedReadOnlyRegion mapped_region =
+ ReadOnlySharedMemoryRegion::Create(size);
+ ReadOnlySharedMemoryRegion region = std::move(mapped_region.region);
+ return region;
+ }
+
+ ReadOnlySharedMemoryRegion GetConvertedToReadOnlyRegion(size_t size) {
+ WritableSharedMemoryRegion region =
+ WritableSharedMemoryRegion::Create(kRegionSize);
+ ReadOnlySharedMemoryRegion ro_region =
+ WritableSharedMemoryRegion::ConvertToReadOnly(std::move(region));
+ return ro_region;
+ }
+};
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+ InitiallyReadOnlyRegionCannotBeMappedAsWritable) {
+ ReadOnlySharedMemoryRegion region = GetInitiallyReadOnlyRegion(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+
+ EXPECT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+ ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region))));
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+ ConvertedToReadOnlyRegionCannotBeMappedAsWritable) {
+ ReadOnlySharedMemoryRegion region = GetConvertedToReadOnlyRegion(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+
+ EXPECT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+ ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region))));
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+ InitiallyReadOnlyRegionProducedMappingWriteDeathTest) {
+ ReadOnlySharedMemoryRegion region = GetInitiallyReadOnlyRegion(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ ReadOnlySharedMemoryMapping mapping = region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ void* memory_ptr = const_cast<void*>(mapping.memory());
+ EXPECT_DEATH_IF_SUPPORTED(memset(memory_ptr, 'G', kRegionSize), "");
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+ ConvertedToReadOnlyRegionProducedMappingWriteDeathTest) {
+ ReadOnlySharedMemoryRegion region = GetConvertedToReadOnlyRegion(kRegionSize);
+ ASSERT_TRUE(region.IsValid());
+ ReadOnlySharedMemoryMapping mapping = region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ void* memory_ptr = const_cast<void*>(mapping.memory());
+ EXPECT_DEATH_IF_SUPPORTED(memset(memory_ptr, 'G', kRegionSize), "");
+}
+
+} // namespace base
diff --git a/chromium/base/memory/shared_memory_tracker.cc b/chromium/base/memory/shared_memory_tracker.cc
index 2b823381373..8eefb7ea534 100644
--- a/chromium/base/memory/shared_memory_tracker.cc
+++ b/chromium/base/memory/shared_memory_tracker.cc
@@ -39,20 +39,80 @@ const trace_event::MemoryAllocatorDump*
SharedMemoryTracker::GetOrCreateSharedMemoryDump(
const SharedMemory* shared_memory,
trace_event::ProcessMemoryDump* pmd) {
- const std::string dump_name =
- GetDumpNameForTracing(shared_memory->mapped_id());
+ return GetOrCreateSharedMemoryDumpInternal(shared_memory->memory(),
+ shared_memory->mapped_size(),
+ shared_memory->mapped_id(), pmd);
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+ const SharedMemory& shared_memory) {
+ AutoLock hold(usages_lock_);
+ DCHECK(usages_.find(shared_memory.memory()) == usages_.end());
+ usages_.emplace(shared_memory.memory(), UsageInfo(shared_memory.mapped_size(),
+ shared_memory.mapped_id()));
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+ const SharedMemoryMapping& mapping) {
+ AutoLock hold(usages_lock_);
+ DCHECK(usages_.find(mapping.raw_memory_ptr()) == usages_.end());
+ usages_.emplace(mapping.raw_memory_ptr(),
+ UsageInfo(mapping.mapped_size(), mapping.guid()));
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+ const SharedMemory& shared_memory) {
+ AutoLock hold(usages_lock_);
+ DCHECK(usages_.find(shared_memory.memory()) != usages_.end());
+ usages_.erase(shared_memory.memory());
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+ const SharedMemoryMapping& mapping) {
+ AutoLock hold(usages_lock_);
+ DCHECK(usages_.find(mapping.raw_memory_ptr()) != usages_.end());
+ usages_.erase(mapping.raw_memory_ptr());
+}
+
+SharedMemoryTracker::SharedMemoryTracker() {
+ trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "SharedMemoryTracker", nullptr);
+}
+
+SharedMemoryTracker::~SharedMemoryTracker() = default;
+
+bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+ trace_event::ProcessMemoryDump* pmd) {
+ AutoLock hold(usages_lock_);
+ for (const auto& usage : usages_) {
+ const trace_event::MemoryAllocatorDump* dump =
+ GetOrCreateSharedMemoryDumpInternal(
+ usage.first, usage.second.mapped_size, usage.second.mapped_id, pmd);
+ DCHECK(dump);
+ }
+ return true;
+}
+
+// static
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
+ void* mapped_memory,
+ size_t mapped_size,
+ const UnguessableToken& mapped_id,
+ trace_event::ProcessMemoryDump* pmd) {
+ const std::string dump_name = GetDumpNameForTracing(mapped_id);
trace_event::MemoryAllocatorDump* local_dump =
pmd->GetAllocatorDump(dump_name);
if (local_dump)
return local_dump;
- size_t virtual_size = shared_memory->mapped_size();
+ size_t virtual_size = mapped_size;
// If resident size is not available, a virtual size is used as fallback.
size_t size = virtual_size;
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
base::Optional<size_t> resident_size =
trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory(
- *shared_memory);
+ mapped_memory, mapped_size);
if (resident_size.has_value())
size = resident_size.value();
#endif
@@ -63,7 +123,7 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDump(
local_dump->AddScalar("virtual_size",
trace_event::MemoryAllocatorDump::kUnitsBytes,
virtual_size);
- auto global_dump_guid = GetGlobalDumpIdForTracing(shared_memory->mapped_id());
+ auto global_dump_guid = GetGlobalDumpIdForTracing(mapped_id);
trace_event::MemoryAllocatorDump* global_dump =
pmd->CreateSharedGlobalAllocatorDump(global_dump_guid);
global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
@@ -75,38 +135,4 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDump(
return local_dump;
}
-void SharedMemoryTracker::IncrementMemoryUsage(
- const SharedMemory& shared_memory) {
- AutoLock hold(usages_lock_);
- DCHECK(usages_.find(&shared_memory) == usages_.end());
- usages_[&shared_memory] = shared_memory.mapped_size();
-}
-
-void SharedMemoryTracker::DecrementMemoryUsage(
- const SharedMemory& shared_memory) {
- AutoLock hold(usages_lock_);
- DCHECK(usages_.find(&shared_memory) != usages_.end());
- usages_.erase(&shared_memory);
-}
-
-bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
- trace_event::ProcessMemoryDump* pmd) {
- {
- AutoLock hold(usages_lock_);
- for (const auto& usage : usages_) {
- const trace_event::MemoryAllocatorDump* dump =
- GetOrCreateSharedMemoryDump(usage.first, pmd);
- DCHECK(dump);
- }
- }
- return true;
-}
-
-SharedMemoryTracker::SharedMemoryTracker() {
- trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, "SharedMemoryTracker", nullptr);
-}
-
-SharedMemoryTracker::~SharedMemoryTracker() = default;
-
} // namespace
diff --git a/chromium/base/memory/shared_memory_tracker.h b/chromium/base/memory/shared_memory_tracker.h
index 9b1e1212b9e..66fa160fc6e 100644
--- a/chromium/base/memory/shared_memory_tracker.h
+++ b/chromium/base/memory/shared_memory_tracker.h
@@ -9,6 +9,7 @@
#include <string>
#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_dump_provider.h"
@@ -38,11 +39,15 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
const SharedMemory* shared_memory,
trace_event::ProcessMemoryDump* pmd);
- // Records shared memory usage on mapping.
+ // Records shared memory usage on valid mapping.
void IncrementMemoryUsage(const SharedMemory& shared_memory);
+ // We're in the middle of a refactor https://crbug.com/795291. Eventually, the
+ // first call will go away.
+ void IncrementMemoryUsage(const SharedMemoryMapping& mapping);
// Records shared memory usage on unmapping.
void DecrementMemoryUsage(const SharedMemory& shared_memory);
+ void DecrementMemoryUsage(const SharedMemoryMapping& mapping);
// Root dump name for all shared memory dumps.
static const char kDumpRootName[];
@@ -55,9 +60,24 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) override;
+ static const trace_event::MemoryAllocatorDump*
+ GetOrCreateSharedMemoryDumpInternal(void* mapped_memory,
+ size_t mapped_size,
+ const UnguessableToken& mapped_id,
+ trace_event::ProcessMemoryDump* pmd);
+
+ // Information associated with each mapped address.
+ struct UsageInfo {
+ UsageInfo(size_t size, const UnguessableToken& id)
+ : mapped_size(size), mapped_id(id) {}
+
+ size_t mapped_size;
+ UnguessableToken mapped_id;
+ };
+
// Used to lock when |usages_| is modified or read.
Lock usages_lock_;
- std::map<const SharedMemory*, size_t> usages_;
+ std::map<void*, UsageInfo> usages_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
};
diff --git a/chromium/base/memory/unsafe_shared_memory_region.cc b/chromium/base/memory/unsafe_shared_memory_region.cc
new file mode 100644
index 00000000000..a98866ef636
--- /dev/null
+++ b/chromium/base/memory/unsafe_shared_memory_region.cc
@@ -0,0 +1,74 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+
+namespace base {
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
+ subtle::PlatformSharedMemoryRegion handle =
+ subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
+
+ return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
+ subtle::PlatformSharedMemoryRegion handle) {
+ return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ UnsafeSharedMemoryRegion region) {
+ return std::move(region.handle_);
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+ UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
+ UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
+
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() const {
+ return UnsafeSharedMemoryRegion(handle_.Duplicate());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
+ return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
+ size_t size) const {
+ if (!IsValid())
+ return {};
+
+ void* memory = nullptr;
+ size_t mapped_size = 0;
+ if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ return {};
+
+ return WritableSharedMemoryMapping(memory, size, mapped_size,
+ handle_.GetGUID());
+}
+
+bool UnsafeSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle)
+ : handle_(std::move(handle)) {
+ CHECK_EQ(handle_.GetMode(),
+ subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
+}
+
+} // namespace base
diff --git a/chromium/base/memory/unsafe_shared_memory_region.h b/chromium/base/memory/unsafe_shared_memory_region.h
new file mode 100644
index 00000000000..0b93acf90f5
--- /dev/null
+++ b/chromium/base/memory/unsafe_shared_memory_region.h
@@ -0,0 +1,100 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
+// with caution as the region will be writable to any process with a handle to
+// the region.
+//
+// Use this if and only if the following is true:
+// - You do not need to share the region as read-only, and,
+// - You need to have several instances of the region simultaneously, possibly
+// in different processes, that can produce writable mappings.
+
+class BASE_EXPORT UnsafeSharedMemoryRegion {
+ public:
+ using MappingType = WritableSharedMemoryMapping;
+ // Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
+ // used for mapping writable shared memory into the virtual address space.
+ static UnsafeSharedMemoryRegion Create(size_t size);
+
+ // Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
+ // that was taken from another UnsafeSharedMemoryRegion instance. Returns an
+ // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+ // isn't unsafe.
+ // This should be used only by the code passing a handle across
+ // process boundaries.
+ static UnsafeSharedMemoryRegion Deserialize(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ // Extracts a platform handle from the region. Ownership is transferred to the
+ // returned region object.
+ // This should be used only for sending the handle from the current
+ // process to another.
+ static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+ UnsafeSharedMemoryRegion region);
+
+ // Default constructor initializes an invalid instance.
+ UnsafeSharedMemoryRegion();
+
+ // Move operations are allowed.
+ UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
+ UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
+
+ // Destructor closes shared memory region if valid.
+ // All created mappings will remain valid.
+ ~UnsafeSharedMemoryRegion();
+
+ // Duplicates the underlying platform handle and creates a new
+ // UnsafeSharedMemoryRegion instance that owns the newly created handle.
+ // Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
+ // The current region instance remains valid in any case.
+ UnsafeSharedMemoryRegion Duplicate() const;
+
+ // Maps the shared memory region into the caller's address space with write
+ // access. The mapped address is guaranteed to have an alignment of
+ // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+ // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+ // otherwise.
+ WritableSharedMemoryMapping Map() const;
+
+ // Same as above, but maps only |size| bytes of the shared memory region
+ // starting with the given |offset|. |offset| must be aligned to value of
+ // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+ // requested bytes are out of the region limits.
+ WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+ // Whether the underlying platform handle is valid.
+ bool IsValid() const;
+
+ // Returns the maximum mapping size that can be created from this region.
+ size_t GetSize() const {
+ DCHECK(IsValid());
+ return handle_.GetSize();
+ }
+
+ private:
+ explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
+
+ subtle::PlatformSharedMemoryRegion handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
diff --git a/chromium/base/memory/writable_shared_memory_region.cc b/chromium/base/memory/writable_shared_memory_region.cc
new file mode 100644
index 00000000000..e89328dc62c
--- /dev/null
+++ b/chromium/base/memory/writable_shared_memory_region.cc
@@ -0,0 +1,82 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/writable_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+WritableSharedMemoryRegion WritableSharedMemoryRegion::Create(size_t size) {
+ subtle::PlatformSharedMemoryRegion handle =
+ subtle::PlatformSharedMemoryRegion::CreateWritable(size);
+
+ return WritableSharedMemoryRegion(std::move(handle));
+}
+
+// static
+WritableSharedMemoryRegion WritableSharedMemoryRegion::Deserialize(
+ subtle::PlatformSharedMemoryRegion handle) {
+ return WritableSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+WritableSharedMemoryRegion::TakeHandleForSerialization(
+ WritableSharedMemoryRegion region) {
+ return std::move(region.handle_);
+}
+
+// static
+ReadOnlySharedMemoryRegion WritableSharedMemoryRegion::ConvertToReadOnly(
+ WritableSharedMemoryRegion region) {
+ subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
+ if (!handle.ConvertToReadOnly())
+ return {};
+
+ return ReadOnlySharedMemoryRegion::Deserialize(std::move(handle));
+}
+
+WritableSharedMemoryRegion::WritableSharedMemoryRegion() = default;
+WritableSharedMemoryRegion::WritableSharedMemoryRegion(
+ WritableSharedMemoryRegion&& region) = default;
+WritableSharedMemoryRegion& WritableSharedMemoryRegion::operator=(
+ WritableSharedMemoryRegion&& region) = default;
+WritableSharedMemoryRegion::~WritableSharedMemoryRegion() = default;
+
+WritableSharedMemoryMapping WritableSharedMemoryRegion::Map() const {
+ return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping WritableSharedMemoryRegion::MapAt(
+ off_t offset,
+ size_t size) const {
+ if (!IsValid())
+ return {};
+
+ void* memory = nullptr;
+ size_t mapped_size = 0;
+ if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ return {};
+
+ return WritableSharedMemoryMapping(memory, size, mapped_size,
+ handle_.GetGUID());
+}
+
+bool WritableSharedMemoryRegion::IsValid() const {
+ return handle_.IsValid();
+}
+
+WritableSharedMemoryRegion::WritableSharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle)
+ : handle_(std::move(handle)) {
+ CHECK_EQ(handle_.GetMode(),
+ subtle::PlatformSharedMemoryRegion::Mode::kWritable);
+}
+
+} // namespace base
diff --git a/chromium/base/memory/writable_shared_memory_region.h b/chromium/base/memory/writable_shared_memory_region.h
new file mode 100644
index 00000000000..b953a10038d
--- /dev/null
+++ b/chromium/base/memory/writable_shared_memory_region.h
@@ -0,0 +1,97 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// This region can be locked to read-only access by converting it to a
+// ReadOnlySharedMemoryRegion. However, unlike ReadOnlySharedMemoryRegion and
+// UnsafeSharedMemoryRegion, ownership of this region (while writable) is unique
+// and may only be transferred, not duplicated.
+class BASE_EXPORT WritableSharedMemoryRegion {
+ public:
+ using MappingType = WritableSharedMemoryMapping;
+ // Creates a new WritableSharedMemoryRegion instance of a given
+ // size that can be used for mapping writable shared memory into the virtual
+ // address space.
+ static WritableSharedMemoryRegion Create(size_t size);
+
+ // Returns a WritableSharedMemoryRegion built from a platform handle that was
+ // taken from another WritableSharedMemoryRegion instance. Returns an invalid
+ // region iff the |handle| is invalid. CHECK-fails if the |handle| isn't
+ // writable.
+ // This should be used only by the code passing handles across process
+ // boundaries.
+ static WritableSharedMemoryRegion Deserialize(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ // Extracts a platform handle from the region. Ownership is transferred to the
+ // returned region object.
+ // This should be used only for sending the handle from the current
+ // process to another.
+ static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+ WritableSharedMemoryRegion region);
+
+ // Makes the region read-only. No new writable mappings of the region can be
+ // created after this call. Returns an invalid region on failure.
+ static ReadOnlySharedMemoryRegion ConvertToReadOnly(
+ WritableSharedMemoryRegion region);
+
+ // Default constructor initializes an invalid instance.
+ WritableSharedMemoryRegion();
+
+ // Move operations are allowed.
+ WritableSharedMemoryRegion(WritableSharedMemoryRegion&&);
+ WritableSharedMemoryRegion& operator=(WritableSharedMemoryRegion&&);
+
+ // Destructor closes shared memory region if valid.
+ // All created mappings will remain valid.
+ ~WritableSharedMemoryRegion();
+
+ // Maps the shared memory region into the caller's address space with write
+ // access. The mapped address is guaranteed to have an alignment of
+ // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+ // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+ // otherwise.
+ WritableSharedMemoryMapping Map() const;
+
+ // Same as above, but maps only |size| bytes of the shared memory block
+ // starting with the given |offset|. |offset| must be aligned to value of
+ // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+ // requested bytes are out of the region limits.
+ WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+ // Whether underlying platform handles are valid.
+ bool IsValid() const;
+
+ // Returns the maximum mapping size that can be created from this region.
+ size_t GetSize() const {
+ DCHECK(IsValid());
+ return handle_.GetSize();
+ }
+
+ private:
+ explicit WritableSharedMemoryRegion(
+ subtle::PlatformSharedMemoryRegion handle);
+
+ subtle::PlatformSharedMemoryRegion handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryRegion);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 028cf9f8d95..b417f2fffe8 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_default.h"
+#include "base/message_loop/message_pump_for_ui.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_id_name_manager.h"
@@ -22,18 +23,6 @@
#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
-#if defined(OS_POSIX) && !defined(OS_IOS) && !defined(OS_FUCHSIA)
-#include "base/message_loop/message_pump_libevent.h"
-#endif
-#if defined(OS_FUCHSIA)
-#include "base/message_loop/message_pump_fuchsia.h"
-#endif
-#if defined(OS_ANDROID)
-#include "base/message_loop/message_pump_android.h"
-#endif
-#if defined(USE_GLIB)
-#include "base/message_loop/message_pump_glib.h"
-#endif
namespace base {
@@ -47,22 +36,6 @@ base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
}
MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = nullptr;
-#if defined(OS_IOS)
-using MessagePumpForIO = MessagePumpIOSForIO;
-#elif defined(OS_NACL_SFI)
-using MessagePumpForIO = MessagePumpDefault;
-#elif defined(OS_FUCHSIA)
-using MessagePumpForIO = MessagePumpFuchsia;
-#elif defined(OS_POSIX)
-using MessagePumpForIO = MessagePumpLibevent;
-#endif
-
-#if !defined(OS_NACL_SFI)
-MessagePumpForIO* ToPumpIO(MessagePump* pump) {
- return static_cast<MessagePumpForIO*>(pump);
-}
-#endif // !defined(OS_NACL_SFI)
-
std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
return pump;
}
@@ -161,30 +134,21 @@ bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
// static
std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
-// TODO(rvargas): Get rid of the OS guards.
-#if defined(USE_GLIB) && !defined(OS_NACL)
- using MessagePumpForUI = MessagePumpGlib;
-#elif (defined(OS_LINUX) && !defined(OS_NACL)) || defined(OS_BSD)
- using MessagePumpForUI = MessagePumpLibevent;
-#elif defined(OS_FUCHSIA)
- using MessagePumpForUI = MessagePumpFuchsia;
-#endif
-
+ if (type == MessageLoop::TYPE_UI) {
+ if (message_pump_for_ui_factory_)
+ return message_pump_for_ui_factory_();
#if defined(OS_IOS) || defined(OS_MACOSX)
-#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(MessagePumpMac::Create())
+ return MessagePumpMac::Create();
#elif defined(OS_NACL) || defined(OS_AIX)
-// Currently NaCl and AIX don't have a UI MessageLoop.
-// TODO(abarth): Figure out if we need this.
-#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>()
+ // Currently NaCl and AIX don't have a UI MessageLoop.
+ // TODO(abarth): Figure out if we need this.
+ NOTREACHED();
+ return nullptr;
#else
-#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(new MessagePumpForUI())
+ return std::make_unique<MessagePumpForUI>();
#endif
-
- if (type == MessageLoop::TYPE_UI) {
- if (message_pump_for_ui_factory_)
- return message_pump_for_ui_factory_();
- return MESSAGE_PUMP_UI;
}
+
if (type == MessageLoop::TYPE_IO)
return std::unique_ptr<MessagePump>(new MessagePumpForIO());
@@ -225,8 +189,6 @@ Closure MessageLoop::QuitWhenIdleClosure() {
void MessageLoop::SetNestableTasksAllowed(bool allowed) {
if (allowed) {
- CHECK(RunLoop::IsNestingAllowedOnCurrentThread());
-
// Kick the native pump just in case we enter a OS-driven nested message
// loop that does not go through RunLoop::Run().
pump_->ScheduleWork();
@@ -243,13 +205,11 @@ bool MessageLoop::NestableTasksAllowed() const {
// implementation detail. http://crbug.com/703346
void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
DCHECK_EQ(this, current());
- CHECK(allow_task_observers_);
task_observers_.AddObserver(task_observer);
}
void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
DCHECK_EQ(this, current());
- CHECK(allow_task_observers_);
task_observers_.RemoveObserver(task_observer);
}
@@ -383,7 +343,6 @@ bool MessageLoop::ProcessNextDelayedNonNestableTask() {
void MessageLoop::RunTask(PendingTask* pending_task) {
DCHECK(task_execution_allowed_);
- current_pending_task_ = pending_task;
// Execute the task and assume the worst: It is probably not reentrant.
task_execution_allowed_ = false;
@@ -397,8 +356,6 @@ void MessageLoop::RunTask(PendingTask* pending_task) {
observer.DidProcessTask(*pending_task);
task_execution_allowed_ = true;
-
- current_pending_task_ = nullptr;
}
bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
@@ -543,14 +500,10 @@ bool MessageLoopForUI::WatchFileDescriptor(
int fd,
bool persistent,
MessagePumpLibevent::Mode mode,
- MessagePumpLibevent::FileDescriptorWatcher *controller,
- MessagePumpLibevent::Watcher *delegate) {
- return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
- fd,
- persistent,
- mode,
- controller,
- delegate);
+ MessagePumpLibevent::FdWatchController* controller,
+ MessagePumpLibevent::FdWatcher* delegate) {
+ return static_cast<MessagePumpForUI*>(pump_.get())
+ ->WatchFileDescriptor(fd, persistent, mode, controller, delegate);
}
#endif
@@ -561,24 +514,37 @@ bool MessageLoopForUI::WatchFileDescriptor(
#if !defined(OS_NACL_SFI)
+namespace {
+
+MessagePumpForIO* ToPumpIO(MessagePump* pump) {
+ return static_cast<MessagePumpForIO*>(pump);
+}
+
+} // namespace
+
#if defined(OS_WIN)
-void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
+void MessageLoopForIO::RegisterIOHandler(HANDLE file,
+ MessagePumpForIO::IOHandler* handler) {
ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}
-bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
+bool MessageLoopForIO::RegisterJobObject(HANDLE job,
+ MessagePumpForIO::IOHandler* handler) {
return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}
-bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
+bool MessageLoopForIO::WaitForIOCompletion(
+ DWORD timeout,
+ MessagePumpForIO::IOHandler* filter) {
return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
-bool MessageLoopForIO::WatchFileDescriptor(int fd,
- bool persistent,
- Mode mode,
- FileDescriptorWatcher* controller,
- Watcher* delegate) {
+bool MessageLoopForIO::WatchFileDescriptor(
+ int fd,
+ bool persistent,
+ MessagePumpForIO::Mode mode,
+ MessagePumpForIO::FdWatchController* controller,
+ MessagePumpForIO::FdWatcher* delegate) {
return ToPumpIO(pump_.get())->WatchFileDescriptor(
fd,
persistent,
@@ -592,11 +558,12 @@ bool MessageLoopForIO::WatchFileDescriptor(int fd,
#if defined(OS_FUCHSIA)
// Additional watch API for native platform resources.
-bool MessageLoopForIO::WatchZxHandle(zx_handle_t handle,
- bool persistent,
- zx_signals_t signals,
- ZxHandleWatchController* controller,
- ZxHandleWatcher* delegate) {
+bool MessageLoopForIO::WatchZxHandle(
+ zx_handle_t handle,
+ bool persistent,
+ zx_signals_t signals,
+ MessagePumpForIO::ZxHandleWatchController* controller,
+ MessagePumpForIO::ZxHandleWatcher* delegate) {
return ToPumpIO(pump_.get())
->WatchZxHandle(handle, persistent, signals, controller, delegate);
}
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index 27ee7fe8155..439d364fc54 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -17,6 +17,8 @@
#include "base/message_loop/incoming_task_queue.h"
#include "base/message_loop/message_loop_task_runner.h"
#include "base/message_loop/message_pump.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/message_loop/message_pump_for_ui.h"
#include "base/message_loop/timer_slack.h"
#include "base/observer_list.h"
#include "base/pending_task.h"
@@ -26,17 +28,6 @@
#include "base/time/time.h"
#include "build/build_config.h"
-// TODO(sky): these includes should not be necessary. Nuke them.
-#if defined(OS_WIN)
-#include "base/message_loop/message_pump_win.h"
-#elif defined(OS_FUCHSIA)
-#include "base/message_loop/message_pump_fuchsia.h"
-#elif defined(OS_IOS)
-#include "base/message_loop/message_pump_io_ios.h"
-#elif defined(OS_POSIX)
-#include "base/message_loop/message_pump_libevent.h"
-#endif
-
namespace base {
class ThreadTaskRunnerHandle;
@@ -276,10 +267,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
// Runs the specified PendingTask.
void RunTask(PendingTask* pending_task);
- // Disallow task observers. After this is called, calling
- // Add/RemoveTaskObserver() on this MessageLoop will crash.
- void DisallowTaskObservers() { allow_task_observers_ = false; }
-
//----------------------------------------------------------------------------
protected:
std::unique_ptr<MessagePump> pump_;
@@ -302,9 +289,7 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
friend class internal::IncomingTaskQueue;
friend class ScheduleWorkTest;
friend class Thread;
- friend struct PendingTask;
FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
- friend class PendingTaskTest;
// Creates a MessageLoop without binding to a thread.
// If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
@@ -379,13 +364,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
ObserverList<TaskObserver> task_observers_;
- // Used to allow creating a breadcrumb of program counters in PostTask.
- // This variable is only initialized while a task is being executed and is
- // meant only to store context for creating a backtrace breadcrumb. Do not
- // attach other semantics to it without thinking through the use caes
- // thoroughly.
- const PendingTask* current_pending_task_ = nullptr;
-
scoped_refptr<internal::IncomingTaskQueue> incoming_task_queue_;
// A task runner which we haven't bound to a thread yet.
@@ -399,9 +377,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
// MessageLoop is bound to its thread and constant forever after.
PlatformThreadId thread_id_ = kInvalidThreadId;
- // Whether task observers are allowed.
- bool allow_task_observers_ = true;
-
// Holds data stored through the SequenceLocalStorageSlot API.
internal::SequenceLocalStorageMap sequence_local_storage_map_;
@@ -468,12 +443,14 @@ class BASE_EXPORT MessageLoopForUI : public MessageLoop {
#if (defined(USE_OZONE) && !defined(OS_FUCHSIA)) || \
(defined(USE_X11) && !defined(USE_GLIB))
// Please see MessagePumpLibevent for definition.
- bool WatchFileDescriptor(
- int fd,
- bool persistent,
- MessagePumpLibevent::Mode mode,
- MessagePumpLibevent::FileDescriptorWatcher* controller,
- MessagePumpLibevent::Watcher* delegate);
+ static_assert(std::is_same<MessagePumpForUI, MessagePumpLibevent>::value,
+ "MessageLoopForUI::WatchFileDescriptor is not supported when "
+ "MessagePumpForUI is not a MessagePumpLibevent.");
+ bool WatchFileDescriptor(int fd,
+ bool persistent,
+ MessagePumpForUI::Mode mode,
+ MessagePumpForUI::FdWatchController* controller,
+ MessagePumpForUI::FdWatcher* delegate);
#endif
};
@@ -513,51 +490,18 @@ class BASE_EXPORT MessageLoopForIO : public MessageLoop {
#if !defined(OS_NACL_SFI)
#if defined(OS_WIN)
- typedef MessagePumpForIO::IOHandler IOHandler;
- typedef MessagePumpForIO::IOContext IOContext;
-#elif defined(OS_FUCHSIA)
- typedef MessagePumpFuchsia::FdWatcher Watcher;
- typedef MessagePumpFuchsia::FdWatchController FileDescriptorWatcher;
-
- enum Mode{WATCH_READ = MessagePumpFuchsia::WATCH_READ,
- WATCH_WRITE = MessagePumpFuchsia::WATCH_WRITE,
- WATCH_READ_WRITE = MessagePumpFuchsia::WATCH_READ_WRITE};
-
- typedef MessagePumpFuchsia::ZxHandleWatchController ZxHandleWatchController;
- typedef MessagePumpFuchsia::ZxHandleWatcher ZxHandleWatcher;
-#elif defined(OS_IOS)
- typedef MessagePumpIOSForIO::Watcher Watcher;
- typedef MessagePumpIOSForIO::FileDescriptorWatcher
- FileDescriptorWatcher;
-
- enum Mode {
- WATCH_READ = MessagePumpIOSForIO::WATCH_READ,
- WATCH_WRITE = MessagePumpIOSForIO::WATCH_WRITE,
- WATCH_READ_WRITE = MessagePumpIOSForIO::WATCH_READ_WRITE
- };
-#elif defined(OS_POSIX)
- using Watcher = MessagePumpLibevent::Watcher;
- using FileDescriptorWatcher = MessagePumpLibevent::FileDescriptorWatcher;
-
- enum Mode {
- WATCH_READ = MessagePumpLibevent::WATCH_READ,
- WATCH_WRITE = MessagePumpLibevent::WATCH_WRITE,
- WATCH_READ_WRITE = MessagePumpLibevent::WATCH_READ_WRITE
- };
-#endif
-
-#if defined(OS_WIN)
// Please see MessagePumpWin for definitions of these methods.
- void RegisterIOHandler(HANDLE file, IOHandler* handler);
- bool RegisterJobObject(HANDLE job, IOHandler* handler);
- bool WaitForIOCompletion(DWORD timeout, IOHandler* filter);
+ void RegisterIOHandler(HANDLE file, MessagePumpForIO::IOHandler* handler);
+ bool RegisterJobObject(HANDLE job, MessagePumpForIO::IOHandler* handler);
+ bool WaitForIOCompletion(DWORD timeout, MessagePumpForIO::IOHandler* filter);
#elif defined(OS_POSIX)
- // Please see MessagePumpIOSForIO/MessagePumpLibevent for definition.
+ // Please see WatchableIOMessagePumpPosix for definition.
+ // Prefer base::FileDescriptorWatcher for non-critical IO.
bool WatchFileDescriptor(int fd,
bool persistent,
- Mode mode,
- FileDescriptorWatcher* controller,
- Watcher* delegate);
+ MessagePumpForIO::Mode mode,
+ MessagePumpForIO::FdWatchController* controller,
+ MessagePumpForIO::FdWatcher* delegate);
#endif // defined(OS_IOS) || defined(OS_POSIX)
#endif // !defined(OS_NACL_SFI)
@@ -566,8 +510,8 @@ class BASE_EXPORT MessageLoopForIO : public MessageLoop {
bool WatchZxHandle(zx_handle_t handle,
bool persistent,
zx_signals_t signals,
- ZxHandleWatchController* controller,
- ZxHandleWatcher* delegate);
+ MessagePumpForIO::ZxHandleWatchController* controller,
+ MessagePumpForIO::ZxHandleWatcher* delegate);
#endif
};
diff --git a/chromium/base/message_loop/message_loop_io_posix_unittest.cc b/chromium/base/message_loop/message_loop_io_posix_unittest.cc
index 89962a3efc5..2c6d394bc86 100644
--- a/chromium/base/message_loop/message_loop_io_posix_unittest.cc
+++ b/chromium/base/message_loop/message_loop_io_posix_unittest.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_pump_for_io.h"
#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
#include "base/test/gtest_util.h"
@@ -52,7 +53,7 @@ class MessageLoopForIoPosixTest : public testing::Test {
DISALLOW_COPY_AND_ASSIGN(MessageLoopForIoPosixTest);
};
-class TestHandler : public MessageLoopForIO::Watcher {
+class TestHandler : public MessagePumpForIO::FdWatcher {
public:
void OnFileCanReadWithoutBlocking(int fd) override {
watcher_to_delete_ = nullptr;
@@ -69,13 +70,13 @@ class TestHandler : public MessageLoopForIO::Watcher {
bool is_writable_ = false;
// If set then the contained watcher will be deleted on notification.
- std::unique_ptr<MessageLoopForIO::FileDescriptorWatcher> watcher_to_delete_;
+ std::unique_ptr<MessagePumpForIO::FdWatchController> watcher_to_delete_;
};
// Watcher that calls specified closures when read/write events occur. Verifies
// that each non-null closure passed to this class is called once and only once.
// Also resets the read event by reading from the FD.
-class CallClosureHandler : public MessageLoopForIO::Watcher {
+class CallClosureHandler : public MessagePumpForIO::FdWatcher {
public:
CallClosureHandler(OnceClosure read_closure, OnceClosure write_closure)
: read_closure_(std::move(read_closure)),
@@ -128,13 +129,13 @@ TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherOutlivesMessageLoop) {
// This could happen when people use the Singleton pattern or atexit.
// Arrange for watcher to live longer than message loop.
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
TestHandler handler;
{
MessageLoopForIO message_loop;
message_loop.WatchFileDescriptor(write_fd_.get(), true,
- MessageLoopForIO::WATCH_WRITE, &watcher,
+ MessagePumpForIO::WATCH_WRITE, &watcher,
&handler);
// Don't run the message loop, just destroy it.
}
@@ -149,11 +150,11 @@ TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherDoubleStop) {
// Arrange for message loop to live longer than watcher.
MessageLoopForIO message_loop;
{
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
TestHandler handler;
message_loop.WatchFileDescriptor(write_fd_.get(), true,
- MessageLoopForIO::WATCH_WRITE, &watcher,
+ MessagePumpForIO::WATCH_WRITE, &watcher,
&handler);
ASSERT_TRUE(watcher.StopWatchingFileDescriptor());
ASSERT_TRUE(watcher.StopWatchingFileDescriptor());
@@ -167,10 +168,10 @@ TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherDeleteInCallback) {
TestHandler handler;
handler.watcher_to_delete_ =
- std::make_unique<MessageLoopForIO::FileDescriptorWatcher>(FROM_HERE);
+ std::make_unique<MessagePumpForIO::FdWatchController>(FROM_HERE);
message_loop.WatchFileDescriptor(write_fd_.get(), true,
- MessageLoopForIO::WATCH_WRITE,
+ MessagePumpForIO::WATCH_WRITE,
handler.watcher_to_delete_.get(), &handler);
RunLoop().Run();
}
@@ -178,12 +179,12 @@ TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherDeleteInCallback) {
// Verify that basic readable notification works.
TEST_F(MessageLoopForIoPosixTest, WatchReadable) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
TestHandler handler;
// Watch the pipe for readability.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/false, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
// The pipe should not be readable when first created.
@@ -204,12 +205,12 @@ TEST_F(MessageLoopForIoPosixTest, WatchReadable) {
// Verify that watching a file descriptor for writability succeeds.
TEST_F(MessageLoopForIoPosixTest, WatchWritable) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
TestHandler handler;
// Watch the pipe for writability.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- write_fd_.get(), /*persistent=*/false, MessageLoopForIO::WATCH_WRITE,
+ write_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_WRITE,
&watcher, &handler));
// We should not receive a writable notification until we process events.
@@ -227,12 +228,12 @@ TEST_F(MessageLoopForIoPosixTest, WatchWritable) {
// Verify that RunUntilIdle() receives IO notifications.
TEST_F(MessageLoopForIoPosixTest, RunUntilIdle) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
TestHandler handler;
// Watch the pipe for readability.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/false, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
// The pipe should not be readable when first created.
@@ -245,7 +246,7 @@ TEST_F(MessageLoopForIoPosixTest, RunUntilIdle) {
RunLoop().RunUntilIdle();
}
-void StopWatching(MessageLoopForIO::FileDescriptorWatcher* controller,
+void StopWatching(MessagePumpForIO::FdWatchController* controller,
RunLoop* run_loop) {
controller->StopWatchingFileDescriptor();
run_loop->Quit();
@@ -255,13 +256,13 @@ void StopWatching(MessageLoopForIO::FileDescriptorWatcher* controller,
TEST_F(MessageLoopForIoPosixTest, StopFromHandler) {
MessageLoopForIO message_loop;
RunLoop run_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
CallClosureHandler handler(BindOnce(&StopWatching, &watcher, &run_loop),
OnceClosure());
// Create persistent watcher.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/true, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/true, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
TriggerReadEvent();
@@ -275,14 +276,14 @@ TEST_F(MessageLoopForIoPosixTest, StopFromHandler) {
// Verify that non-persistent watcher is called only once.
TEST_F(MessageLoopForIoPosixTest, NonPersistentWatcher) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
RunLoop run_loop;
CallClosureHandler handler(run_loop.QuitClosure(), OnceClosure());
// Create a non-persistent watcher.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/false, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
TriggerReadEvent();
@@ -296,14 +297,14 @@ TEST_F(MessageLoopForIoPosixTest, NonPersistentWatcher) {
// Verify that persistent watcher is called every time the event is triggered.
TEST_F(MessageLoopForIoPosixTest, PersistentWatcher) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
RunLoop run_loop1;
CallClosureHandler handler(run_loop1.QuitClosure(), OnceClosure());
// Create persistent watcher.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/true, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/true, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
TriggerReadEvent();
@@ -318,15 +319,14 @@ TEST_F(MessageLoopForIoPosixTest, PersistentWatcher) {
run_loop2.Run();
}
-void StopWatchingAndWatchAgain(
- MessageLoopForIO::FileDescriptorWatcher* controller,
- int fd,
- MessageLoopForIO::Watcher* new_handler,
- RunLoop* run_loop) {
+void StopWatchingAndWatchAgain(MessagePumpForIO::FdWatchController* controller,
+ int fd,
+ MessagePumpForIO::FdWatcher* new_handler,
+ RunLoop* run_loop) {
controller->StopWatchingFileDescriptor();
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- fd, /*persistent=*/true, MessageLoopForIO::WATCH_READ, controller,
+ fd, /*persistent=*/true, MessagePumpForIO::WATCH_READ, controller,
new_handler));
run_loop->Quit();
@@ -335,7 +335,7 @@ void StopWatchingAndWatchAgain(
// Verify that a watcher can be stopped and reused from an event handler.
TEST_F(MessageLoopForIoPosixTest, StopAndRestartFromHandler) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
RunLoop run_loop1;
RunLoop run_loop2;
@@ -346,7 +346,7 @@ TEST_F(MessageLoopForIoPosixTest, StopAndRestartFromHandler) {
// Create persistent watcher.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/true, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/true, MessagePumpForIO::WATCH_READ,
&watcher, &handler1));
TriggerReadEvent();
@@ -361,7 +361,7 @@ TEST_F(MessageLoopForIoPosixTest, StopAndRestartFromHandler) {
// Verify that the pump properly handles a delayed task after an IO event.
TEST_F(MessageLoopForIoPosixTest, IoEventThenTimer) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
RunLoop timer_run_loop;
message_loop.task_runner()->PostDelayedTask(
@@ -373,7 +373,7 @@ TEST_F(MessageLoopForIoPosixTest, IoEventThenTimer) {
// Create a non-persistent watcher.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/false, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
TriggerReadEvent();
@@ -391,7 +391,7 @@ TEST_F(MessageLoopForIoPosixTest, IoEventThenTimer) {
// Verify that the pipe can handle an IO event after a delayed task.
TEST_F(MessageLoopForIoPosixTest, TimerThenIoEvent) {
MessageLoopForIO message_loop;
- MessageLoopForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpForIO::FdWatchController watcher(FROM_HERE);
// Trigger read event from a delayed task.
message_loop.task_runner()->PostDelayedTask(
@@ -404,7 +404,7 @@ TEST_F(MessageLoopForIoPosixTest, TimerThenIoEvent) {
// Create a non-persistent watcher.
ASSERT_TRUE(MessageLoopForIO::current()->WatchFileDescriptor(
- read_fd_.get(), /*persistent=*/false, MessageLoopForIO::WATCH_READ,
+ read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
&watcher, &handler));
run_loop.Run();
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index cc9a9e938b8..e551e420d07 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -15,12 +15,15 @@
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_for_io.h"
#include "base/pending_task.h"
#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/task_scheduler.h"
#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequence_local_storage_slot.h"
#include "base/threading/thread.h"
@@ -253,6 +256,54 @@ void PostNTasks(int posts_remaining) {
}
}
+enum class TaskSchedulerAvailability {
+ NO_TASK_SCHEDULER,
+ WITH_TASK_SCHEDULER,
+};
+
+std::string TaskSchedulerAvailabilityToString(
+ TaskSchedulerAvailability availability) {
+ switch (availability) {
+ case TaskSchedulerAvailability::NO_TASK_SCHEDULER:
+ return "NoTaskScheduler";
+ case TaskSchedulerAvailability::WITH_TASK_SCHEDULER:
+ return "WithTaskScheduler";
+ }
+ NOTREACHED();
+ return "Unknown";
+}
+
+class MessageLoopTest
+ : public ::testing::TestWithParam<TaskSchedulerAvailability> {
+ public:
+ MessageLoopTest() = default;
+ ~MessageLoopTest() override = default;
+
+ void SetUp() override {
+ if (GetParam() == TaskSchedulerAvailability::WITH_TASK_SCHEDULER)
+ TaskScheduler::CreateAndStartWithDefaultParams("MessageLoopTest");
+ }
+
+ void TearDown() override {
+ if (GetParam() == TaskSchedulerAvailability::WITH_TASK_SCHEDULER) {
+ // Failure to call FlushForTesting() could result in task leaks as tasks
+ // are skipped on shutdown.
+ base::TaskScheduler::GetInstance()->FlushForTesting();
+ base::TaskScheduler::GetInstance()->Shutdown();
+ base::TaskScheduler::GetInstance()->JoinForTesting();
+ base::TaskScheduler::SetInstance(nullptr);
+ }
+ }
+
+ static std::string ParamInfoToString(
+ ::testing::TestParamInfo<TaskSchedulerAvailability> param_info) {
+ return TaskSchedulerAvailabilityToString(param_info.param);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MessageLoopTest);
+};
+
#if defined(OS_ANDROID)
void DoNotRun() {
ASSERT_TRUE(false);
@@ -291,23 +342,23 @@ void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
android::JavaHandlerThreadHelpers::IsExceptionTestException(exception));
}
-TEST(MessageLoopTest, JavaExceptionAbort) {
+TEST_P(MessageLoopTest, JavaExceptionAbort) {
constexpr bool delayed = false;
constexpr bool init_java_first = false;
RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
-TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
+TEST_P(MessageLoopTest, DelayedJavaExceptionAbort) {
constexpr bool delayed = true;
constexpr bool init_java_first = false;
RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
-TEST(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
+TEST_P(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
constexpr bool delayed = false;
constexpr bool init_java_first = true;
RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
}
-TEST(MessageLoopTest, RunTasksWhileShuttingDownJavaThread) {
+TEST_P(MessageLoopTest, RunTasksWhileShuttingDownJavaThread) {
const int kNumPosts = 6;
DummyTaskObserver observer(kNumPosts, 1);
@@ -557,11 +608,11 @@ void PostNTasksThenQuit(int posts_remaining) {
#if defined(OS_WIN)
-class TestIOHandler : public MessageLoopForIO::IOHandler {
+class TestIOHandler : public MessagePumpForIO::IOHandler {
public:
TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
- void OnIOCompleted(MessageLoopForIO::IOContext* context,
+ void OnIOCompleted(MessagePumpForIO::IOContext* context,
DWORD bytes_transfered,
DWORD error) override;
@@ -572,7 +623,7 @@ class TestIOHandler : public MessageLoopForIO::IOHandler {
private:
char buffer_[48];
- MessageLoopForIO::IOContext context_;
+ MessagePumpForIO::IOContext context_;
HANDLE signal_;
win::ScopedHandle file_;
bool wait_;
@@ -597,8 +648,9 @@ void TestIOHandler::Init() {
WaitForIO();
}
-void TestIOHandler::OnIOCompleted(MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered, DWORD error) {
+void TestIOHandler::OnIOCompleted(MessagePumpForIO::IOContext* context,
+ DWORD bytes_transfered,
+ DWORD error) {
ASSERT_TRUE(context == &context_);
ASSERT_TRUE(SetEvent(signal_));
}
@@ -703,35 +755,83 @@ void RunTest_WaitForIO() {
namespace {
+struct MessageLoopTypedTestParams {
+ MessageLoopTypedTestParams(
+ MessageLoop::Type type_in,
+ TaskSchedulerAvailability task_scheduler_availability_in) {
+ type = type_in;
+ task_scheduler_availability = task_scheduler_availability_in;
+ }
+
+ MessageLoop::Type type;
+ TaskSchedulerAvailability task_scheduler_availability;
+};
+
class MessageLoopTypedTest
- : public ::testing::TestWithParam<MessageLoop::Type> {
+ : public ::testing::TestWithParam<MessageLoopTypedTestParams> {
public:
MessageLoopTypedTest() = default;
~MessageLoopTypedTest() = default;
+ void SetUp() override {
+ if (GetTaskSchedulerAvailability() ==
+ TaskSchedulerAvailability::WITH_TASK_SCHEDULER) {
+ TaskScheduler::CreateAndStartWithDefaultParams("MessageLoopTypedTest");
+ }
+ }
+
+ void TearDown() override {
+ if (GetTaskSchedulerAvailability() ==
+ TaskSchedulerAvailability::WITH_TASK_SCHEDULER) {
+ // Failure to call FlushForTesting() could result in task leaks as tasks
+ // are skipped on shutdown.
+ base::TaskScheduler::GetInstance()->FlushForTesting();
+ base::TaskScheduler::GetInstance()->Shutdown();
+ base::TaskScheduler::GetInstance()->JoinForTesting();
+ base::TaskScheduler::SetInstance(nullptr);
+ }
+ }
+
static std::string ParamInfoToString(
- ::testing::TestParamInfo<MessageLoop::Type> param_info) {
- switch (param_info.param) {
+ ::testing::TestParamInfo<MessageLoopTypedTestParams> param_info) {
+ return MessageLoopTypeToString(param_info.param.type) + "_" +
+ TaskSchedulerAvailabilityToString(
+ param_info.param.task_scheduler_availability);
+ }
+
+ protected:
+ MessageLoop::Type GetMessageLoopType() { return GetParam().type; }
+
+ private:
+ static std::string MessageLoopTypeToString(MessageLoop::Type type) {
+ switch (type) {
case MessageLoop::TYPE_DEFAULT:
return "Default";
case MessageLoop::TYPE_IO:
return "IO";
case MessageLoop::TYPE_UI:
return "UI";
- default:
- NOTREACHED();
- return "Unknown";
+ case MessageLoop::TYPE_CUSTOM:
+#if defined(OS_ANDROID)
+ case MessageLoop::TYPE_JAVA:
+#endif // defined(OS_ANDROID)
+ break;
}
+ NOTREACHED();
+ return "NotSupported";
+ }
+
+ TaskSchedulerAvailability GetTaskSchedulerAvailability() {
+ return GetParam().task_scheduler_availability;
}
- private:
DISALLOW_COPY_AND_ASSIGN(MessageLoopTypedTest);
};
} // namespace
TEST_P(MessageLoopTypedTest, PostTask) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Add tests to message loop
scoped_refptr<Foo> foo(new Foo());
std::string a("a"), b("b"), c("c"), d("d");
@@ -759,7 +859,7 @@ TEST_P(MessageLoopTypedTest, PostTask) {
}
TEST_P(MessageLoopTypedTest, PostDelayedTask_Basic) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Test that PostDelayedTask results in a delayed task.
@@ -779,7 +879,7 @@ TEST_P(MessageLoopTypedTest, PostDelayedTask_Basic) {
}
TEST_P(MessageLoopTypedTest, PostDelayedTask_InDelayOrder) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Test that two tasks with different delays run in the right order.
int num_tasks = 2;
@@ -801,7 +901,7 @@ TEST_P(MessageLoopTypedTest, PostDelayedTask_InDelayOrder) {
}
TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Test that two tasks with the same delay run in the order in which they
// were posted.
@@ -828,7 +928,7 @@ TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder) {
}
TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_2) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Test that a delayed task still runs after a normal tasks even if the
// normal tasks take a long time to run.
@@ -854,7 +954,7 @@ TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_2) {
}
TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_3) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Test that a delayed task still runs after a pile of normal tasks. The key
// difference between this test and the previous one is that here we return
@@ -881,7 +981,7 @@ TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_3) {
}
TEST_P(MessageLoopTypedTest, PostDelayedTask_SharedTimer) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// Test that the interval of the timer, used to run the next delayed task, is
// set to a value corresponding to when the next delayed task should run.
@@ -951,7 +1051,7 @@ TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion) {
bool a_was_deleted = false;
bool b_was_deleted = false;
{
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
loop.task_runner()->PostTask(
FROM_HERE, BindOnce(&RecordDeletionProbe::Run,
new RecordDeletionProbe(nullptr, &a_was_deleted)));
@@ -974,7 +1074,7 @@ TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion_Chain) {
bool b_was_deleted = false;
bool c_was_deleted = false;
{
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
// The scoped_refptr for each of the below is held either by the chained
// RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
RecordDeletionProbe* a = new RecordDeletionProbe(nullptr, &a_was_deleted);
@@ -1005,7 +1105,7 @@ void NestingFunc(int* depth) {
} // namespace
TEST_P(MessageLoopTypedTest, Nesting) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
int depth = 50;
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
@@ -1015,7 +1115,7 @@ TEST_P(MessageLoopTypedTest, Nesting) {
}
TEST_P(MessageLoopTypedTest, RecursiveDenial1) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
@@ -1064,7 +1164,7 @@ void OrderedFunc(TaskList* order, int cookie) {
} // namespace
TEST_P(MessageLoopTypedTest, RecursiveDenial3) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
TaskList order;
@@ -1102,7 +1202,7 @@ TEST_P(MessageLoopTypedTest, RecursiveDenial3) {
}
TEST_P(MessageLoopTypedTest, RecursiveSupport1) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
ThreadTaskRunnerHandle::Get()->PostTask(
@@ -1134,7 +1234,7 @@ TEST_P(MessageLoopTypedTest, RecursiveSupport1) {
// Tests that non nestable tasks run in FIFO if there are no nested loops.
TEST_P(MessageLoopTypedTest, NonNestableWithNoNesting) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1160,10 +1260,7 @@ namespace {
void FuncThatPumps(TaskList* order, int cookie) {
order->RecordStart(PUMPS, cookie);
- {
- MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
- RunLoop().RunUntilIdle();
- }
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
order->RecordEnd(PUMPS, cookie);
}
@@ -1177,7 +1274,7 @@ void SleepFunc(TaskList* order, int cookie, TimeDelta delay) {
// Tests that non nestable tasks don't run when there's code in the call stack.
TEST_P(MessageLoopTypedTest, NonNestableDelayedInNestedLoop) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1232,7 +1329,7 @@ void FuncThatQuitsNow() {
// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
TEST_P(MessageLoopTypedTest, QuitNow) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1266,7 +1363,7 @@ TEST_P(MessageLoopTypedTest, QuitNow) {
// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
TEST_P(MessageLoopTypedTest, RunLoopQuitTop) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1296,7 +1393,7 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitTop) {
// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
TEST_P(MessageLoopTypedTest, RunLoopQuitNested) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1337,7 +1434,7 @@ void QuitAndRunNestedLoop(TaskList* order,
// Test that we can run nested loop after quitting the current one.
TEST_P(MessageLoopTypedTest, RunLoopNestedAfterQuit) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1361,7 +1458,7 @@ TEST_P(MessageLoopTypedTest, RunLoopNestedAfterQuit) {
// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
TEST_P(MessageLoopTypedTest, RunLoopQuitBogus) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1394,7 +1491,7 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitBogus) {
// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
TEST_P(MessageLoopTypedTest, RunLoopQuitDeep) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1462,7 +1559,7 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitDeep) {
// Tests RunLoopQuit works before RunWithID.
TEST_P(MessageLoopTypedTest, RunLoopQuitOrderBefore) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1482,7 +1579,7 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitOrderBefore) {
// Tests RunLoopQuit works during RunWithID.
TEST_P(MessageLoopTypedTest, RunLoopQuitOrderDuring) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1507,7 +1604,7 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitOrderDuring) {
// Tests RunLoopQuit works after RunWithID.
TEST_P(MessageLoopTypedTest, RunLoopQuitOrderAfter) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
TaskList order;
@@ -1554,21 +1651,21 @@ TEST_P(MessageLoopTypedTest, RunLoopQuitOrderAfter) {
// times to reproduce the bug.
TEST_P(MessageLoopTypedTest, RecursivePosts) {
const int kNumTimes = 1 << 17;
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
loop.task_runner()->PostTask(FROM_HERE,
BindOnce(&PostNTasksThenQuit, kNumTimes));
RunLoop().Run();
}
TEST_P(MessageLoopTypedTest, NestableTasksAllowedAtTopLevel) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
}
// Nestable tasks shouldn't be allowed to run reentrantly by default (regression
// test for https://crbug.com/754112).
TEST_P(MessageLoopTypedTest, NestableTasksDisallowedByDefault) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
RunLoop run_loop;
loop.task_runner()->PostTask(
FROM_HERE,
@@ -1582,7 +1679,7 @@ TEST_P(MessageLoopTypedTest, NestableTasksDisallowedByDefault) {
}
TEST_P(MessageLoopTypedTest, NestableTasksProcessedWhenRunLoopAllows) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
RunLoop run_loop;
loop.task_runner()->PostTask(
FROM_HERE,
@@ -1615,7 +1712,7 @@ TEST_P(MessageLoopTypedTest, NestableTasksProcessedWhenRunLoopAllows) {
}
TEST_P(MessageLoopTypedTest, NestableTasksAllowedExplicitlyInScope) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
RunLoop run_loop;
loop.task_runner()->PostTask(
FROM_HERE,
@@ -1634,7 +1731,7 @@ TEST_P(MessageLoopTypedTest, NestableTasksAllowedExplicitlyInScope) {
}
TEST_P(MessageLoopTypedTest, NestableTasksAllowedManually) {
- MessageLoop loop(GetParam());
+ MessageLoop loop(GetMessageLoopType());
RunLoop run_loop;
loop.task_runner()->PostTask(
FROM_HERE,
@@ -1651,32 +1748,75 @@ TEST_P(MessageLoopTypedTest, NestableTasksAllowedManually) {
run_loop.Run();
}
-INSTANTIATE_TEST_CASE_P(,
- MessageLoopTypedTest,
- ::testing::Values(MessageLoop::TYPE_DEFAULT,
- MessageLoop::TYPE_IO,
- MessageLoop::TYPE_UI),
- MessageLoopTypedTest::ParamInfoToString);
+INSTANTIATE_TEST_CASE_P(
+ ,
+ MessageLoopTypedTest,
+ ::testing::Values(MessageLoopTypedTestParams(
+ MessageLoop::TYPE_DEFAULT,
+ TaskSchedulerAvailability::NO_TASK_SCHEDULER),
+ MessageLoopTypedTestParams(
+ MessageLoop::TYPE_IO,
+ TaskSchedulerAvailability::NO_TASK_SCHEDULER),
+ MessageLoopTypedTestParams(
+ MessageLoop::TYPE_UI,
+ TaskSchedulerAvailability::NO_TASK_SCHEDULER),
+ MessageLoopTypedTestParams(
+ MessageLoop::TYPE_DEFAULT,
+ TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+ MessageLoopTypedTestParams(
+ MessageLoop::TYPE_IO,
+ TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+ MessageLoopTypedTestParams(
+ MessageLoop::TYPE_UI,
+ TaskSchedulerAvailability::WITH_TASK_SCHEDULER)),
+ MessageLoopTypedTest::ParamInfoToString);
#if defined(OS_WIN)
-TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
+// Verifies that the MessageLoop ignores WM_QUIT, rather than quitting.
+// Users of MessageLoop typically expect to control when their RunLoops stop
+// Run()ning explicitly, via QuitClosure() etc (see https://crbug.com/720078)
+TEST_P(MessageLoopTest, WmQuitIsIgnored) {
+ MessageLoop loop(MessageLoop::TYPE_UI);
+ RunLoop run_loop;
+ // Post a WM_QUIT message to the current thread.
+ ::PostQuitMessage(0);
+
+ // Post a task to the current thread, with a small delay to make it less
+ // likely that we process the posted task before looking for WM_* messages.
+ bool task_was_run = false;
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(
+ [](bool* flag, OnceClosure closure) {
+ *flag = true;
+ std::move(closure).Run();
+ },
+ &task_was_run, run_loop.QuitClosure()),
+ TestTimeouts::tiny_timeout());
+
+ // Run the loop, and ensure that the posted task is processed before we quit.
+ run_loop.Run();
+ EXPECT_TRUE(task_was_run);
+}
+
+TEST_P(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
RunTest_PostDelayedTask_SharedTimer_SubPump();
}
// This test occasionally hangs. See http://crbug.com/44567.
-TEST(MessageLoopTest, DISABLED_RecursiveDenial2) {
+TEST_P(MessageLoopTest, DISABLED_RecursiveDenial2) {
RunTest_RecursiveDenial2(MessageLoop::TYPE_DEFAULT);
RunTest_RecursiveDenial2(MessageLoop::TYPE_UI);
RunTest_RecursiveDenial2(MessageLoop::TYPE_IO);
}
-TEST(MessageLoopTest, RecursiveSupport2) {
+TEST_P(MessageLoopTest, RecursiveSupport2) {
// This test requires a UI loop.
RunTest_RecursiveSupport2(MessageLoop::TYPE_UI);
}
#endif // defined(OS_WIN)
-TEST(MessageLoopTest, TaskObserver) {
+TEST_P(MessageLoopTest, TaskObserver) {
const int kNumPosts = 6;
DummyTaskObserver observer(kNumPosts);
@@ -1692,15 +1832,15 @@ TEST(MessageLoopTest, TaskObserver) {
}
#if defined(OS_WIN)
-TEST(MessageLoopTest, IOHandler) {
+TEST_P(MessageLoopTest, IOHandler) {
RunTest_IOHandler();
}
-TEST(MessageLoopTest, WaitForIO) {
+TEST_P(MessageLoopTest, WaitForIO) {
RunTest_WaitForIO();
}
-TEST(MessageLoopTest, HighResolutionTimer) {
+TEST_P(MessageLoopTest, HighResolutionTimer) {
MessageLoop message_loop;
Time::EnableHighResolutionTimer(true);
@@ -1792,7 +1932,7 @@ class MLDestructionObserver : public MessageLoop::DestructionObserver {
} // namespace
-TEST(MessageLoopTest, DestructionObserverTest) {
+TEST_P(MessageLoopTest, DestructionObserverTest) {
// Verify that the destruction observer gets called at the very end (after
// all the pending tasks have been destroyed).
MessageLoop* loop = new MessageLoop;
@@ -1819,7 +1959,7 @@ TEST(MessageLoopTest, DestructionObserverTest) {
// Verify that MessageLoop sets ThreadMainTaskRunner::current() and it
// posts tasks on that message loop.
-TEST(MessageLoopTest, ThreadMainTaskRunner) {
+TEST_P(MessageLoopTest, ThreadMainTaskRunner) {
MessageLoop loop;
scoped_refptr<Foo> foo(new Foo());
@@ -1838,7 +1978,7 @@ TEST(MessageLoopTest, ThreadMainTaskRunner) {
EXPECT_EQ(foo->result(), "a");
}
-TEST(MessageLoopTest, IsType) {
+TEST_P(MessageLoopTest, IsType) {
MessageLoop loop(MessageLoop::TYPE_UI);
EXPECT_TRUE(loop.IsType(MessageLoop::TYPE_UI));
EXPECT_FALSE(loop.IsType(MessageLoop::TYPE_IO));
@@ -1913,7 +2053,7 @@ LRESULT CALLBACK TestWndProcThunk(HWND hwnd, UINT message,
return 0;
}
-TEST(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
+TEST_P(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
MessageLoop loop(MessageLoop::TYPE_UI);
HINSTANCE instance = CURRENT_MODULE();
WNDCLASSEX wc = {0};
@@ -1936,7 +2076,7 @@ TEST(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
}
#endif // defined(OS_WIN)
-TEST(MessageLoopTest, SetTaskRunner) {
+TEST_P(MessageLoopTest, SetTaskRunner) {
MessageLoop loop;
scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
@@ -1945,7 +2085,7 @@ TEST(MessageLoopTest, SetTaskRunner) {
EXPECT_EQ(new_runner, ThreadTaskRunnerHandle::Get());
}
-TEST(MessageLoopTest, OriginalRunnerWorks) {
+TEST_P(MessageLoopTest, OriginalRunnerWorks) {
MessageLoop loop;
scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
scoped_refptr<SingleThreadTaskRunner> original_runner(loop.task_runner());
@@ -1957,7 +2097,7 @@ TEST(MessageLoopTest, OriginalRunnerWorks) {
EXPECT_EQ(1, foo->test_count());
}
-TEST(MessageLoopTest, DeleteUnboundLoop) {
+TEST_P(MessageLoopTest, DeleteUnboundLoop) {
// It should be possible to delete an unbound message loop on a thread which
// already has another active loop. This happens when thread creation fails.
MessageLoop loop;
@@ -1968,7 +2108,7 @@ TEST(MessageLoopTest, DeleteUnboundLoop) {
EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
}
-TEST(MessageLoopTest, ThreadName) {
+TEST_P(MessageLoopTest, ThreadName) {
{
std::string kThreadName("foo");
MessageLoop loop;
@@ -1986,7 +2126,7 @@ TEST(MessageLoopTest, ThreadName) {
// Verify that tasks posted to and code running in the scope of the same
// MessageLoop access the same SequenceLocalStorage values.
-TEST(MessageLoopTest, SequenceLocalStorageSetGet) {
+TEST_P(MessageLoopTest, SequenceLocalStorageSetGet) {
MessageLoop loop;
SequenceLocalStorageSlot<int> slot;
@@ -2008,7 +2148,7 @@ TEST(MessageLoopTest, SequenceLocalStorageSetGet) {
// Verify that tasks posted to and code running in different MessageLoops access
// different SequenceLocalStorage values.
-TEST(MessageLoopTest, SequenceLocalStorageDifferentMessageLoops) {
+TEST_P(MessageLoopTest, SequenceLocalStorageDifferentMessageLoops) {
SequenceLocalStorageSlot<int> slot;
{
@@ -2033,4 +2173,11 @@ TEST(MessageLoopTest, SequenceLocalStorageDifferentMessageLoops) {
EXPECT_NE(slot.Get(), 11);
}
+INSTANTIATE_TEST_CASE_P(
+ ,
+ MessageLoopTest,
+ ::testing::Values(TaskSchedulerAvailability::NO_TASK_SCHEDULER,
+ TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+ MessageLoopTest::ParamInfoToString);
+
} // namespace base
diff --git a/chromium/base/message_loop/message_pump_for_io.h b/chromium/base/message_loop/message_pump_for_io.h
new file mode 100644
index 00000000000..6aac1e609fb
--- /dev/null
+++ b/chromium/base/message_loop/message_pump_for_io.h
@@ -0,0 +1,44 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_IO_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_IO_H_
+
+// This header is a forwarding header to coalesce the various platform specific
+// types representing MessagePumpForIO.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#elif defined(OS_IOS)
+#include "base/message_loop/message_pump_io_ios.h"
+#elif defined(OS_NACL_SFI)
+#include "base/message_loop/message_pump_default.h"
+#elif defined(OS_FUCHSIA)
+#include "base/message_loop/message_pump_fuchsia.h"
+#elif defined(OS_POSIX)
+#include "base/message_loop/message_pump_libevent.h"
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+// Windows defines it as-is.
+using MessagePumpForIO = MessagePumpForIO;
+#elif defined(OS_IOS)
+using MessagePumpForIO = MessagePumpIOSForIO;
+#elif defined(OS_NACL_SFI)
+using MessagePumpForIO = MessagePumpDefault;
+#elif defined(OS_FUCHSIA)
+using MessagePumpForIO = MessagePumpFuchsia;
+#elif defined(OS_POSIX)
+using MessagePumpForIO = MessagePumpLibevent;
+#else
+#error Platform does not define MessagePumpForIO
+#endif
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_IO_H_
diff --git a/chromium/base/message_loop/message_pump_for_ui.h b/chromium/base/message_loop/message_pump_for_ui.h
new file mode 100644
index 00000000000..2d3e17513b4
--- /dev/null
+++ b/chromium/base/message_loop/message_pump_for_ui.h
@@ -0,0 +1,55 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_UI_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_UI_H_
+
+// This header is a forwarding header to coalesce the various platform specific
+// implementations of MessagePumpForUI.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#elif defined(OS_ANDROID)
+#include "base/message_loop/message_pump_android.h"
+#elif defined(OS_MACOSX)
+// No MessagePumpForUI, see below.
+#elif defined(OS_NACL) || defined(OS_AIX)
+// No MessagePumpForUI, see below.
+#elif defined(USE_GLIB)
+#include "base/message_loop/message_pump_glib.h"
+#elif defined(OS_LINUX) || defined(OS_BSD)
+#include "base/message_loop/message_pump_libevent.h"
+#elif defined(OS_FUCHSIA)
+#include "base/message_loop/message_pump_fuchsia.h"
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+// Windows defines it as-is.
+using MessagePumpForUI = MessagePumpForUI;
+#elif defined(OS_ANDROID)
+// Android defines it as-is.
+using MessagePumpForUI = MessagePumpForUI;
+#elif defined(OS_MACOSX)
+// MessagePumpForUI doesn't exists on Mac, MessagePumpMac::Create defines which
+// Mac specific pump is used.
+#elif defined(OS_NACL) || defined(OS_AIX)
+// Currently NaCl and AIX don't have a MessagePumpForUI.
+// TODO(abarth): Figure out if we need this.
+#elif defined(USE_GLIB)
+using MessagePumpForUI = MessagePumpGlib;
+#elif defined(OS_LINUX) || defined(OS_BSD)
+using MessagePumpForUI = MessagePumpLibevent;
+#elif defined(OS_FUCHSIA)
+using MessagePumpForUI = MessagePumpFuchsia;
+#else
+#error Platform does not define MessagePumpForUI
+#endif
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_UI_H_
diff --git a/chromium/base/message_loop/message_pump_fuchsia.cc b/chromium/base/message_loop/message_pump_fuchsia.cc
index e1cda047404..6504d177333 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.cc
+++ b/chromium/base/message_loop/message_pump_fuchsia.cc
@@ -71,7 +71,8 @@ void MessagePumpFuchsia::FdWatchController::OnZxHandleSignalled(
MessagePumpFuchsia::FdWatchController::FdWatchController(
const Location& from_here)
- : ZxHandleWatchController(from_here) {}
+ : FdWatchControllerInterface(from_here),
+ ZxHandleWatchController(from_here) {}
MessagePumpFuchsia::FdWatchController::~FdWatchController() {
if (!StopWatchingFileDescriptor())
diff --git a/chromium/base/message_loop/message_pump_fuchsia.h b/chromium/base/message_loop/message_pump_fuchsia.h
index 8ffa76eb8f2..dbcd7d4dc38 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.h
+++ b/chromium/base/message_loop/message_pump_fuchsia.h
@@ -11,6 +11,7 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_pump.h"
+#include "base/message_loop/watchable_io_message_pump_posix.h"
#include <fdio/io.h>
#include <fdio/private.h>
@@ -18,7 +19,8 @@
namespace base {
-class BASE_EXPORT MessagePumpFuchsia : public MessagePump {
+class BASE_EXPORT MessagePumpFuchsia : public MessagePump,
+ public WatchableIOMessagePumpPosix {
public:
// Implemented by callers to receive notifications of handle & fd events.
class ZxHandleWatcher {
@@ -30,14 +32,6 @@ class BASE_EXPORT MessagePumpFuchsia : public MessagePump {
virtual ~ZxHandleWatcher() {}
};
- class FdWatcher {
- public:
- virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
- virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
- protected:
- virtual ~FdWatcher() {}
- };
-
// Manages an active watch on an zx_handle_t.
class ZxHandleWatchController {
public:
@@ -97,14 +91,15 @@ class BASE_EXPORT MessagePumpFuchsia : public MessagePump {
DISALLOW_COPY_AND_ASSIGN(ZxHandleWatchController);
};
- // Object returned by WatchFileDescriptor to manage further watching.
- class FdWatchController : public ZxHandleWatchController,
+ class FdWatchController : public FdWatchControllerInterface,
+ public ZxHandleWatchController,
public ZxHandleWatcher {
public:
explicit FdWatchController(const Location& from_here);
~FdWatchController() override;
- bool StopWatchingFileDescriptor();
+ // FdWatchControllerInterface:
+ bool StopWatchingFileDescriptor() override;
private:
friend class MessagePumpFuchsia;
diff --git a/chromium/base/message_loop/message_pump_io_ios.cc b/chromium/base/message_loop/message_pump_io_ios.cc
index 74a3f158303..9b43e8edb22 100644
--- a/chromium/base/message_loop/message_pump_io_ios.cc
+++ b/chromium/base/message_loop/message_pump_io_ios.cc
@@ -6,20 +6,15 @@
namespace base {
-MessagePumpIOSForIO::FileDescriptorWatcher::FileDescriptorWatcher(
+MessagePumpIOSForIO::FdWatchController::FdWatchController(
const Location& from_here)
- : is_persistent_(false),
- fdref_(NULL),
- callback_types_(0),
- fd_source_(NULL),
- watcher_(NULL),
- created_from_location_(from_here) {}
-
-MessagePumpIOSForIO::FileDescriptorWatcher::~FileDescriptorWatcher() {
+ : FdWatchControllerInterface(from_here) {}
+
+MessagePumpIOSForIO::FdWatchController::~FdWatchController() {
StopWatchingFileDescriptor();
}
-bool MessagePumpIOSForIO::FileDescriptorWatcher::StopWatchingFileDescriptor() {
+bool MessagePumpIOSForIO::FdWatchController::StopWatchingFileDescriptor() {
if (fdref_ == NULL)
return true;
@@ -34,11 +29,10 @@ bool MessagePumpIOSForIO::FileDescriptorWatcher::StopWatchingFileDescriptor() {
return true;
}
-void MessagePumpIOSForIO::FileDescriptorWatcher::Init(
- CFFileDescriptorRef fdref,
- CFOptionFlags callback_types,
- CFRunLoopSourceRef fd_source,
- bool is_persistent) {
+void MessagePumpIOSForIO::FdWatchController::Init(CFFileDescriptorRef fdref,
+ CFOptionFlags callback_types,
+ CFRunLoopSourceRef fd_source,
+ bool is_persistent) {
DCHECK(fdref);
DCHECK(!fdref_.is_valid());
@@ -48,14 +42,14 @@ void MessagePumpIOSForIO::FileDescriptorWatcher::Init(
fd_source_.reset(fd_source);
}
-void MessagePumpIOSForIO::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
+void MessagePumpIOSForIO::FdWatchController::OnFileCanReadWithoutBlocking(
int fd,
MessagePumpIOSForIO* pump) {
DCHECK(callback_types_ & kCFFileDescriptorReadCallBack);
watcher_->OnFileCanReadWithoutBlocking(fd);
}
-void MessagePumpIOSForIO::FileDescriptorWatcher::OnFileCanWriteWithoutBlocking(
+void MessagePumpIOSForIO::FdWatchController::OnFileCanWriteWithoutBlocking(
int fd,
MessagePumpIOSForIO* pump) {
DCHECK(callback_types_ & kCFFileDescriptorWriteCallBack);
@@ -68,12 +62,11 @@ MessagePumpIOSForIO::MessagePumpIOSForIO() : weak_factory_(this) {
MessagePumpIOSForIO::~MessagePumpIOSForIO() {
}
-bool MessagePumpIOSForIO::WatchFileDescriptor(
- int fd,
- bool persistent,
- int mode,
- FileDescriptorWatcher *controller,
- Watcher *delegate) {
+bool MessagePumpIOSForIO::WatchFileDescriptor(int fd,
+ bool persistent,
+ int mode,
+ FdWatchController* controller,
+ FdWatcher* delegate) {
DCHECK_GE(fd, 0);
DCHECK(controller);
DCHECK(delegate);
@@ -153,8 +146,7 @@ void MessagePumpIOSForIO::RemoveRunLoopSource(CFRunLoopSourceRef source) {
void MessagePumpIOSForIO::HandleFdIOEvent(CFFileDescriptorRef fdref,
CFOptionFlags callback_types,
void* context) {
- FileDescriptorWatcher* controller =
- static_cast<FileDescriptorWatcher*>(context);
+ FdWatchController* controller = static_cast<FdWatchController*>(context);
DCHECK_EQ(fdref, controller->fdref_.get());
// Ensure that |fdref| will remain live for the duration of this function
@@ -170,7 +162,7 @@ void MessagePumpIOSForIO::HandleFdIOEvent(CFFileDescriptorRef fdref,
controller->OnFileCanWriteWithoutBlocking(fd, pump);
// Perform the read callback only if the file descriptor has not been
- // invalidated in the write callback. As |FileDescriptorWatcher| invalidates
+ // invalidated in the write callback. As |FdWatchController| invalidates
// its file descriptor on destruction, the file descriptor being valid also
// guarantees that |controller| has not been deleted.
if (callback_types & kCFFileDescriptorReadCallBack &&
diff --git a/chromium/base/message_loop/message_pump_io_ios.h b/chromium/base/message_loop/message_pump_io_ios.h
index e842a6c9ecf..b3905441690 100644
--- a/chromium/base/message_loop/message_pump_io_ios.h
+++ b/chromium/base/message_loop/message_pump_io_ios.h
@@ -6,48 +6,31 @@
#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_IO_IOS_H_
#include "base/base_export.h"
-#include "base/location.h"
#include "base/mac/scoped_cffiledescriptorref.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/watchable_io_message_pump_posix.h"
#include "base/threading/thread_checker.h"
namespace base {
// This file introduces a class to monitor sockets and issue callbacks when
// sockets are ready for I/O on iOS.
-class BASE_EXPORT MessagePumpIOSForIO : public MessagePumpNSRunLoop {
+class BASE_EXPORT MessagePumpIOSForIO : public MessagePumpNSRunLoop,
+ public WatchableIOMessagePumpPosix {
public:
- // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
- // of a file descriptor.
- class Watcher {
+ class FdWatchController : public FdWatchControllerInterface {
public:
- // Called from MessageLoop::Run when an FD can be read from/written to
- // without blocking
- virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
- virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
+ explicit FdWatchController(const Location& from_here);
- protected:
- virtual ~Watcher() {}
- };
-
- // Object returned by WatchFileDescriptor to manage further watching.
- class FileDescriptorWatcher {
- public:
- explicit FileDescriptorWatcher(const Location& from_here);
- ~FileDescriptorWatcher(); // Implicitly calls StopWatchingFileDescriptor.
-
- // NOTE: These methods aren't called StartWatching()/StopWatching() to
- // avoid confusion with the win32 ObjectWatcher class.
+ // Implicitly calls StopWatchingFileDescriptor.
+ ~FdWatchController() override;
- // Stop watching the FD, always safe to call. No-op if there's nothing
- // to do.
- bool StopWatchingFileDescriptor();
-
- const Location& created_from_location() { return created_from_location_; }
+ // FdWatchControllerInterface:
+ bool StopWatchingFileDescriptor() override;
private:
friend class MessagePumpIOSForIO;
@@ -63,48 +46,29 @@ class BASE_EXPORT MessagePumpIOSForIO : public MessagePumpNSRunLoop {
void set_pump(base::WeakPtr<MessagePumpIOSForIO> pump) { pump_ = pump; }
const base::WeakPtr<MessagePumpIOSForIO>& pump() const { return pump_; }
- void set_watcher(Watcher* watcher) { watcher_ = watcher; }
+ void set_watcher(FdWatcher* watcher) { watcher_ = watcher; }
void OnFileCanReadWithoutBlocking(int fd, MessagePumpIOSForIO* pump);
void OnFileCanWriteWithoutBlocking(int fd, MessagePumpIOSForIO* pump);
- bool is_persistent_; // false if this event is one-shot.
+ bool is_persistent_ = false; // false if this event is one-shot.
base::mac::ScopedCFFileDescriptorRef fdref_;
- CFOptionFlags callback_types_;
+ CFOptionFlags callback_types_ = 0;
base::ScopedCFTypeRef<CFRunLoopSourceRef> fd_source_;
base::WeakPtr<MessagePumpIOSForIO> pump_;
- Watcher* watcher_;
-
- Location created_from_location_;
-
- DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
- };
+ FdWatcher* watcher_ = nullptr;
- enum Mode {
- WATCH_READ = 1 << 0,
- WATCH_WRITE = 1 << 1,
- WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+ DISALLOW_COPY_AND_ASSIGN(FdWatchController);
};
MessagePumpIOSForIO();
~MessagePumpIOSForIO() override;
- // Have the current thread's message loop watch for a a situation in which
- // reading/writing to the FD can be performed without blocking.
- // Callers must provide a preallocated FileDescriptorWatcher object which
- // can later be used to manage the lifetime of this event.
- // If a FileDescriptorWatcher is passed in which is already attached to
- // an event, then the effect is cumulative i.e. after the call |controller|
- // will watch both the previous event and the new one.
- // If an error occurs while calling this method in a cumulative fashion, the
- // event previously attached to |controller| is aborted.
- // Returns true on success.
- // Must be called on the same thread the message_pump is running on.
bool WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher *controller,
- Watcher *delegate);
+ FdWatchController* controller,
+ FdWatcher* delegate);
void RemoveRunLoopSource(CFRunLoopSourceRef source);
diff --git a/chromium/base/message_loop/message_pump_io_ios_unittest.cc b/chromium/base/message_loop/message_pump_io_ios_unittest.cc
index 6976ee8ae19..dc78309590f 100644
--- a/chromium/base/message_loop/message_pump_io_ios_unittest.cc
+++ b/chromium/base/message_loop/message_pump_io_ios_unittest.cc
@@ -44,7 +44,7 @@ class MessagePumpIOSForIOTest : public testing::Test {
return static_cast<MessageLoopForIO*>(io_thread_.message_loop());
}
- void HandleFdIOEvent(MessageLoopForIO::FileDescriptorWatcher* watcher) {
+ void HandleFdIOEvent(MessagePumpForIO::FdWatchController* watcher) {
MessagePumpIOSForIO::HandleFdIOEvent(watcher->fdref_.get(),
kCFFileDescriptorReadCallBack | kCFFileDescriptorWriteCallBack,
watcher);
@@ -62,13 +62,13 @@ class MessagePumpIOSForIOTest : public testing::Test {
namespace {
-// Concrete implementation of MessagePumpIOSForIO::Watcher that does
+// Concrete implementation of MessagePumpIOSForIO::FdWatcher that does
// nothing useful.
-class StupidWatcher : public MessagePumpIOSForIO::Watcher {
+class StupidWatcher : public MessagePumpIOSForIO::FdWatcher {
public:
~StupidWatcher() override {}
- // base:MessagePumpIOSForIO::Watcher interface
+ // base:MessagePumpIOSForIO::FdWatcher interface
void OnFileCanReadWithoutBlocking(int fd) override {}
void OnFileCanWriteWithoutBlocking(int fd) override {}
};
@@ -76,36 +76,33 @@ class StupidWatcher : public MessagePumpIOSForIO::Watcher {
// Test to make sure that we catch calling WatchFileDescriptor off of the wrong
// thread.
TEST_F(MessagePumpIOSForIOTest, TestWatchingFromBadThread) {
- MessagePumpIOSForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpIOSForIO::FdWatchController watcher(FROM_HERE);
StupidWatcher delegate;
- ASSERT_DCHECK_DEATH(
- io_loop()->WatchFileDescriptor(STDOUT_FILENO, false,
- MessageLoopForIO::WATCH_READ, &watcher,
- &delegate));
+ ASSERT_DCHECK_DEATH(io_loop()->WatchFileDescriptor(
+ STDOUT_FILENO, false, MessagePumpForIO::WATCH_READ, &watcher, &delegate));
}
-class BaseWatcher : public MessagePumpIOSForIO::Watcher {
+class BaseWatcher : public MessagePumpIOSForIO::FdWatcher {
public:
- BaseWatcher(MessagePumpIOSForIO::FileDescriptorWatcher* controller)
+ BaseWatcher(MessagePumpIOSForIO::FdWatchController* controller)
: controller_(controller) {
DCHECK(controller_);
}
~BaseWatcher() override {}
- // MessagePumpIOSForIO::Watcher interface
+ // MessagePumpIOSForIO::FdWatcher interface
void OnFileCanReadWithoutBlocking(int /* fd */) override { NOTREACHED(); }
void OnFileCanWriteWithoutBlocking(int /* fd */) override { NOTREACHED(); }
protected:
- MessagePumpIOSForIO::FileDescriptorWatcher* controller_;
+ MessagePumpIOSForIO::FdWatchController* controller_;
};
class DeleteWatcher : public BaseWatcher {
public:
- explicit DeleteWatcher(
- MessagePumpIOSForIO::FileDescriptorWatcher* controller)
+ explicit DeleteWatcher(MessagePumpIOSForIO::FdWatchController* controller)
: BaseWatcher(controller) {}
~DeleteWatcher() override { DCHECK(!controller_); }
@@ -119,8 +116,8 @@ class DeleteWatcher : public BaseWatcher {
TEST_F(MessagePumpIOSForIOTest, DeleteWatcher) {
std::unique_ptr<MessagePumpIOSForIO> pump(new MessagePumpIOSForIO);
- MessagePumpIOSForIO::FileDescriptorWatcher* watcher =
- new MessagePumpIOSForIO::FileDescriptorWatcher(FROM_HERE);
+ MessagePumpIOSForIO::FdWatchController* watcher =
+ new MessagePumpIOSForIO::FdWatchController(FROM_HERE);
DeleteWatcher delegate(watcher);
pump->WatchFileDescriptor(pipefds_[1],
false, MessagePumpIOSForIO::WATCH_READ_WRITE, watcher, &delegate);
@@ -131,7 +128,7 @@ TEST_F(MessagePumpIOSForIOTest, DeleteWatcher) {
class StopWatcher : public BaseWatcher {
public:
- StopWatcher(MessagePumpIOSForIO::FileDescriptorWatcher* controller,
+ StopWatcher(MessagePumpIOSForIO::FdWatchController* controller,
MessagePumpIOSForIO* pump,
int fd_to_start_watching = -1)
: BaseWatcher(controller),
@@ -155,7 +152,7 @@ class StopWatcher : public BaseWatcher {
TEST_F(MessagePumpIOSForIOTest, StopWatcher) {
std::unique_ptr<MessagePumpIOSForIO> pump(new MessagePumpIOSForIO);
- MessagePumpIOSForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpIOSForIO::FdWatchController watcher(FROM_HERE);
StopWatcher delegate(&watcher, pump.get());
pump->WatchFileDescriptor(pipefds_[1],
false, MessagePumpIOSForIO::WATCH_READ_WRITE, &watcher, &delegate);
@@ -166,7 +163,7 @@ TEST_F(MessagePumpIOSForIOTest, StopWatcher) {
TEST_F(MessagePumpIOSForIOTest, StopWatcherAndWatchSomethingElse) {
std::unique_ptr<MessagePumpIOSForIO> pump(new MessagePumpIOSForIO);
- MessagePumpIOSForIO::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpIOSForIO::FdWatchController watcher(FROM_HERE);
StopWatcher delegate(&watcher, pump.get(), alternate_pipefds_[1]);
pump->WatchFileDescriptor(pipefds_[1],
false, MessagePumpIOSForIO::WATCH_READ_WRITE, &watcher, &delegate);
diff --git a/chromium/base/message_loop/message_pump_libevent.cc b/chromium/base/message_loop/message_pump_libevent.cc
index dc5efc69215..2a595e5fe5c 100644
--- a/chromium/base/message_loop/message_pump_libevent.cc
+++ b/chromium/base/message_loop/message_pump_libevent.cc
@@ -7,7 +7,7 @@
#include <errno.h>
#include <unistd.h>
-#include <memory>
+#include <utility>
#include "base/auto_reset.h"
#include "base/compiler_specific.h"
@@ -29,29 +29,25 @@
// struct event (of which there is roughly one per socket).
// The socket's struct event is created in
// MessagePumpLibevent::WatchFileDescriptor(),
-// is owned by the FileDescriptorWatcher, and is destroyed in
+// is owned by the FdWatchController, and is destroyed in
// StopWatchingFileDescriptor().
// It is moved into and out of lists in struct event_base by
// the libevent functions event_add() and event_del().
//
// TODO(dkegel):
-// At the moment bad things happen if a FileDescriptorWatcher
+// At the moment bad things happen if a FdWatchController
// is active after its MessagePumpLibevent has been destroyed.
-// See MessageLoopTest.FileDescriptorWatcherOutlivesMessageLoop
+// See MessageLoopTest.FdWatchControllerOutlivesMessageLoop
// Not clear yet whether that situation occurs in practice,
// but if it does, we need to fix it.
namespace base {
-MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher(
+MessagePumpLibevent::FdWatchController::FdWatchController(
const Location& from_here)
- : event_(nullptr),
- pump_(nullptr),
- watcher_(nullptr),
- was_destroyed_(nullptr),
- created_from_location_(from_here) {}
+ : FdWatchControllerInterface(from_here) {}
-MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
+MessagePumpLibevent::FdWatchController::~FdWatchController() {
if (event_) {
StopWatchingFileDescriptor();
}
@@ -61,33 +57,30 @@ MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
}
}
-bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
- event* e = ReleaseEvent();
- if (e == nullptr)
+bool MessagePumpLibevent::FdWatchController::StopWatchingFileDescriptor() {
+ std::unique_ptr<event> e = ReleaseEvent();
+ if (!e)
return true;
// event_del() is a no-op if the event isn't active.
- int rv = event_del(e);
- delete e;
+ int rv = event_del(e.get());
pump_ = nullptr;
watcher_ = nullptr;
return (rv == 0);
}
-void MessagePumpLibevent::FileDescriptorWatcher::Init(event* e) {
+void MessagePumpLibevent::FdWatchController::Init(std::unique_ptr<event> e) {
DCHECK(e);
DCHECK(!event_);
- event_ = e;
+ event_ = std::move(e);
}
-event* MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
- struct event* e = event_;
- event_ = nullptr;
- return e;
+std::unique_ptr<event> MessagePumpLibevent::FdWatchController::ReleaseEvent() {
+ return std::move(event_);
}
-void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
+void MessagePumpLibevent::FdWatchController::OnFileCanReadWithoutBlocking(
int fd,
MessagePumpLibevent* pump) {
// Since OnFileCanWriteWithoutBlocking() gets called first, it can stop
@@ -97,7 +90,7 @@ void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
watcher_->OnFileCanReadWithoutBlocking(fd);
}
-void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanWriteWithoutBlocking(
+void MessagePumpLibevent::FdWatchController::OnFileCanWriteWithoutBlocking(
int fd,
MessagePumpLibevent* pump) {
DCHECK(watcher_);
@@ -134,8 +127,8 @@ MessagePumpLibevent::~MessagePumpLibevent() {
bool MessagePumpLibevent::WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher* controller,
- Watcher* delegate) {
+ FdWatchController* controller,
+ FdWatcher* delegate) {
DCHECK_GE(fd, 0);
DCHECK(controller);
DCHECK(delegate);
@@ -189,12 +182,9 @@ bool MessagePumpLibevent::WatchFileDescriptor(int fd,
return false;
}
- // Transfer ownership of evt to controller.
- controller->Init(evt.release());
-
+ controller->Init(std::move(evt));
controller->set_watcher(delegate);
controller->set_pump(this);
-
return true;
}
@@ -314,8 +304,7 @@ bool MessagePumpLibevent::Init() {
void MessagePumpLibevent::OnLibeventNotification(int fd,
short flags,
void* context) {
- FileDescriptorWatcher* controller =
- static_cast<FileDescriptorWatcher*>(context);
+ FdWatchController* controller = static_cast<FdWatchController*>(context);
DCHECK(controller);
TRACE_EVENT2("toplevel", "MessagePumpLibevent::OnLibeventNotification",
"src_file", controller->created_from_location().file_name(),
diff --git a/chromium/base/message_loop/message_pump_libevent.h b/chromium/base/message_loop/message_pump_libevent.h
index cee4ad361c2..002c36cb771 100644
--- a/chromium/base/message_loop/message_pump_libevent.h
+++ b/chromium/base/message_loop/message_pump_libevent.h
@@ -5,10 +5,12 @@
#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
+#include <memory>
+
#include "base/compiler_specific.h"
-#include "base/location.h"
#include "base/macros.h"
#include "base/message_loop/message_pump.h"
+#include "base/message_loop/watchable_io_message_pump_posix.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
@@ -20,95 +22,55 @@ namespace base {
// Class to monitor sockets and issue callbacks when sockets are ready for I/O
// TODO(dkegel): add support for background file IO somehow
-class BASE_EXPORT MessagePumpLibevent : public MessagePump {
+class BASE_EXPORT MessagePumpLibevent : public MessagePump,
+ public WatchableIOMessagePumpPosix {
public:
- // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
- // of a file descriptor.
- class Watcher {
- public:
- // Called from MessageLoop::Run when an FD can be read from/written to
- // without blocking
- virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
- virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
-
- protected:
- virtual ~Watcher() = default;
- };
-
- // Object returned by WatchFileDescriptor to manage further watching.
- class FileDescriptorWatcher {
+ class FdWatchController : public FdWatchControllerInterface {
public:
- explicit FileDescriptorWatcher(const Location& from_here);
- ~FileDescriptorWatcher(); // Implicitly calls StopWatchingFileDescriptor.
+ explicit FdWatchController(const Location& from_here);
- // NOTE: These methods aren't called StartWatching()/StopWatching() to
- // avoid confusion with the win32 ObjectWatcher class.
+ // Implicitly calls StopWatchingFileDescriptor.
+ ~FdWatchController() override;
- // Stop watching the FD, always safe to call. No-op if there's nothing
- // to do.
- bool StopWatchingFileDescriptor();
-
- const Location& created_from_location() { return created_from_location_; }
+ // FdWatchControllerInterface:
+ bool StopWatchingFileDescriptor() override;
private:
friend class MessagePumpLibevent;
friend class MessagePumpLibeventTest;
- // Called by MessagePumpLibevent, ownership of |e| is transferred to this
- // object.
- void Init(event* e);
+ // Called by MessagePumpLibevent.
+ void Init(std::unique_ptr<event> e);
- // Used by MessagePumpLibevent to take ownership of event_.
- event* ReleaseEvent();
+ // Used by MessagePumpLibevent to take ownership of |event_|.
+ std::unique_ptr<event> ReleaseEvent();
void set_pump(MessagePumpLibevent* pump) { pump_ = pump; }
MessagePumpLibevent* pump() const { return pump_; }
- void set_watcher(Watcher* watcher) { watcher_ = watcher; }
+ void set_watcher(FdWatcher* watcher) { watcher_ = watcher; }
void OnFileCanReadWithoutBlocking(int fd, MessagePumpLibevent* pump);
void OnFileCanWriteWithoutBlocking(int fd, MessagePumpLibevent* pump);
- event* event_;
- MessagePumpLibevent* pump_;
- Watcher* watcher_;
+ std::unique_ptr<event> event_;
+ MessagePumpLibevent* pump_ = nullptr;
+ FdWatcher* watcher_ = nullptr;
// If this pointer is non-NULL, the pointee is set to true in the
// destructor.
- bool* was_destroyed_;
-
- const Location created_from_location_;
-
- DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
- };
+ bool* was_destroyed_ = nullptr;
- enum Mode {
- WATCH_READ = 1 << 0,
- WATCH_WRITE = 1 << 1,
- WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+ DISALLOW_COPY_AND_ASSIGN(FdWatchController);
};
MessagePumpLibevent();
~MessagePumpLibevent() override;
- // Registers |delegate| with the current thread's message loop so that its
- // methods are invoked when file descriptor |fd| becomes ready for reading or
- // writing (or both) without blocking. |mode| selects ready for reading, for
- // writing, or both. (See "enum Mode" above. TODO(unknown): nuke the
- // plethora of equivalent "enum Mode" declarations.) |controller| manages
- // the lifetime of registrations. ("Registrations" are also ambiguously
- // called "events" in many places, for instance in libevent.) It is an error
- // to use the same |controller| for different file descriptors; however, the
- // same controller can be reused to add registrations with a different
- // |mode|. If |controller| is already attached to one or more registrations,
- // the new registration is added on to those. If an error occurs while
- // calling this method, any registration previously attached to |controller|
- // is removed. Returns true on success. Must be called on the same thread
- // the message_pump is running on.
bool WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher* controller,
- Watcher* delegate);
+ FdWatchController* controller,
+ FdWatcher* delegate);
// MessagePump methods:
void Run(Delegate* delegate) override;
diff --git a/chromium/base/message_loop/message_pump_libevent_unittest.cc b/chromium/base/message_loop/message_pump_libevent_unittest.cc
index 8deea7fdf1f..0444e35ace2 100644
--- a/chromium/base/message_loop/message_pump_libevent_unittest.cc
+++ b/chromium/base/message_loop/message_pump_libevent_unittest.cc
@@ -61,7 +61,7 @@ class MessagePumpLibeventTest : public testing::Test {
void OnLibeventNotification(
MessagePumpLibevent* pump,
- MessagePumpLibevent::FileDescriptorWatcher* controller) {
+ MessagePumpLibevent::FdWatchController* controller) {
pump->OnLibeventNotification(0, EV_WRITE | EV_READ, controller);
}
@@ -74,13 +74,13 @@ class MessagePumpLibeventTest : public testing::Test {
namespace {
-// Concrete implementation of MessagePumpLibevent::Watcher that does
+// Concrete implementation of MessagePumpLibevent::FdWatcher that does
// nothing useful.
-class StupidWatcher : public MessagePumpLibevent::Watcher {
+class StupidWatcher : public MessagePumpLibevent::FdWatcher {
public:
~StupidWatcher() override = default;
- // base:MessagePumpLibevent::Watcher interface
+ // base:MessagePumpLibevent::FdWatcher interface
void OnFileCanReadWithoutBlocking(int fd) override {}
void OnFileCanWriteWithoutBlocking(int fd) override {}
};
@@ -88,17 +88,15 @@ class StupidWatcher : public MessagePumpLibevent::Watcher {
// Test to make sure that we catch calling WatchFileDescriptor off of the
// wrong thread.
TEST_F(MessagePumpLibeventTest, TestWatchingFromBadThread) {
- MessagePumpLibevent::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpLibevent::FdWatchController watcher(FROM_HERE);
StupidWatcher delegate;
// Ensure that |io_thread_| has started, otherwise we're racing against
// creation of the thread's MessagePump.
WaitUntilIoThreadStarted();
- ASSERT_DCHECK_DEATH(
- io_loop()->WatchFileDescriptor(STDOUT_FILENO, false,
- MessageLoopForIO::WATCH_READ, &watcher,
- &delegate));
+ ASSERT_DCHECK_DEATH(io_loop()->WatchFileDescriptor(
+ STDOUT_FILENO, false, MessagePumpForIO::WATCH_READ, &watcher, &delegate));
}
TEST_F(MessagePumpLibeventTest, QuitOutsideOfRun) {
@@ -106,27 +104,26 @@ TEST_F(MessagePumpLibeventTest, QuitOutsideOfRun) {
ASSERT_DCHECK_DEATH(pump->Quit());
}
-class BaseWatcher : public MessagePumpLibevent::Watcher {
+class BaseWatcher : public MessagePumpLibevent::FdWatcher {
public:
- explicit BaseWatcher(MessagePumpLibevent::FileDescriptorWatcher* controller)
+ explicit BaseWatcher(MessagePumpLibevent::FdWatchController* controller)
: controller_(controller) {
DCHECK(controller_);
}
~BaseWatcher() override = default;
- // base:MessagePumpLibevent::Watcher interface
+ // base:MessagePumpLibevent::FdWatcher interface
void OnFileCanReadWithoutBlocking(int /* fd */) override { NOTREACHED(); }
void OnFileCanWriteWithoutBlocking(int /* fd */) override { NOTREACHED(); }
protected:
- MessagePumpLibevent::FileDescriptorWatcher* controller_;
+ MessagePumpLibevent::FdWatchController* controller_;
};
class DeleteWatcher : public BaseWatcher {
public:
- explicit DeleteWatcher(
- MessagePumpLibevent::FileDescriptorWatcher* controller)
+ explicit DeleteWatcher(MessagePumpLibevent::FdWatchController* controller)
: BaseWatcher(controller) {}
~DeleteWatcher() override { DCHECK(!controller_); }
@@ -140,8 +137,8 @@ class DeleteWatcher : public BaseWatcher {
TEST_F(MessagePumpLibeventTest, DeleteWatcher) {
std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
- MessagePumpLibevent::FileDescriptorWatcher* watcher =
- new MessagePumpLibevent::FileDescriptorWatcher(FROM_HERE);
+ MessagePumpLibevent::FdWatchController* watcher =
+ new MessagePumpLibevent::FdWatchController(FROM_HERE);
DeleteWatcher delegate(watcher);
pump->WatchFileDescriptor(pipefds_[1],
false, MessagePumpLibevent::WATCH_READ_WRITE, watcher, &delegate);
@@ -152,8 +149,7 @@ TEST_F(MessagePumpLibeventTest, DeleteWatcher) {
class StopWatcher : public BaseWatcher {
public:
- explicit StopWatcher(
- MessagePumpLibevent::FileDescriptorWatcher* controller)
+ explicit StopWatcher(MessagePumpLibevent::FdWatchController* controller)
: BaseWatcher(controller) {}
~StopWatcher() override = default;
@@ -165,7 +161,7 @@ class StopWatcher : public BaseWatcher {
TEST_F(MessagePumpLibeventTest, StopWatcher) {
std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
- MessagePumpLibevent::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpLibevent::FdWatchController watcher(FROM_HERE);
StopWatcher delegate(&watcher);
pump->WatchFileDescriptor(pipefds_[1],
false, MessagePumpLibevent::WATCH_READ_WRITE, &watcher, &delegate);
@@ -182,7 +178,7 @@ void QuitMessageLoopAndStart(const Closure& quit_closure) {
runloop.Run();
}
-class NestedPumpWatcher : public MessagePumpLibevent::Watcher {
+class NestedPumpWatcher : public MessagePumpLibevent::FdWatcher {
public:
NestedPumpWatcher() = default;
~NestedPumpWatcher() override = default;
@@ -199,7 +195,7 @@ class NestedPumpWatcher : public MessagePumpLibevent::Watcher {
TEST_F(MessagePumpLibeventTest, NestedPumpWatcher) {
std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
- MessagePumpLibevent::FileDescriptorWatcher watcher(FROM_HERE);
+ MessagePumpLibevent::FdWatchController watcher(FROM_HERE);
NestedPumpWatcher delegate;
pump->WatchFileDescriptor(pipefds_[1],
false, MessagePumpLibevent::WATCH_READ, &watcher, &delegate);
@@ -214,7 +210,7 @@ void FatalClosure() {
class QuitWatcher : public BaseWatcher {
public:
- QuitWatcher(MessagePumpLibevent::FileDescriptorWatcher* controller,
+ QuitWatcher(MessagePumpLibevent::FdWatchController* controller,
base::Closure quit_closure)
: BaseWatcher(controller), quit_closure_(std::move(quit_closure)) {}
@@ -245,7 +241,7 @@ TEST_F(MessagePumpLibeventTest, QuitWatcher) {
MessagePumpLibevent* pump = new MessagePumpLibevent; // owned by |loop|.
MessageLoop loop(WrapUnique(pump));
RunLoop run_loop;
- MessagePumpLibevent::FileDescriptorWatcher controller(FROM_HERE);
+ MessagePumpLibevent::FdWatchController controller(FROM_HERE);
QuitWatcher delegate(&controller, run_loop.QuitClosure());
WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
diff --git a/chromium/base/message_loop/message_pump_mac.h b/chromium/base/message_loop/message_pump_mac.h
index 9a5243441ee..fa88c3a7693 100644
--- a/chromium/base/message_loop/message_pump_mac.h
+++ b/chromium/base/message_loop/message_pump_mac.h
@@ -376,7 +376,7 @@ class BASE_EXPORT MessagePumpMac {
//
// Otherwise creates an instance of MessagePumpNSApplication using a
// default NSApplication.
- static MessagePump* Create();
+ static std::unique_ptr<MessagePump> Create();
#if !defined(OS_IOS)
// If a pump is created before the required CrAppProtocol is
diff --git a/chromium/base/message_loop/message_pump_mac.mm b/chromium/base/message_loop/message_pump_mac.mm
index 3ff1ccbf243..eed9247079e 100644
--- a/chromium/base/message_loop/message_pump_mac.mm
+++ b/chromium/base/message_loop/message_pump_mac.mm
@@ -918,13 +918,13 @@ bool MessagePumpMac::IsHandlingSendEvent() {
#endif // !defined(OS_IOS)
// static
-MessagePump* MessagePumpMac::Create() {
+std::unique_ptr<MessagePump> MessagePumpMac::Create() {
if ([NSThread isMainThread]) {
#if defined(OS_IOS)
- return new MessagePumpUIApplication;
+ return std::make_unique<MessagePumpUIApplication>();
#else
if ([NSApp conformsToProtocol:@protocol(CrAppProtocol)])
- return new MessagePumpCrApplication;
+ return std::make_unique<MessagePumpCrApplication>();
// The main-thread MessagePump implementations REQUIRE an NSApp.
// Executables which have specific requirements for their
@@ -932,11 +932,11 @@ MessagePump* MessagePumpMac::Create() {
// creating an event loop.
[NSApplication sharedApplication];
g_not_using_cr_app = true;
- return new MessagePumpNSApplication;
+ return std::make_unique<MessagePumpNSApplication>();
#endif
}
- return new MessagePumpNSRunLoop;
+ return std::make_unique<MessagePumpNSRunLoop>();
}
} // namespace base
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index 5069b852492..ca257370fb5 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -9,6 +9,7 @@
#include <limits>
+#include "base/debug/alias.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram_macros.h"
@@ -209,6 +210,9 @@ void MessagePumpForUI::WaitForWork() {
if (delay < 0) // Negative value means no timers waiting.
delay = INFINITE;
+ // Tell the optimizer to retain these values to simplify analyzing hangs.
+ base::debug::Alias(&delay);
+ base::debug::Alias(&wait_flags);
DWORD result = MsgWaitForMultipleObjectsEx(0, nullptr, delay, QS_ALLINPUT,
wait_flags);
@@ -311,6 +315,8 @@ void MessagePumpForUI::RescheduleTimer() {
if (delay_msec < USER_TIMER_MINIMUM)
delay_msec = USER_TIMER_MINIMUM;
+ // Tell the optimizer to retain these values to simplify analyzing hangs.
+ base::debug::Alias(&delay_msec);
// Create a WM_TIMER event that will wake us up to check for any pending
// timers (in case we are running within a nested, external sub-pump).
UINT_PTR ret = SetTimer(message_window_.hwnd(), 0, delay_msec, nullptr);
@@ -345,14 +351,12 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
TRACE_EVENT1("base", "MessagePumpForUI::ProcessMessageHelper",
"message", msg.message);
if (WM_QUIT == msg.message) {
- // Receiving WM_QUIT is unusual and unexpected on most message loops.
+ // WM_QUIT is the standard way to exit a GetMessage() loop. Our MessageLoop
+ // has its own quit mechanism, so WM_QUIT is unexpected and should be
+ // ignored.
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
RECEIVED_WM_QUIT_ERROR, MESSAGE_LOOP_PROBLEM_MAX);
- // Repost the QUIT message so that it will be retrieved by the primary
- // GetMessage() loop.
- state_->should_quit = true;
- PostQuitMessage(static_cast<int>(msg.wParam));
- return false;
+ return true;
}
// While running our main message pump, we discard kMsgHaveWork messages.
@@ -510,6 +514,8 @@ void MessagePumpForIO::WaitForWork() {
if (timeout < 0) // Negative value means no timers waiting.
timeout = INFINITE;
+ // Tell the optimizer to retain these values to simplify analyzing hangs.
+ base::debug::Alias(&timeout);
WaitForIOCompletion(timeout, nullptr);
}
diff --git a/chromium/base/message_loop/watchable_io_message_pump_posix.cc b/chromium/base/message_loop/watchable_io_message_pump_posix.cc
new file mode 100644
index 00000000000..18501376040
--- /dev/null
+++ b/chromium/base/message_loop/watchable_io_message_pump_posix.cc
@@ -0,0 +1,16 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/watchable_io_message_pump_posix.h"
+
+namespace base {
+
+WatchableIOMessagePumpPosix::FdWatchControllerInterface::
+ FdWatchControllerInterface(const Location& from_here)
+ : created_from_location_(from_here) {}
+
+WatchableIOMessagePumpPosix::FdWatchControllerInterface::
+ ~FdWatchControllerInterface() = default;
+
+} // namespace base
diff --git a/chromium/base/message_loop/watchable_io_message_pump_posix.h b/chromium/base/message_loop/watchable_io_message_pump_posix.h
new file mode 100644
index 00000000000..74583d9a217
--- /dev/null
+++ b/chromium/base/message_loop/watchable_io_message_pump_posix.h
@@ -0,0 +1,88 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_WATCHABLE_IO_MESSAGE_PUMP_POSIX_H_
+#define BASE_MESSAGE_LOOP_WATCHABLE_IO_MESSAGE_PUMP_POSIX_H_
+
+#include "base/location.h"
+#include "base/macros.h"
+
+namespace base {
+
+class WatchableIOMessagePumpPosix {
+ public:
+ // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
+ // of a file descriptor.
+ class FdWatcher {
+ public:
+ virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
+ virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
+
+ protected:
+ virtual ~FdWatcher() = default;
+ };
+
+ class FdWatchControllerInterface {
+ public:
+ explicit FdWatchControllerInterface(const Location& from_here);
+ // Subclasses must call StopWatchingFileDescriptor() in their destructor
+ // (this parent class cannot generically do it for them as it must usually
+ // be invoked before they destroy their state which happens before the
+ // parent destructor is invoked).
+ virtual ~FdWatchControllerInterface();
+
+ // NOTE: This method isn't called StopWatching() to avoid confusion with the
+ // win32 ObjectWatcher class. While this doesn't really need to be virtual
+ // as there's only one impl per platform and users don't use pointers to the
+ // base class. Having this interface forces implementers to share similar
+ // implementations (a problem in the past).
+
+ // Stop watching the FD, always safe to call. No-op if there's nothing to
+ // do.
+ virtual bool StopWatchingFileDescriptor() = 0;
+
+ const Location& created_from_location() const {
+ return created_from_location_;
+ }
+
+ private:
+ const Location created_from_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(FdWatchControllerInterface);
+ };
+
+ enum Mode {
+ WATCH_READ = 1 << 0,
+ WATCH_WRITE = 1 << 1,
+ WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+ };
+
+ // Every subclass of WatchableIOMessagePumpPosix must provide a
+ // WatchFileDescriptor() which has the following signature where
+ // |FdWatchController| must be the complete type based on
+ // FdWatchControllerInterface.
+
+ // Registers |delegate| with the current thread's message loop so that its
+ // methods are invoked when file descriptor |fd| becomes ready for reading or
+ // writing (or both) without blocking. |mode| selects ready for reading, for
+ // writing, or both. See "enum Mode" above. |controller| manages the
+ // lifetime of registrations. ("Registrations" are also ambiguously called
+ // "events" in many places, for instance in libevent.) It is an error to use
+ // the same |controller| for different file descriptors; however, the same
+ // controller can be reused to add registrations with a different |mode|. If
+ // |controller| is already attached to one or more registrations, the new
+ // registration is added onto those. If an error occurs while calling this
+ // method, any registration previously attached to |controller| is removed.
+ // Returns true on success. Must be called on the same thread the MessagePump
+ // is running on.
+ // bool WatchFileDescriptor(int fd,
+ // bool persistent,
+ // int mode,
+ // FdWatchController* controller,
+ // FdWatcher* delegate) = 0;
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_WATCHABLE_IO_MESSAGE_PUMP_POSIX_H_
diff --git a/chromium/base/metrics/OWNERS b/chromium/base/metrics/OWNERS
index 2f98bde4363..4cc69ff0630 100644
--- a/chromium/base/metrics/OWNERS
+++ b/chromium/base/metrics/OWNERS
@@ -1,4 +1,5 @@
asvitkine@chromium.org
+bcwhite@chromium.org
gayane@chromium.org
holte@chromium.org
isherman@chromium.org
diff --git a/chromium/base/metrics/dummy_histogram.cc b/chromium/base/metrics/dummy_histogram.cc
new file mode 100644
index 00000000000..2707733b2d4
--- /dev/null
+++ b/chromium/base/metrics/dummy_histogram.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/dummy_histogram.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/metrics_hashes.h"
+
+namespace base {
+
+namespace {
+
+// Helper classes for DummyHistogram.
+class DummySampleCountIterator : public SampleCountIterator {
+ public:
+ DummySampleCountIterator() {}
+ ~DummySampleCountIterator() override {}
+
+ // SampleCountIterator:
+ bool Done() const override { return true; }
+ void Next() override { NOTREACHED(); }
+ void Get(HistogramBase::Sample* min,
+ int64_t* max,
+ HistogramBase::Count* count) const override {
+ NOTREACHED();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DummySampleCountIterator);
+};
+
+class DummyHistogramSamples : public HistogramSamples {
+ public:
+ explicit DummyHistogramSamples() : HistogramSamples(0, new LocalMetadata()) {}
+ ~DummyHistogramSamples() override {
+ delete static_cast<LocalMetadata*>(meta());
+ }
+
+ // HistogramSamples:
+ void Accumulate(HistogramBase::Sample value,
+ HistogramBase::Count count) override {}
+ HistogramBase::Count GetCount(HistogramBase::Sample value) const override {
+ return HistogramBase::Count();
+ }
+ HistogramBase::Count TotalCount() const override {
+ return HistogramBase::Count();
+ }
+ std::unique_ptr<SampleCountIterator> Iterator() const override {
+ return std::make_unique<DummySampleCountIterator>();
+ }
+ bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override {
+ return true;
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DummyHistogramSamples);
+};
+
+} // namespace
+
+// static
+DummyHistogram* DummyHistogram::GetInstance() {
+ static base::NoDestructor<DummyHistogram> dummy_histogram;
+ return dummy_histogram.get();
+}
+
+uint64_t DummyHistogram::name_hash() const {
+ return HashMetricName(histogram_name());
+}
+
+HistogramType DummyHistogram::GetHistogramType() const {
+ return DUMMY_HISTOGRAM;
+}
+
+bool DummyHistogram::HasConstructionArguments(
+ Sample expected_minimum,
+ Sample expected_maximum,
+ uint32_t expected_bucket_count) const {
+ return true;
+}
+
+bool DummyHistogram::AddSamplesFromPickle(PickleIterator* iter) {
+ return true;
+}
+
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotSamples() const {
+ return std::make_unique<DummyHistogramSamples>();
+}
+
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotDelta() {
+ return std::make_unique<DummyHistogramSamples>();
+}
+
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotFinalDelta() const {
+ return std::make_unique<DummyHistogramSamples>();
+}
+
+} // namespace base
diff --git a/chromium/base/metrics/dummy_histogram.h b/chromium/base/metrics/dummy_histogram.h
new file mode 100644
index 00000000000..e2cb64ecbbb
--- /dev/null
+++ b/chromium/base/metrics/dummy_histogram.h
@@ -0,0 +1,61 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_DUMMY_HISTOGRAM_H_
+#define BASE_METRICS_DUMMY_HISTOGRAM_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/metrics/histogram_base.h"
+#include "base/no_destructor.h"
+
+namespace base {
+
+// DummyHistogram is used for mocking histogram objects for histograms that
+// shouldn't be recorded. It doesn't do any actual processing.
+class BASE_EXPORT DummyHistogram : public HistogramBase {
+ public:
+ static DummyHistogram* GetInstance();
+
+ // HistogramBase:
+ void CheckName(const StringPiece& name) const override {}
+ uint64_t name_hash() const override;
+ HistogramType GetHistogramType() const override;
+ bool HasConstructionArguments(Sample expected_minimum,
+ Sample expected_maximum,
+ uint32_t expected_bucket_count) const override;
+ void Add(Sample value) override {}
+ void AddCount(Sample value, int count) override {}
+ void AddSamples(const HistogramSamples& samples) override {}
+ bool AddSamplesFromPickle(PickleIterator* iter) override;
+ std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+ std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+ std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
+ void WriteHTMLGraph(std::string* output) const override {}
+ void WriteAscii(std::string* output) const override {}
+
+ protected:
+ // HistogramBase:
+ void SerializeInfoImpl(Pickle* pickle) const override {}
+ void GetParameters(DictionaryValue* params) const override {}
+ void GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const override {}
+
+ private:
+ friend class NoDestructor<DummyHistogram>;
+
+ DummyHistogram() : HistogramBase("dummy_histogram") {}
+ ~DummyHistogram() override {}
+
+ DISALLOW_COPY_AND_ASSIGN(DummyHistogram);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_DUMMY_HISTOGRAM_H_
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 790767c7c86..ff37880c5fb 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -981,10 +981,11 @@ FieldTrial* FieldTrialList::CreateFieldTrial(
}
// static
-void FieldTrialList::AddObserver(Observer* observer) {
+bool FieldTrialList::AddObserver(Observer* observer) {
if (!global_)
- return;
+ return false;
global_->observer_list_->AddObserver(observer);
+ return true;
}
// static
@@ -995,6 +996,18 @@ void FieldTrialList::RemoveObserver(Observer* observer) {
}
// static
+void FieldTrialList::SetSynchronousObserver(Observer* observer) {
+ DCHECK(!global_->synchronous_observer_);
+ global_->synchronous_observer_ = observer;
+}
+
+// static
+void FieldTrialList::RemoveSynchronousObserver(Observer* observer) {
+ DCHECK_EQ(global_->synchronous_observer_, observer);
+ global_->synchronous_observer_ = nullptr;
+}
+
+// static
void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
if (!global_)
return;
@@ -1035,6 +1048,11 @@ void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
field_trial->group_name_internal());
}
+ if (global_->synchronous_observer_) {
+ global_->synchronous_observer_->OnFieldTrialGroupFinalized(
+ field_trial->trial_name(), field_trial->group_name_internal());
+ }
+
global_->observer_list_->Notify(
FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
field_trial->trial_name(), field_trial->group_name_internal());
@@ -1442,15 +1460,14 @@ void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
// Check if we're in the child process and return early if so.
- if (allocator && allocator->IsReadonly())
+ if (!allocator || allocator->IsReadonly())
return;
FieldTrial::FieldTrialRef ref = field_trial->ref_;
if (ref == FieldTrialAllocator::kReferenceNull) {
// It's fine to do this even if the allocator hasn't been instantiated
// yet -- it'll just return early.
- AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
- field_trial);
+ AddToAllocatorWhileLocked(allocator, field_trial);
} else {
// It's also okay to do this even though the callee doesn't have a lock --
// the only thing that happens on a stale read here is a slight performance
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index c8e479eea11..ac4ea1c044e 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -614,12 +614,27 @@ class BASE_EXPORT FieldTrialList {
// Add an observer to be notified when a field trial is irrevocably committed
// to being part of some specific field_group (and hence the group_name is
- // also finalized for that field_trial).
- static void AddObserver(Observer* observer);
+ // also finalized for that field_trial). Returns false and does nothing if
+ // there is no FieldTrialList singleton.
+ static bool AddObserver(Observer* observer);
// Remove an observer.
static void RemoveObserver(Observer* observer);
+ // Similar to AddObserver(), but the passed observer will be notified
+ // synchronously when a field trial is activated and its group selected. It
+ // will be notified synchronously on the same thread where the activation and
+ // group selection happened. It is the responsibility of the observer to make
+ // sure that this is a safe operation and the operation must be fast, as this
+ // work is done synchronously as part of group() or related APIs. Only a
+ // single such observer is supported, exposed specifically for crash
+ // reporting. Must be called on the main thread before any other threads
+ // have been started.
+ static void SetSynchronousObserver(Observer* observer);
+
+ // Removes the single synchronous observer.
+ static void RemoveSynchronousObserver(Observer* observer);
+
// Grabs the lock if necessary and adds the field trial to the allocator. This
// should only be called from FinalizeGroupChoice().
static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
@@ -762,6 +777,9 @@ class BASE_EXPORT FieldTrialList {
// List of observers to be notified when a group is selected for a FieldTrial.
scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
+ // Single synchronous observer to be notified when a trial group is chosen.
+ Observer* synchronous_observer_ = nullptr;
+
// Allocator in shared memory containing field trial data. Used in both
// browser and child processes, but readonly in the child.
// In the future, we may want to move this to a more generic place if we want
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index 2d63590949a..3f7cc309ac7 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -53,11 +53,24 @@ int OneYearBeforeBuildTime() {
// FieldTrialList::Observer implementation for testing.
class TestFieldTrialObserver : public FieldTrialList::Observer {
public:
- TestFieldTrialObserver() {
- FieldTrialList::AddObserver(this);
+ enum Type {
+ ASYNCHRONOUS,
+ SYNCHRONOUS,
+ };
+
+ TestFieldTrialObserver(Type type) : type_(type) {
+ if (type == SYNCHRONOUS)
+ FieldTrialList::SetSynchronousObserver(this);
+ else
+ FieldTrialList::AddObserver(this);
}
- ~TestFieldTrialObserver() override { FieldTrialList::RemoveObserver(this); }
+ ~TestFieldTrialObserver() override {
+ if (type_ == SYNCHRONOUS)
+ FieldTrialList::RemoveSynchronousObserver(this);
+ else
+ FieldTrialList::RemoveObserver(this);
+ }
void OnFieldTrialGroupFinalized(const std::string& trial,
const std::string& group) override {
@@ -69,6 +82,7 @@ class TestFieldTrialObserver : public FieldTrialList::Observer {
const std::string& group_name() const { return group_name_; }
private:
+ const Type type_;
std::string trial_name_;
std::string group_name_;
@@ -602,7 +616,7 @@ TEST_F(FieldTrialTest, CreateTrialsFromStringForceActivation) {
TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
- TestFieldTrialObserver observer;
+ TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
std::set<std::string>()));
RunLoop().RunUntilIdle();
@@ -918,7 +932,7 @@ TEST_F(FieldTrialTest, Observe) {
const char kTrialName[] = "TrialToObserve1";
const char kSecondaryGroupName[] = "SecondaryGroup";
- TestFieldTrialObserver observer;
+ TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
int default_group = -1;
scoped_refptr<FieldTrial> trial =
CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
@@ -926,7 +940,31 @@ TEST_F(FieldTrialTest, Observe) {
const int chosen_group = trial->group();
EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+ // Observers are called asynchronously.
+ EXPECT_TRUE(observer.trial_name().empty());
+ EXPECT_TRUE(observer.group_name().empty());
RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(kTrialName, observer.trial_name());
+ if (chosen_group == default_group)
+ EXPECT_EQ(kDefaultGroupName, observer.group_name());
+ else
+ EXPECT_EQ(kSecondaryGroupName, observer.group_name());
+}
+
+TEST_F(FieldTrialTest, SynchronousObserver) {
+ const char kTrialName[] = "TrialToObserve1";
+ const char kSecondaryGroupName[] = "SecondaryGroup";
+
+ TestFieldTrialObserver observer(TestFieldTrialObserver::SYNCHRONOUS);
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+ const int chosen_group = trial->group();
+ EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+
+ // The observer should be notified synchronously by the group() call.
EXPECT_EQ(kTrialName, observer.trial_name());
if (chosen_group == default_group)
EXPECT_EQ(kDefaultGroupName, observer.group_name());
@@ -937,7 +975,7 @@ TEST_F(FieldTrialTest, Observe) {
TEST_F(FieldTrialTest, ObserveDisabled) {
const char kTrialName[] = "TrialToObserve2";
- TestFieldTrialObserver observer;
+ TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
int default_group = -1;
scoped_refptr<FieldTrial> trial =
CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
@@ -961,7 +999,7 @@ TEST_F(FieldTrialTest, ObserveDisabled) {
TEST_F(FieldTrialTest, ObserveForcedDisabled) {
const char kTrialName[] = "TrialToObserve3";
- TestFieldTrialObserver observer;
+ TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
int default_group = -1;
scoped_refptr<FieldTrial> trial =
CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
@@ -1058,7 +1096,7 @@ TEST_F(FieldTrialTest, CreateSimulatedFieldTrial) {
};
for (size_t i = 0; i < arraysize(test_cases); ++i) {
- TestFieldTrialObserver observer;
+ TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
scoped_refptr<FieldTrial> trial(
FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, kDefaultGroupName,
test_cases[i].entropy_value));
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 488facd066d..c46a18e1d87 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -21,6 +21,7 @@
#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/metrics/dummy_histogram.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
@@ -38,15 +39,6 @@ namespace base {
namespace {
-// A constant to be stored in the dummy field and later verified. This could
-// be either 32 or 64 bit but clang won't truncate the value without an error.
-// TODO(bcwhite): Remove this once crbug/736675 is fixed.
-#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
-constexpr uintptr_t kDummyValue = 0xFEEDC0DEDEADBEEF;
-#else
-constexpr uintptr_t kDummyValue = 0xDEADBEEF;
-#endif
-
bool ReadHistogramArguments(PickleIterator* iter,
std::string* histogram_name,
int* flags,
@@ -84,6 +76,11 @@ bool ReadHistogramArguments(PickleIterator* iter,
bool ValidateRangeChecksum(const HistogramBase& histogram,
uint32_t range_checksum) {
+ // Normally, |histogram| should have type HISTOGRAM or be inherited from it.
+ // However, if it's expired, it will actually be a DUMMY_HISTOGRAM.
+ // Skip the checks in that case.
+ if (histogram.GetHistogramType() == DUMMY_HISTOGRAM)
+ return true;
const Histogram& casted_histogram =
static_cast<const Histogram&>(histogram);
@@ -161,6 +158,12 @@ class Histogram::Factory {
HistogramBase* Histogram::Factory::Build() {
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
if (!histogram) {
+ // TODO(gayane): |HashMetricName()| is called again in Histogram
+ // constructor. Refactor code to avoid the additional call.
+ bool should_record =
+ StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name_));
+ if (!should_record)
+ return DummyHistogram::GetInstance();
// To avoid racy destruction at shutdown, the following will be leaked.
const BucketRanges* created_ranges = CreateRanges();
const BucketRanges* registered_ranges =
@@ -538,47 +541,6 @@ void Histogram::WriteAscii(std::string* output) const {
WriteAsciiImpl(true, "\n", output);
}
-bool Histogram::ValidateHistogramContents(bool crash_if_invalid,
- int identifier) const {
- enum Fields : int {
- kUnloggedBucketRangesField,
- kUnloggedSamplesField,
- kLoggedSamplesField,
- kIdField,
- kHistogramNameField,
- kFlagsField,
- kLoggedBucketRangesField,
- kDummyField,
- };
-
- uint32_t bad_fields = 0;
- if (!unlogged_samples_)
- bad_fields |= 1 << kUnloggedSamplesField;
- else if (!unlogged_samples_->bucket_ranges())
- bad_fields |= 1 << kUnloggedBucketRangesField;
- if (!logged_samples_)
- bad_fields |= 1 << kLoggedSamplesField;
- else if (!logged_samples_->bucket_ranges())
- bad_fields |= 1 << kLoggedBucketRangesField;
- else if (logged_samples_->id() == 0)
- bad_fields |= 1 << kIdField;
- if (flags() == 0)
- bad_fields |= 1 << kFlagsField;
- if (dummy_ != kDummyValue)
- bad_fields |= 1 << kDummyField;
-
- const bool is_valid = (bad_fields & ~(1 << kFlagsField)) == 0;
- if (is_valid || !crash_if_invalid)
- return is_valid;
-
- // Abort if a problem is found (except "flags", which could legally be zero).
- std::string debug_string = base::StringPrintf(
- "%s/%" PRIu32 "#%d", histogram_name(), bad_fields, identifier);
- CHECK(false) << debug_string;
- debug::Alias(&bad_fields);
- return false;
-}
-
void Histogram::SerializeInfoImpl(Pickle* pickle) const {
DCHECK(bucket_ranges()->HasValidChecksum());
pickle->WriteString(histogram_name());
@@ -594,9 +556,8 @@ Histogram::Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges)
- : HistogramBase(name), dummy_(kDummyValue) {
- // TODO(bcwhite): Make this a DCHECK once crbug/734049 is resolved.
- CHECK(ranges) << name << ": " << minimum << "-" << maximum;
+ : HistogramBase(name) {
+ DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
unlogged_samples_.reset(new SampleVector(HashMetricName(name), ranges));
logged_samples_.reset(new SampleVector(unlogged_samples_->id(), ranges));
}
@@ -609,9 +570,8 @@ Histogram::Histogram(const char* name,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta)
- : HistogramBase(name), dummy_(kDummyValue) {
- // TODO(bcwhite): Make this a DCHECK once crbug/734049 is resolved.
- CHECK(ranges) << name << ": " << minimum << "-" << maximum;
+ : HistogramBase(name) {
+ DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
unlogged_samples_.reset(
new PersistentSampleVector(HashMetricName(name), ranges, meta, counts));
logged_samples_.reset(new PersistentSampleVector(
@@ -679,15 +639,9 @@ std::unique_ptr<SampleVector> Histogram::SnapshotAllSamples() const {
}
std::unique_ptr<SampleVector> Histogram::SnapshotUnloggedSamples() const {
- // TODO(bcwhite): Remove these CHECKs once crbug/734049 is resolved.
- HistogramSamples* unlogged = unlogged_samples_.get();
- CHECK(unlogged_samples_);
- CHECK(unlogged_samples_->id());
- CHECK(bucket_ranges());
std::unique_ptr<SampleVector> samples(
new SampleVector(unlogged_samples_->id(), bucket_ranges()));
samples->Add(*unlogged_samples_);
- debug::Alias(&unlogged);
return samples;
}
@@ -857,6 +811,11 @@ class LinearHistogram::Factory : public Histogram::Factory {
void FillHistogram(HistogramBase* base_histogram) override {
Histogram::Factory::FillHistogram(base_histogram);
+ // Normally, |base_histogram| should have type LINEAR_HISTOGRAM or be
+ // inherited from it. However, if it's expired, it will actually be a
+ // DUMMY_HISTOGRAM. Skip filling in that case.
+ if (base_histogram->GetHistogramType() == DUMMY_HISTOGRAM)
+ return;
LinearHistogram* histogram = static_cast<LinearHistogram*>(base_histogram);
// Set range descriptions.
if (descriptions_) {
diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h
index 373f3992481..4c4b150ed6a 100644
--- a/chromium/base/metrics/histogram.h
+++ b/chromium/base/metrics/histogram.h
@@ -208,15 +208,6 @@ class BASE_EXPORT Histogram : public HistogramBase {
void WriteHTMLGraph(std::string* output) const override;
void WriteAscii(std::string* output) const override;
- // Validates the histogram contents. If |crash_if_invalid| is true and the
- // histogram is invalid, this will trigger a CHECK. Otherwise, it will return
- // a bool indicating if the histogram is valid. |corrupted_count| is extra
- // information the caller can provide about the number of corrupt histograms
- // if available.
- // TODO(bcwhite): Remove this after crbug/736675.
- bool ValidateHistogramContents(bool crash_if_invalid,
- int identifier) const override;
-
protected:
// This class, defined entirely within the .cc file, contains all the
// common logic for building a Histogram and can be overridden by more
@@ -318,13 +309,6 @@ class BASE_EXPORT Histogram : public HistogramBase {
// Accumulation of all samples that have been logged with SnapshotDelta().
std::unique_ptr<SampleVectorBase> logged_samples_;
- // This is a dummy field placed where corruption is frequently seen on
- // current Android builds. The hope is that it will mitigate the problem
- // sufficiently to continue with the M61 beta branch while investigation
- // into the true problem continues.
- // TODO(bcwhite): Remove this once crbug/736675 is fixed.
- const uintptr_t dummy_;
-
#if DCHECK_IS_ON() // Don't waste memory if it won't be used.
// Flag to indicate if PrepareFinalDelta has been previously called. It is
// used to DCHECK that a final delta is not created multiple times.
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index a563b936156..d8ae483b239 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -39,6 +39,8 @@ std::string HistogramTypeToString(HistogramType type) {
return "CUSTOM_HISTOGRAM";
case SPARSE_HISTOGRAM:
return "SPARSE_HISTOGRAM";
+ case DUMMY_HISTOGRAM:
+ return "DUMMY_HISTOGRAM";
}
NOTREACHED();
return "UNKNOWN";
@@ -128,11 +130,6 @@ uint32_t HistogramBase::FindCorruption(const HistogramSamples& samples) const {
return NO_INCONSISTENCIES;
}
-bool HistogramBase::ValidateHistogramContents(bool crash_if_invalid,
- int corrupted_count) const {
- return true;
-}
-
void HistogramBase::WriteJSON(std::string* output,
JSONVerbosityLevel verbosity_level) const {
Count count;
diff --git a/chromium/base/metrics/histogram_base.h b/chromium/base/metrics/histogram_base.h
index f4cfc41b961..1971a65b69b 100644
--- a/chromium/base/metrics/histogram_base.h
+++ b/chromium/base/metrics/histogram_base.h
@@ -39,6 +39,7 @@ enum HistogramType {
BOOLEAN_HISTOGRAM,
CUSTOM_HISTOGRAM,
SPARSE_HISTOGRAM,
+ DUMMY_HISTOGRAM,
};
// Controls the verbosity of the information when the histogram is serialized to
@@ -150,10 +151,10 @@ class BASE_EXPORT HistogramBase {
const char* histogram_name() const { return histogram_name_; }
- // Comapres |name| to the histogram name and triggers a DCHECK if they do not
+ // Compares |name| to the histogram name and triggers a DCHECK if they do not
// match. This is a helper function used by histogram macros, which results in
// in more compact machine code being generated by the macros.
- void CheckName(const StringPiece& name) const;
+ virtual void CheckName(const StringPiece& name) const;
// Get a unique ID for this histogram's samples.
virtual uint64_t name_hash() const = 0;
@@ -229,10 +230,6 @@ class BASE_EXPORT HistogramBase {
virtual void WriteHTMLGraph(std::string* output) const = 0;
virtual void WriteAscii(std::string* output) const = 0;
- // TODO(bcwhite): Remove this after crbug/736675.
- virtual bool ValidateHistogramContents(bool crash_if_invalid,
- int corrupted_count) const;
-
// Produce a JSON representation of the histogram with |verbosity_level| as
// the serialization verbosity. This is implemented with the help of
// GetParameters and GetCountAndBucketData; overwrite them to customize the
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index 083bae753cb..04e913013c7 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -5,6 +5,7 @@
#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
#define BASE_METRICS_HISTOGRAM_MACROS_H_
+#include "base/macros.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros_internal.h"
#include "base/metrics/histogram_macros_local.h"
@@ -35,6 +36,28 @@
// an element of the Enum.
// All of these macros must be called with |name| as a runtime constant.
+// The first variant of UMA_HISTOGRAM_ENUMERATION accepts two arguments: the
+// histogram name and the enum sample. It deduces the correct boundary value to
+// use by looking for an enumerator with the name kMaxValue. kMaxValue should
+// share the value of the highest enumerator: this avoids switch statements
+// having to handle a sentinel no-op value.
+//
+// Sample usage:
+// // These values are persisted to logs. Entries should not be renumbered and
+// // numeric values should never be reused.
+// enum class MyEnum {
+// kFirstValue = 0,
+// kSecondValue = 1,
+// ...
+// kFinalValue = N,
+// kMaxValue = kFinalValue,
+// };
+// UMA_HISTOGRAM_ENUMERATION("My.Enumeration", MyEnum::kSomeValue);
+//
+// The second variant requires three arguments: the first two are the same as
+// before, and the third argument is the enum boundary: this must be strictly
+// greater than any other enumerator that will be sampled.
+//
// Sample usage:
// // These values are persisted to logs. Entries should not be renumbered and
// // numeric values should never be reused.
@@ -48,11 +71,16 @@
// UMA_HISTOGRAM_ENUMERATION("My.Enumeration",
// MyEnum::SOME_VALUE, MyEnum::COUNT);
//
-// Note: The value in |sample| must be strictly less than |enum_size|.
-
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_size) \
- INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
- name, sample, enum_size, base::HistogramBase::kUmaTargetedHistogramFlag)
+// Note: If the enum is used in a switch, it is often desirable to avoid writing
+// a case statement to handle an unused sentinel value (i.e. COUNT in the above
+// example). For scoped enums, this is awkward since it requires casting the
+// enum to an arithmetic type and adding one. Instead, prefer the two argument
+// version of the macro which automatically deduces the boundary from kMaxValue.
+#define UMA_HISTOGRAM_ENUMERATION(name, ...) \
+ CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO( \
+ __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
+ INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)( \
+ name, __VA_ARGS__, base::HistogramBase::kUmaTargetedHistogramFlag))
// Histogram for boolean values.
diff --git a/chromium/base/metrics/histogram_macros_internal.h b/chromium/base/metrics/histogram_macros_internal.h
index 84defae32f6..04dca02f5b7 100644
--- a/chromium/base/metrics/histogram_macros_internal.h
+++ b/chromium/base/metrics/histogram_macros_internal.h
@@ -16,11 +16,39 @@
#include "base/metrics/sparse_histogram.h"
#include "base/time/time.h"
-// This is for macros internal to base/metrics. They should not be used outside
-// of this directory. For writing to UMA histograms, see histogram_macros.h.
+// This is for macros and helpers internal to base/metrics. They should not be
+// used outside of this directory. For writing to UMA histograms, see
+// histogram_macros.h.
+
+namespace base {
+namespace internal {
+
+// Helper traits for deducing the boundary value for enums.
+template <typename Enum, typename SFINAE = void>
+struct EnumSizeTraits {
+ static constexpr Enum Count() {
+ static_assert(sizeof(Enum) == 0,
+ "enumerator must define kLast enumerator to use this macro!");
+ return Enum();
+ }
+};
+
+// Since the UMA histogram macros expect a value one larger than the max defined
+// enumerator value, add one.
+template <typename Enum>
+struct EnumSizeTraits<
+ Enum,
+ std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
+ static constexpr Enum Count() {
+ return static_cast<Enum>(
+ static_cast<std::underlying_type_t<Enum>>(Enum::kMaxValue) + 1);
+ }
+};
+
+} // namespace internal
+} // namespace base
// TODO(rkaplow): Improve commenting of these methods.
-
//------------------------------------------------------------------------------
// Histograms are often put in areas where they are called many many times, and
// performance is critical. As a result, they are designed to have a very low
@@ -31,7 +59,6 @@
// have to validate using the pointers at any time during the running of the
// process.
-
// In some cases (integration into 3rd party code), it's useful to separate the
// definition of |atomic_histogram_pointer| from its use. To achieve this we
// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
@@ -127,6 +154,21 @@
flag)); \
} while (0)
+// Helper for 'overloading' UMA_HISTOGRAM_ENUMERATION with a variable number of
+// arguments.
+#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(_1, _2, NAME, ...) NAME
+
+#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY(name, sample, \
+ flags) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
+ name, sample, base::internal::EnumSizeTraits<decltype(sample)>::Count(), \
+ flags)
+
+// Note: The value in |sample| must be strictly less than |enum_size|.
+#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY(name, sample, \
+ enum_size, flags) \
+ INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, enum_size, flags)
+
// Similar to the previous macro but intended for enumerations. This delegates
// the work to the previous macro, but supports scoped enumerations as well by
// forcing an explicit cast to the HistogramBase::Sample integral type.
diff --git a/chromium/base/metrics/histogram_macros_local.h b/chromium/base/metrics/histogram_macros_local.h
index 7571a9c4ad2..94d59dbc105 100644
--- a/chromium/base/metrics/histogram_macros_local.h
+++ b/chromium/base/metrics/histogram_macros_local.h
@@ -18,10 +18,11 @@
//
// For usage details, see the equivalents in histogram_macros.h.
-#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
- INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
- name, sample, enum_max, \
- base::HistogramBase::kNoFlags)
+#define LOCAL_HISTOGRAM_ENUMERATION(name, ...) \
+ CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO( \
+ __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
+ INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)( \
+ name, __VA_ARGS__, base::HistogramBase::kNoFlags))
#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
diff --git a/chromium/base/metrics/histogram_macros_unittest.cc b/chromium/base/metrics/histogram_macros_unittest.cc
index 33a9c6e5b2e..3c592b00e56 100644
--- a/chromium/base/metrics/histogram_macros_unittest.cc
+++ b/chromium/base/metrics/histogram_macros_unittest.cc
@@ -40,10 +40,18 @@ TEST(HistogramMacro, ScopedEnumeration) {
FIRST_VALUE,
SECOND_VALUE,
THIRD_VALUE,
+ kMaxValue = THIRD_VALUE,
+ };
+ UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::FIRST_VALUE);
+
+ enum class TestEnum2 {
+ FIRST_VALUE,
+ SECOND_VALUE,
+ THIRD_VALUE,
MAX_ENTRIES,
};
- UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::SECOND_VALUE,
- TestEnum::MAX_ENTRIES);
+ UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration2", TestEnum2::SECOND_VALUE,
+ TestEnum2::MAX_ENTRIES);
}
} // namespace base
diff --git a/chromium/base/metrics/histogram_samples.h b/chromium/base/metrics/histogram_samples.h
index 23237b0fe40..6908873cee3 100644
--- a/chromium/base/metrics/histogram_samples.h
+++ b/chromium/base/metrics/histogram_samples.h
@@ -199,6 +199,9 @@ class BASE_EXPORT HistogramSamples {
Metadata* meta() { return meta_; }
private:
+ // Depending on derived class meta values can come from local stoarge or
+ // external storage in which case HistogramSamples class cannot take ownership
+ // of Metadata*.
Metadata* meta_;
DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
diff --git a/chromium/base/metrics/histogram_snapshot_manager.cc b/chromium/base/metrics/histogram_snapshot_manager.cc
index 5d21e431b4b..705e325e37e 100644
--- a/chromium/base/metrics/histogram_snapshot_manager.cc
+++ b/chromium/base/metrics/histogram_snapshot_manager.cc
@@ -55,15 +55,11 @@ void HistogramSnapshotManager::PrepareDeltas(
}
void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
- if (!histogram->ValidateHistogramContents(true, 0))
- return;
PrepareSamples(histogram, histogram->SnapshotDelta());
}
void HistogramSnapshotManager::PrepareFinalDelta(
const HistogramBase* histogram) {
- if (!histogram->ValidateHistogramContents(true, 0))
- return;
PrepareSamples(histogram, histogram->SnapshotFinalDelta());
}
diff --git a/chromium/base/metrics/histogram_unittest.cc b/chromium/base/metrics/histogram_unittest.cc
index c824eb75fe3..c692f275470 100644
--- a/chromium/base/metrics/histogram_unittest.cc
+++ b/chromium/base/metrics/histogram_unittest.cc
@@ -16,8 +16,10 @@
#include "base/logging.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_macros.h"
+#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/record_histogram_checker.h"
#include "base/metrics/sample_vector.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
@@ -27,6 +29,22 @@
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
+namespace {
+
+const char kExpiredHistogramName[] = "ExpiredHistogram";
+
+// Test implementation of RecordHistogramChecker interface.
+class TestRecordHistogramChecker : public RecordHistogramChecker {
+ public:
+ ~TestRecordHistogramChecker() override = default;
+
+ // RecordHistogramChecker:
+ bool ShouldRecord(uint64_t histogram_hash) const override {
+ return histogram_hash != HashMetricName(kExpiredHistogramName);
+ }
+};
+
+} // namespace
// Test parameter indicates if a persistent memory allocator should be used
// for histogram allocation. False will allocate histograms from the process
@@ -58,6 +76,8 @@ class HistogramTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+ auto record_checker = std::make_unique<TestRecordHistogramChecker>();
+ StatisticsRecorder::SetRecordChecker(std::move(record_checker));
}
void UninitializeStatisticsRecorder() {
@@ -65,11 +85,6 @@ class HistogramTest : public testing::TestWithParam<bool> {
}
void CreatePersistentHistogramAllocator() {
- // By getting the results-histogram before any persistent allocator
- // is attached, that histogram is guaranteed not to be stored in
- // any persistent memory segment (which simplifies some tests).
- GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
-
GlobalHistogramAllocator::CreateWithLocalMemory(
kAllocatorMemorySize, 0, "HistogramAllocatorTest");
allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
@@ -769,4 +784,57 @@ TEST(HistogramDeathTest, BadRangesTest) {
"");
}
+TEST_P(HistogramTest, ExpiredHistogramTest) {
+ HistogramBase* expired = Histogram::FactoryGet(kExpiredHistogramName, 1, 1000,
+ 10, HistogramBase::kNoFlags);
+ ASSERT_TRUE(expired);
+ expired->Add(5);
+ expired->Add(500);
+ auto samples = expired->SnapshotDelta();
+ EXPECT_EQ(0, samples->TotalCount());
+
+ HistogramBase* linear_expired = LinearHistogram::FactoryGet(
+ kExpiredHistogramName, 1, 1000, 10, HistogramBase::kNoFlags);
+ ASSERT_TRUE(linear_expired);
+ linear_expired->Add(5);
+ linear_expired->Add(500);
+ samples = linear_expired->SnapshotDelta();
+ EXPECT_EQ(0, samples->TotalCount());
+
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(1);
+ custom_ranges.push_back(5);
+ HistogramBase* custom_expired = CustomHistogram::FactoryGet(
+ kExpiredHistogramName, custom_ranges, HistogramBase::kNoFlags);
+ ASSERT_TRUE(custom_expired);
+ custom_expired->Add(2);
+ custom_expired->Add(4);
+ samples = custom_expired->SnapshotDelta();
+ EXPECT_EQ(0, samples->TotalCount());
+
+ HistogramBase* valid = Histogram::FactoryGet("ValidHistogram", 1, 1000, 10,
+ HistogramBase::kNoFlags);
+ ASSERT_TRUE(valid);
+ valid->Add(5);
+ valid->Add(500);
+ samples = valid->SnapshotDelta();
+ EXPECT_EQ(2, samples->TotalCount());
+
+ HistogramBase* linear_valid = LinearHistogram::FactoryGet(
+ "LinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+ ASSERT_TRUE(linear_valid);
+ linear_valid->Add(5);
+ linear_valid->Add(500);
+ samples = linear_valid->SnapshotDelta();
+ EXPECT_EQ(2, samples->TotalCount());
+
+ HistogramBase* custom_valid = CustomHistogram::FactoryGet(
+ "CustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+ ASSERT_TRUE(custom_valid);
+ custom_valid->Add(2);
+ custom_valid->Add(4);
+ samples = custom_valid->SnapshotDelta();
+ EXPECT_EQ(2, samples->TotalCount());
+}
+
} // namespace base
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index 6178b21a48d..bfbb44b9a13 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -33,9 +33,6 @@ namespace base {
namespace {
-// Name of histogram for storing results of local operations.
-const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
-
// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// is used as a unique integer. A "version number" is added to the base
@@ -304,7 +301,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
// but that doesn't work because the allocated block may have been
// aligned to the next boundary value.
HashMetricName(data->name) != data->samples_metadata.id) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
NOTREACHED();
return nullptr;
}
@@ -323,10 +319,8 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
// This also allows differentiating on the dashboard between allocations
// failed due to a corrupt allocator and the number of process instances
// with one, the latter being idicated by "newly corrupt", below.
- if (memory_allocator_->IsCorrupt()) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
+ if (memory_allocator_->IsCorrupt())
return nullptr;
- }
// Create the metadata necessary for a persistent sparse histogram. This
// is done first because it is a small subset of what is required for
@@ -431,21 +425,8 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
return histogram;
}
- CreateHistogramResultType result;
- if (memory_allocator_->IsCorrupt()) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
- result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
- } else if (memory_allocator_->IsFull()) {
- result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
- } else {
- result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
- }
- RecordCreateHistogramResult(result);
-
- // Crash for failures caused by internal bugs but not "full" which is
- // dependent on outside code.
- if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
- NOTREACHED() << memory_allocator_->Name() << ", error=" << result;
+ if (memory_allocator_->IsCorrupt())
+ NOTREACHED() << memory_allocator_->Name() << " is corrupt!";
return nullptr;
}
@@ -521,58 +502,9 @@ void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
subtle::NoBarrier_Store(&last_created_, 0);
}
-// static
-HistogramBase*
-PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
- // A value that can be stored in an AtomicWord as a flag. It must not be zero
- // or a valid address.
- constexpr subtle::AtomicWord kHistogramUnderConstruction = 1;
-
- // This is a similar to LazyInstance but with return-if-under-construction
- // rather than yielding the CPU until construction completes. This is
- // necessary because the FactoryGet() below creates a histogram and thus
- // recursively calls this method to try to store the result.
-
- // Get the existing pointer. If the "under construction" flag is present,
- // abort now. It's okay to return null from this method.
- static subtle::AtomicWord atomic_histogram_pointer = 0;
- subtle::AtomicWord histogram_value =
- subtle::Acquire_Load(&atomic_histogram_pointer);
- if (histogram_value == kHistogramUnderConstruction)
- return nullptr;
-
- // If a valid histogram pointer already exists, return it.
- if (histogram_value)
- return reinterpret_cast<HistogramBase*>(histogram_value);
-
- // Set the "under construction" flag; abort if something has changed.
- if (subtle::NoBarrier_CompareAndSwap(&atomic_histogram_pointer, 0,
- kHistogramUnderConstruction) != 0) {
- return nullptr;
- }
-
- // Only one thread can be here. Even recursion will be thwarted above.
-
- if (GlobalHistogramAllocator::Get()) {
- DVLOG(1) << "Creating the results-histogram inside persistent"
- << " memory can cause future allocations to crash if"
- << " that memory is ever released (for testing).";
- }
-
- HistogramBase* histogram_pointer = LinearHistogram::FactoryGet(
- kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
- HistogramBase::kUmaTargetedHistogramFlag);
- subtle::Release_Store(
- &atomic_histogram_pointer,
- reinterpret_cast<subtle::AtomicWord>(histogram_pointer));
-
- return histogram_pointer;
-}
-
std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
PersistentHistogramData* histogram_data_ptr) {
if (!histogram_data_ptr) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
NOTREACHED();
return nullptr;
}
@@ -585,7 +517,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
&histogram_data_ptr->logged_metadata);
DCHECK(histogram);
histogram->SetFlags(histogram_data_ptr->flags);
- RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
return histogram;
}
@@ -616,7 +547,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
if (!ranges_data || histogram_bucket_count < 2 ||
histogram_bucket_count >= max_buckets ||
allocated_bytes < required_bytes) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
NOTREACHED();
return nullptr;
}
@@ -624,7 +554,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
if (!created_ranges) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
NOTREACHED();
return nullptr;
}
@@ -638,7 +567,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
if (counts_bytes == 0 ||
(counts_ref != 0 &&
memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
NOTREACHED();
return nullptr;
}
@@ -701,9 +629,6 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
if (histogram) {
DCHECK_EQ(histogram_type, histogram->GetHistogramType());
histogram->SetFlags(histogram_flags);
- RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
- } else {
- RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
}
return histogram;
@@ -740,14 +665,6 @@ PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}
-// static
-void PersistentHistogramAllocator::RecordCreateHistogramResult(
- CreateHistogramResultType result) {
- HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
- if (result_histogram)
- result_histogram->Add(result);
-}
-
GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;
// static
@@ -1020,15 +937,6 @@ GlobalHistogramAllocator::ReleaseForTesting() {
const PersistentHistogramData* data;
while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
StatisticsRecorder::ForgetHistogramForTesting(data->name);
-
- // If a test breaks here then a memory region containing a histogram
- // actively used by this code is being released back to the test.
- // If that memory segment were to be deleted, future calls to create
- // persistent histograms would crash. To avoid this, have the test call
- // the method GetCreateHistogramResultHistogram() *before* setting
- // the (temporary) memory allocator via SetGlobalAllocator() so that
- // histogram is instead allocated from the process heap.
- DCHECK_NE(kResultHistogram, data->name);
}
subtle::Release_Store(&g_histogram_allocator, 0);
diff --git a/chromium/base/metrics/persistent_histogram_allocator.h b/chromium/base/metrics/persistent_histogram_allocator.h
index 3766ab123f6..395511fb74e 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.h
+++ b/chromium/base/metrics/persistent_histogram_allocator.h
@@ -287,9 +287,6 @@ class BASE_EXPORT PersistentHistogramAllocator {
// operation without that optimization.
void ClearLastCreatedReferenceForTesting();
- // Histogram containing creation results. Visible for testing.
- static HistogramBase* GetCreateHistogramResultHistogram();
-
protected:
// The structure used to hold histogram data in persistent memory. It is
// defined and used entirely within the .cc file.
@@ -307,42 +304,6 @@ class BASE_EXPORT PersistentHistogramAllocator {
Reference ignore);
private:
- // Enumerate possible creation results for reporting.
- enum CreateHistogramResultType {
- // Everything was fine.
- CREATE_HISTOGRAM_SUCCESS = 0,
-
- // Pointer to metadata was not valid.
- CREATE_HISTOGRAM_INVALID_METADATA_POINTER,
-
- // Histogram metadata was not valid.
- CREATE_HISTOGRAM_INVALID_METADATA,
-
- // Ranges information was not valid.
- CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,
-
- // Counts information was not valid.
- CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,
-
- // Could not allocate histogram memory due to corruption.
- CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,
-
- // Could not allocate histogram memory due to lack of space.
- CREATE_HISTOGRAM_ALLOCATOR_FULL,
-
- // Could not allocate histogram memory due to unknown error.
- CREATE_HISTOGRAM_ALLOCATOR_ERROR,
-
- // Histogram was of unknown type.
- CREATE_HISTOGRAM_UNKNOWN_TYPE,
-
- // Instance has detected a corrupt allocator (recorded only once).
- CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,
-
- // Always keep this at the end.
- CREATE_HISTOGRAM_MAX
- };
-
// Create a histogram based on saved (persistent) information about it.
std::unique_ptr<HistogramBase> CreateHistogram(
PersistentHistogramData* histogram_data_ptr);
@@ -353,9 +314,6 @@ class BASE_EXPORT PersistentHistogramAllocator {
HistogramBase* GetOrCreateStatisticsRecorderHistogram(
const HistogramBase* histogram);
- // Record the result of a histogram creation.
- static void RecordCreateHistogramResult(CreateHistogramResultType result);
-
// The memory allocator that provides the actual histogram storage.
std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
diff --git a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
index c492a246d58..7e07386d1b5 100644
--- a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -34,7 +34,6 @@ class PersistentHistogramAllocatorTest : public testing::Test {
GlobalHistogramAllocator::ReleaseForTesting();
memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
- GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
GlobalHistogramAllocator::CreateWithPersistentMemory(
allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
"PersistentHistogramAllocatorTest");
diff --git a/chromium/base/metrics/persistent_histogram_storage.cc b/chromium/base/metrics/persistent_histogram_storage.cc
new file mode 100644
index 00000000000..0676e47ba18
--- /dev/null
+++ b/chromium/base/metrics/persistent_histogram_storage.cc
@@ -0,0 +1,101 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_storage.h"
+
+#include "base/files/file_util.h"
+#include "base/files/important_file_writer.h"
+#include "base/logging.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace {
+
+constexpr size_t kAllocSize = 1 << 20; // 1 MiB
+
+} // namespace
+
+namespace base {
+
+PersistentHistogramStorage::PersistentHistogramStorage(
+ StringPiece allocator_name,
+ StorageDirManagement storage_dir_management)
+ : storage_dir_management_(storage_dir_management) {
+ DCHECK(!allocator_name.empty());
+ DCHECK(IsStringASCII(allocator_name));
+
+ GlobalHistogramAllocator::CreateWithLocalMemory(kAllocSize,
+ 0, // No identifier.
+ allocator_name);
+ GlobalHistogramAllocator::Get()->CreateTrackingHistograms(allocator_name);
+}
+
+PersistentHistogramStorage::~PersistentHistogramStorage() {
+ PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+ allocator->UpdateTrackingHistograms();
+
+ if (disabled_)
+ return;
+
+ // Stop if the storage base directory has not been properly set.
+ if (storage_base_dir_.empty()) {
+ LOG(ERROR)
+ << "Could not write \"" << allocator->Name()
+ << "\" persistent histograms to file as the storage base directory "
+ "is not properly set.";
+ return;
+ }
+
+ FilePath storage_dir = storage_base_dir_.AppendASCII(allocator->Name());
+
+ switch (storage_dir_management_) {
+ case StorageDirManagement::kCreate:
+ if (!CreateDirectory(storage_dir)) {
+ LOG(ERROR)
+ << "Could not write \"" << allocator->Name()
+ << "\" persistent histograms to file as the storage directory "
+ "cannot be created.";
+ return;
+ }
+ break;
+ case StorageDirManagement::kUseExisting:
+ if (!DirectoryExists(storage_dir)) {
+ // When the consumer of this class decides to use an existing storage
+ // directory, it should ensure the directory's existence if it's
+ // essential.
+ LOG(ERROR)
+ << "Could not write \"" << allocator->Name()
+ << "\" persistent histograms to file as the storage directory "
+ "does not exist.";
+ return;
+ }
+ break;
+ }
+
+ // Save data using the current time as the filename. The actual filename
+ // doesn't matter (so long as it ends with the correct extension) but this
+ // works as well as anything.
+ Time::Exploded exploded;
+ Time::Now().LocalExplode(&exploded);
+ const FilePath file_path =
+ storage_dir
+ .AppendASCII(StringPrintf("%04d%02d%02d%02d%02d%02d", exploded.year,
+ exploded.month, exploded.day_of_month,
+ exploded.hour, exploded.minute,
+ exploded.second))
+ .AddExtension(PersistentMemoryAllocator::kFileExtension);
+
+ StringPiece contents(static_cast<const char*>(allocator->data()),
+ allocator->used());
+ if (!ImportantFileWriter::WriteFileAtomically(file_path, contents)) {
+ LOG(ERROR) << "Persistent histograms fail to write to file: "
+ << file_path.value();
+ }
+}
+
+} // namespace base
diff --git a/chromium/base/metrics/persistent_histogram_storage.h b/chromium/base/metrics/persistent_histogram_storage.h
new file mode 100644
index 00000000000..397236dd75f
--- /dev/null
+++ b/chromium/base/metrics/persistent_histogram_storage.h
@@ -0,0 +1,68 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
+#define BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// This class creates a fixed sized persistent memory to allow histograms to be
+// stored in it. When a PersistentHistogramStorage is destructed, histograms
+// recorded during its lifetime are persisted in the directory
+// |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
+// Histograms are not persisted if the storage directory does not exist on
+// destruction. PersistentHistogramStorage should be instantiated as early as
+// possible in the process lifetime and should never be instantiated again.
+// Persisted histograms will eventually be reported by Chrome.
+class BASE_EXPORT PersistentHistogramStorage {
+ public:
+ enum class StorageDirManagement { kCreate, kUseExisting };
+
+ // Creates a process-wide storage location for histograms that will be written
+ // to a file within a directory provided by |set_storage_base_dir()| on
+ // destruction.
+ // The |allocator_name| is used both as an internal name for the allocator,
+ // well as the leaf directory name for the file to which the histograms are
+ // persisted. The string must be ASCII.
+ // |storage_dir_management| specifies if this instance reuses an existing
+ // storage directory, or is responsible for creating one.
+ PersistentHistogramStorage(StringPiece allocator_name,
+ StorageDirManagement storage_dir_management);
+
+ ~PersistentHistogramStorage();
+
+ // The storage directory isn't always known during initial construction so
+ // it's set separately. The last one wins if there are multiple calls to this
+ // method.
+ void set_storage_base_dir(const FilePath& storage_base_dir) {
+ storage_base_dir_ = storage_base_dir;
+ }
+
+ // Disables histogram storage.
+ void Disable() { disabled_ = true; }
+
+ private:
+ // Metrics files are written into directory
+ // |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
+ FilePath storage_base_dir_;
+
+ // The setting of the storage directory management.
+ const StorageDirManagement storage_dir_management_;
+
+ // A flag indicating if histogram storage is disabled. It starts with false,
+ // but can be set to true by the caller who decides to throw away its
+ // histogram data.
+ bool disabled_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorage);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
diff --git a/chromium/base/metrics/persistent_histogram_storage_unittest.cc b/chromium/base/metrics/persistent_histogram_storage_unittest.cc
new file mode 100644
index 00000000000..adbcdfc3371
--- /dev/null
+++ b/chromium/base/metrics/persistent_histogram_storage_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_storage.h"
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Name of the allocator for storing histograms.
+constexpr char kTestHistogramAllocatorName[] = "TestMetrics";
+
+} // namespace
+
+class PersistentHistogramStorageTest : public testing::Test {
+ protected:
+ PersistentHistogramStorageTest() = default;
+ ~PersistentHistogramStorageTest() override = default;
+
+ // Creates a unique temporary directory, and sets the test storage directory.
+ void SetUp() override {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ test_storage_dir_ =
+ temp_dir_path().AppendASCII(kTestHistogramAllocatorName);
+ }
+
+ // Gets the path to the temporary directory.
+ const FilePath& temp_dir_path() { return temp_dir_.GetPath(); }
+
+ const FilePath& test_storage_dir() { return test_storage_dir_; }
+
+ private:
+ // A temporary directory where all file IO operations take place.
+ ScopedTempDir temp_dir_;
+
+ // The directory into which metrics files are written.
+ FilePath test_storage_dir_;
+
+ DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorageTest);
+};
+
+#if !defined(OS_NACL)
+TEST_F(PersistentHistogramStorageTest, HistogramWriteTest) {
+ auto persistent_histogram_storage =
+ std::make_unique<PersistentHistogramStorage>(
+ kTestHistogramAllocatorName,
+ PersistentHistogramStorage::StorageDirManagement::kCreate);
+
+ persistent_histogram_storage->set_storage_base_dir(temp_dir_path());
+
+ // Log some random data.
+ UMA_HISTOGRAM_BOOLEAN("Some.Test.Metric", true);
+
+ // Deleting the object causes the data to be written to the disk.
+ persistent_histogram_storage.reset();
+
+ // The storage directory and the histogram file are created during the
+ // destruction of the PersistentHistogramStorage instance.
+ EXPECT_TRUE(DirectoryExists(test_storage_dir()));
+ EXPECT_FALSE(IsDirectoryEmpty(test_storage_dir()));
+}
+#endif // !defined(OS_NACL)
+
+} // namespace base
diff --git a/chromium/base/metrics/sparse_histogram.cc b/chromium/base/metrics/sparse_histogram.cc
index e33fd3c88f2..30175a0780f 100644
--- a/chromium/base/metrics/sparse_histogram.cc
+++ b/chromium/base/metrics/sparse_histogram.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/memory/ptr_util.h"
+#include "base/metrics/dummy_histogram.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_sample_map.h"
@@ -26,6 +27,12 @@ HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
int32_t flags) {
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
if (!histogram) {
+ // TODO(gayane): |HashMetricName| is called again in Histogram constructor.
+ // Refactor code to avoid the additional call.
+ bool should_record =
+ StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name));
+ if (!should_record)
+ return DummyHistogram::GetInstance();
// Try to create the histogram using a "persistent" allocator. As of
// 2016-02-25, the availability of such is controlled by a base::Feature
// that is off by default. If the allocator doesn't exist or if
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index 0bf216792a1..72dd9054410 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -59,11 +59,6 @@ class SparseHistogramTest : public testing::TestWithParam<bool> {
}
void CreatePersistentMemoryAllocator() {
- // By getting the results-histogram before any persistent allocator
- // is attached, that histogram is guaranteed not to be stored in
- // any persistent memory segment (which simplifies some tests).
- GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
-
GlobalHistogramAllocator::CreateWithLocalMemory(
kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index 03651a9d675..87a93110e1f 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -200,9 +200,6 @@ class BASE_EXPORT StatisticsRecorder {
// method must be called very early, before any threads have started.
// Record checker methods can be called on any thread, so they shouldn't
// mutate any state.
- //
- // TODO(iburak): This is not yet hooked up to histogram recording
- // infrastructure.
static void SetRecordChecker(
std::unique_ptr<RecordHistogramChecker> record_checker);
diff --git a/chromium/base/metrics/statistics_recorder_unittest.cc b/chromium/base/metrics/statistics_recorder_unittest.cc
index 9e3197e10d3..63ba136086e 100644
--- a/chromium/base/metrics/statistics_recorder_unittest.cc
+++ b/chromium/base/metrics/statistics_recorder_unittest.cc
@@ -63,10 +63,6 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
const int32_t kAllocatorMemorySize = 64 << 10; // 64 KiB
StatisticsRecorderTest() : use_persistent_histogram_allocator_(GetParam()) {
- // Get this first so it never gets created in persistent storage and will
- // not appear in the StatisticsRecorder after it is re-initialized.
- PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
-
// Each test will have a clean state (no Histogram / BucketRanges
// registered).
InitializeStatisticsRecorder();
diff --git a/chromium/base/native_library.h b/chromium/base/native_library.h
index e2b9ca7e6d1..eaf827ceb7f 100644
--- a/chromium/base/native_library.h
+++ b/chromium/base/native_library.h
@@ -98,13 +98,21 @@ BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
StringPiece name);
-// Returns the full platform specific name for a native library.
-// |name| must be ASCII.
-// For example:
-// "mylib" returns "mylib.dll" on Windows, "libmylib.so" on Linux,
-// "libmylib.dylib" on Mac.
+// Returns the full platform-specific name for a native library. |name| must be
+// ASCII. This is also the default name for the output of a gn |shared_library|
+// target. See tools/gn/docs/reference.md#shared_library.
+// For example for "mylib", it returns:
+// - "mylib.dll" on Windows
+// - "libmylib.so" on Linux
+// - "libmylib.dylib" on Mac
BASE_EXPORT std::string GetNativeLibraryName(StringPiece name);
+// Returns the full platform-specific name for a gn |loadable_module| target.
+// See tools/gn/docs/reference.md#loadable_module
+// The returned name is the same as GetNativeLibraryName() on all platforms
+// except for Mac where for "mylib" it returns "mylib.so".
+BASE_EXPORT std::string GetLoadableModuleName(StringPiece name);
+
} // namespace base
#endif // BASE_NATIVE_LIBRARY_H_
diff --git a/chromium/base/native_library_fuchsia.cc b/chromium/base/native_library_fuchsia.cc
new file mode 100644
index 00000000000..1d68cbd1667
--- /dev/null
+++ b/chromium/base/native_library_fuchsia.cc
@@ -0,0 +1,95 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include <fcntl.h>
+#include <fdio/io.h>
+#include <stdio.h>
+#include <zircon/dlfcn.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+#include "base/base_paths_fuchsia.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/scoped_zx_handle.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/posix/safe_strerror.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+std::string NativeLibraryLoadError::ToString() const {
+ return message;
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+ const NativeLibraryOptions& options,
+ NativeLibraryLoadError* error) {
+ std::vector<base::FilePath::StringType> components;
+ library_path.GetComponents(&components);
+ if (components.size() != 1u) {
+ NOTREACHED() << "library_path is a path, should be a filename: "
+ << library_path.MaybeAsASCII();
+ return nullptr;
+ }
+
+ // Fuchsia libraries must live under the "lib" directory, which may be located
+ // in /system/lib or /pkg/lib depending on whether the executable is running
+ // inside a package.
+ // TODO(https://crbug.com/805057): Remove the non-package codepath when bootfs
+ // is deprecated.
+ FilePath computed_path = base::GetPackageRoot();
+ if (computed_path.empty()) {
+ CHECK(PathService::Get(DIR_EXE, &computed_path));
+ }
+ computed_path = computed_path.AppendASCII("lib").Append(components[0]);
+ base::File library(computed_path,
+ base::File::FLAG_OPEN | base::File::FLAG_READ);
+ if (!library.IsValid()) {
+ if (error) {
+ error->message = base::StringPrintf(
+ "open library: %s",
+ base::File::ErrorToString(library.error_details()).c_str());
+ }
+ return nullptr;
+ }
+
+ base::ScopedZxHandle vmo;
+ zx_status_t status =
+ fdio_get_vmo_clone(library.GetPlatformFile(), vmo.receive());
+ if (status != ZX_OK) {
+ if (error) {
+ error->message = base::StringPrintf("fdio_get_vmo_clone: %s",
+ zx_status_get_string(status));
+ }
+ return nullptr;
+ }
+ NativeLibrary result = dlopen_vmo(vmo.get(), RTLD_LAZY | RTLD_LOCAL);
+ return result;
+}
+
+void UnloadNativeLibrary(NativeLibrary library) {
+ // dlclose() is a no-op on Fuchsia, so do nothing here.
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+ StringPiece name) {
+ return dlsym(library, name.data());
+}
+
+std::string GetNativeLibraryName(StringPiece name) {
+ return base::StringPrintf("lib%s.so", name.as_string().c_str());
+}
+
+std::string GetLoadableModuleName(StringPiece name) {
+ return GetNativeLibraryName(name);
+}
+
+} // namespace base
diff --git a/chromium/base/native_library_ios.mm b/chromium/base/native_library_ios.mm
index 578240eeead..dbcafb41f1e 100644
--- a/chromium/base/native_library_ios.mm
+++ b/chromium/base/native_library_ios.mm
@@ -14,7 +14,6 @@ std::string NativeLibraryLoadError::ToString() const {
return message;
}
-// static
NativeLibrary LoadNativeLibraryWithOptions(const base::FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
@@ -24,23 +23,24 @@ NativeLibrary LoadNativeLibraryWithOptions(const base::FilePath& library_path,
return nullptr;
}
-// static
void UnloadNativeLibrary(NativeLibrary library) {
NOTIMPLEMENTED();
DCHECK(!library);
}
-// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
StringPiece name) {
NOTIMPLEMENTED();
return nullptr;
}
-// static
std::string GetNativeLibraryName(StringPiece name) {
DCHECK(IsStringASCII(name));
return name.as_string();
}
+std::string GetLoadableModuleName(StringPiece name) {
+ return GetNativeLibraryName(name);
+}
+
} // namespace base
diff --git a/chromium/base/native_library_mac.mm b/chromium/base/native_library_mac.mm
index db770c6c374..0d31b80a88d 100644
--- a/chromium/base/native_library_mac.mm
+++ b/chromium/base/native_library_mac.mm
@@ -38,7 +38,6 @@ std::string NativeLibraryLoadError::ToString() const {
return message;
}
-// static
NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
@@ -75,7 +74,6 @@ NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
return native_lib;
}
-// static
void UnloadNativeLibrary(NativeLibrary library) {
if (library->objc_status == OBJC_NOT_PRESENT) {
if (library->type == BUNDLE) {
@@ -95,7 +93,6 @@ void UnloadNativeLibrary(NativeLibrary library) {
delete library;
}
-// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
StringPiece name) {
void* function_pointer = nullptr;
@@ -118,10 +115,14 @@ void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
return function_pointer;
}
-// static
std::string GetNativeLibraryName(StringPiece name) {
DCHECK(IsStringASCII(name));
return "lib" + name.as_string() + ".dylib";
}
+std::string GetLoadableModuleName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return name.as_string() + ".so";
+}
+
} // namespace base
diff --git a/chromium/base/native_library_posix.cc b/chromium/base/native_library_posix.cc
index 49925375238..19ff7a4b0ba 100644
--- a/chromium/base/native_library_posix.cc
+++ b/chromium/base/native_library_posix.cc
@@ -18,7 +18,6 @@ std::string NativeLibraryLoadError::ToString() const {
return message;
}
-// static
NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
@@ -46,7 +45,6 @@ NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
return dl;
}
-// static
void UnloadNativeLibrary(NativeLibrary library) {
int ret = dlclose(library);
if (ret < 0) {
@@ -55,16 +53,18 @@ void UnloadNativeLibrary(NativeLibrary library) {
}
}
-// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
StringPiece name) {
return dlsym(library, name.data());
}
-// static
std::string GetNativeLibraryName(StringPiece name) {
DCHECK(IsStringASCII(name));
return "lib" + name.as_string() + ".so";
}
+std::string GetLoadableModuleName(StringPiece name) {
+ return GetNativeLibraryName(name);
+}
+
} // namespace base
diff --git a/chromium/base/native_library_unittest.cc b/chromium/base/native_library_unittest.cc
index 320442bdd11..8b4001716b2 100644
--- a/chromium/base/native_library_unittest.cc
+++ b/chromium/base/native_library_unittest.cc
@@ -40,6 +40,20 @@ TEST(NativeLibraryTest, GetNativeLibraryName) {
EXPECT_EQ(kExpectedName, GetNativeLibraryName("mylib"));
}
+TEST(NativeLibraryTest, GetLoadableModuleName) {
+ const char kExpectedName[] =
+#if defined(OS_IOS)
+ "mylib";
+#elif defined(OS_MACOSX)
+ "mylib.so";
+#elif defined(OS_POSIX)
+ "libmylib.so";
+#elif defined(OS_WIN)
+ "mylib.dll";
+#endif
+ EXPECT_EQ(kExpectedName, GetLoadableModuleName("mylib"));
+}
+
// We don't support dynamic loading on iOS, and ASAN will complain about our
// intentional ODR violation because of |g_native_library_exported_value| being
// defined globally both here and in the shared library.
@@ -63,7 +77,12 @@ class TestLibrary {
explicit TestLibrary(const NativeLibraryOptions& options)
: library_(nullptr) {
base::FilePath exe_path;
+
+#if !defined(OS_FUCHSIA)
+ // Libraries do not sit alongside the executable in Fuchsia. NativeLibrary
+ // is aware of this and is able to resolve library paths correctly.
CHECK(base::PathService::Get(base::DIR_EXE, &exe_path));
+#endif
library_ = LoadNativeLibraryWithOptions(
exe_path.AppendASCII(kTestLibraryName), options, nullptr);
diff --git a/chromium/base/native_library_win.cc b/chromium/base/native_library_win.cc
index 7a5c9f62875..ca944681682 100644
--- a/chromium/base/native_library_win.cc
+++ b/chromium/base/native_library_win.cc
@@ -150,28 +150,28 @@ std::string NativeLibraryLoadError::ToString() const {
return StringPrintf("%lu", code);
}
-// static
NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
return LoadNativeLibraryHelper(library_path, error);
}
-// static
void UnloadNativeLibrary(NativeLibrary library) {
FreeLibrary(library);
}
-// static
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
StringPiece name) {
- return GetProcAddress(library, name.data());
+ return reinterpret_cast<void*>(GetProcAddress(library, name.data()));
}
-// static
std::string GetNativeLibraryName(StringPiece name) {
DCHECK(IsStringASCII(name));
return name.as_string() + ".dll";
}
+std::string GetLoadableModuleName(StringPiece name) {
+ return GetNativeLibraryName(name);
+}
+
} // namespace base
diff --git a/chromium/base/no_destructor_unittest.cc b/chromium/base/no_destructor_unittest.cc
index c14e479abe9..8f9d4a48c7a 100644
--- a/chromium/base/no_destructor_unittest.cc
+++ b/chromium/base/no_destructor_unittest.cc
@@ -8,6 +8,7 @@
#include <utility>
#include "base/logging.h"
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -62,10 +63,14 @@ TEST(NoDestructorTest, Accessors) {
EXPECT_EQ(0, awesome.get()->compare("awesome"));
}
+// Passing initializer list to a NoDestructor like in this test
+// is ambiguous in GCC.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84849
+#if !defined(COMPILER_GCC) && !defined(__clang__)
TEST(NoDestructorTest, InitializerList) {
static NoDestructor<std::vector<std::string>> vector({"a", "b", "c"});
}
-
+#endif
} // namespace
} // namespace base
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index 8575bc7d44a..010ee9cb8cf 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -211,7 +211,7 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
// Self assignment.
- it3 = it3;
+ it3 = *&it3; // The *& defeats Clang's -Wself-assign warning.
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
}
@@ -228,7 +228,7 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
// Self assignment.
- it3 = it3;
+ it3 = *&it3; // The *& defeats Clang's -Wself-assign warning.
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
}
@@ -255,7 +255,7 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
// Self assignment.
- it3 = it3;
+ it3 = *&it3; // The *& defeats Clang's -Wself-assign warning.
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
// Iterator post increment.
@@ -278,7 +278,7 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
// Self assignment.
- it3 = it3;
+ it3 = *&it3; // The *& defeats Clang's -Wself-assign warning.
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
// Iterator post increment.
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index 0b391b45396..c1d11ca7a18 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -153,7 +153,8 @@ struct OptionalStorage : OptionalStorageBase<T> {
Init(other.value_);
}
- OptionalStorage(OptionalStorage&& other) {
+ OptionalStorage(OptionalStorage&& other) noexcept(
+ std::is_nothrow_move_constructible<T>::value) {
if (other.is_populated_)
Init(std::move(other.value_));
}
@@ -172,7 +173,8 @@ struct OptionalStorage<T,
OptionalStorage() = default;
OptionalStorage(const OptionalStorage& other) = default;
- OptionalStorage(OptionalStorage&& other) {
+ OptionalStorage(OptionalStorage&& other) noexcept(
+ std::is_nothrow_move_constructible<T>::value) {
if (other.is_populated_)
Init(std::move(other.value_));
}
@@ -393,6 +395,17 @@ using RemoveCvRefT = std::remove_cv_t<std::remove_reference_t<T>>;
} // namespace internal
+// On Windows, by default, empty-base class optimization does not work,
+// which means even if the base class is empty struct, it still consumes one
+// byte for its body. __declspec(empty_bases) enables the optimization.
+// cf)
+// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+#ifdef OS_WIN
+#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
+#else
+#define OPTIONAL_DECLSPEC_EMPTY_BASES
+#endif
+
// base::Optional is a Chromium version of the C++17 optional class:
// std::optional documentation:
// http://en.cppreference.com/w/cpp/utility/optional
@@ -413,7 +426,7 @@ using RemoveCvRefT = std::remove_cv_t<std::remove_reference_t<T>>;
// both clang and gcc has same limitation. MSVC SFINAE looks to have different
// behavior, but anyway it reports an error, too.
template <typename T>
-class Optional
+class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
: public internal::OptionalBase<T>,
public internal::CopyConstructible<std::is_copy_constructible<T>::value>,
public internal::MoveConstructible<std::is_move_constructible<T>::value>,
@@ -422,6 +435,7 @@ class Optional
public internal::MoveAssignable<std::is_move_constructible<T>::value &&
std::is_move_assignable<T>::value> {
public:
+#undef OPTIONAL_DECLSPEC_EMPTY_BASES
using value_type = T;
// Defer default/copy/move constructor implementation to OptionalBase.
@@ -560,39 +574,57 @@ class Optional
return *this;
}
- constexpr const T* operator->() const { return &value(); }
+ constexpr const T* operator->() const {
+ CHECK(storage_.is_populated_);
+ return &storage_.value_;
+ }
- constexpr T* operator->() { return &value(); }
+ constexpr T* operator->() {
+ CHECK(storage_.is_populated_);
+ return &storage_.value_;
+ }
- constexpr const T& operator*() const& { return value(); }
+ constexpr const T& operator*() const & {
+ CHECK(storage_.is_populated_);
+ return storage_.value_;
+ }
- constexpr T& operator*() & { return value(); }
+ constexpr T& operator*() & {
+ CHECK(storage_.is_populated_);
+ return storage_.value_;
+ }
- constexpr const T&& operator*() const&& { return std::move(value()); }
+ constexpr const T&& operator*() const && {
+ CHECK(storage_.is_populated_);
+ return std::move(storage_.value_);
+ }
- constexpr T&& operator*() && { return std::move(value()); }
+ constexpr T&& operator*() && {
+ CHECK(storage_.is_populated_);
+ return std::move(storage_.value_);
+ }
constexpr explicit operator bool() const { return storage_.is_populated_; }
constexpr bool has_value() const { return storage_.is_populated_; }
constexpr T& value() & {
- DCHECK(storage_.is_populated_);
+ CHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr const T& value() const & {
- DCHECK(storage_.is_populated_);
+ CHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr T&& value() && {
- DCHECK(storage_.is_populated_);
+ CHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
constexpr const T&& value() const && {
- DCHECK(storage_.is_populated_);
+ CHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc
index d33ef62d2d7..7bdb46b7617 100644
--- a/chromium/base/optional_unittest.cc
+++ b/chromium/base/optional_unittest.cc
@@ -179,6 +179,10 @@ static_assert(
!std::is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
"OptionalIsTriviallyDestructible");
+static_assert(sizeof(Optional<int>) == sizeof(internal::OptionalBase<int>),
+ "internal::{Copy,Move}{Constructible,Assignable} structs "
+ "should be 0-sized");
+
TEST(OptionalTest, DefaultConstructor) {
{
constexpr Optional<float> o;
@@ -2111,27 +2115,58 @@ TEST(OptionalTest, DontCallNewMemberFunction) {
}
TEST(OptionalTest, Noexcept) {
- // non-noexcept move-constructible.
+ // Trivial copy ctor, non-trivial move ctor, nothrow move assign.
struct Test1 {
+ Test1(const Test1&) = default;
Test1(Test1&&) {}
Test1& operator=(Test1&&) = default;
};
- // non-noexcept move-assignable.
+ // Non-trivial copy ctor, trivial move ctor, throw move assign.
struct Test2 {
+ Test2(const Test2&) {}
Test2(Test2&&) = default;
Test2& operator=(Test2&&) { return *this; }
};
+ // Trivial copy ctor, non-trivial nothrow move ctor.
+ struct Test3 {
+ Test3(const Test3&) = default;
+ Test3(Test3&&) noexcept {}
+ };
+ // Non-trivial copy ctor, non-trivial nothrow move ctor.
+ struct Test4 {
+ Test4(const Test4&) {}
+ Test4(Test4&&) noexcept {}
+ };
+ // Non-trivial copy ctor, non-trivial move ctor.
+ struct Test5 {
+ Test5(const Test5&) {}
+ Test5(Test5&&) {}
+ };
static_assert(
noexcept(Optional<int>(std::declval<Optional<int>>())),
- "move constructor for noexcept move-constructible T must be noexcept");
+ "move constructor for noexcept move-constructible T must be noexcept "
+ "(trivial copy, trivial move)");
static_assert(
!noexcept(Optional<Test1>(std::declval<Optional<Test1>>())),
"move constructor for non-noexcept move-constructible T must not be "
- "noexcept");
+ "noexcept (trivial copy)");
static_assert(
noexcept(Optional<Test2>(std::declval<Optional<Test2>>())),
- "move constructor for noexcept move-constructible T must be noexcept");
+ "move constructor for noexcept move-constructible T must be noexcept "
+ "(non-trivial copy, trivial move)");
+ static_assert(
+ noexcept(Optional<Test3>(std::declval<Optional<Test3>>())),
+ "move constructor for noexcept move-constructible T must be noexcept "
+ "(trivial copy, non-trivial move)");
+ static_assert(
+ noexcept(Optional<Test4>(std::declval<Optional<Test4>>())),
+ "move constructor for noexcept move-constructible T must be noexcept "
+ "(non-trivial copy, non-trivial move)");
+ static_assert(
+ !noexcept(Optional<Test5>(std::declval<Optional<Test5>>())),
+ "move constructor for non-noexcept move-constructible T must not be "
+ "noexcept (non-trivial copy)");
static_assert(
noexcept(std::declval<Optional<int>>() = std::declval<Optional<int>>()),
diff --git a/chromium/base/pending_task.cc b/chromium/base/pending_task.cc
index 31f2d2d9beb..7224a6b089c 100644
--- a/chromium/base/pending_task.cc
+++ b/chromium/base/pending_task.cc
@@ -15,21 +15,7 @@ PendingTask::PendingTask(const Location& posted_from,
: task(std::move(task)),
posted_from(posted_from),
delayed_run_time(delayed_run_time),
- sequence_num(0),
- nestable(nestable),
- is_high_res(false) {
- const PendingTask* parent_task =
- MessageLoop::current() ? MessageLoop::current()->current_pending_task_
- : nullptr;
- if (parent_task) {
- task_backtrace[0] = parent_task->posted_from.program_counter();
- std::copy(parent_task->task_backtrace.begin(),
- parent_task->task_backtrace.end() - 1,
- task_backtrace.begin() + 1);
- } else {
- task_backtrace.fill(nullptr);
- }
-}
+ nestable(nestable) {}
PendingTask::PendingTask(PendingTask&& other) = default;
diff --git a/chromium/base/pending_task.h b/chromium/base/pending_task.h
index 8c7854b14cb..495015ba661 100644
--- a/chromium/base/pending_task.h
+++ b/chromium/base/pending_task.h
@@ -44,17 +44,18 @@ struct BASE_EXPORT PendingTask {
// The time when the task should be run.
base::TimeTicks delayed_run_time;
- // Task backtrace.
- std::array<const void*, 4> task_backtrace;
+ // Task backtrace. mutable so it can be set while annotating const PendingTask
+ // objects from TaskAnnotator::DidQueueTask().
+ mutable std::array<const void*, 4> task_backtrace = {};
// Secondary sort key for run time.
- int sequence_num;
+ int sequence_num = 0;
// OK to dispatch from a nested loop.
Nestable nestable;
// Needs high resolution timers.
- bool is_high_res;
+ bool is_high_res = false;
};
using TaskQueue = base::queue<PendingTask>;
diff --git a/chromium/base/pending_task_unittest.cc b/chromium/base/pending_task_unittest.cc
deleted file mode 100644
index 2a3e0c01bf9..00000000000
--- a/chromium/base/pending_task_unittest.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/pending_task.h"
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "base/strings/stringprintf.h"
-#include "base/threading/thread.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-class PendingTaskTest : public ::testing::Test {
- public:
- PendingTaskTest() = default;
-
- ~PendingTaskTest() override = default;
-
- protected:
- using ExpectedTrace = std::vector<const void*>;
-
- static void VerifyTraceAndPost(const scoped_refptr<TaskRunner>& task_runner,
- const Location& posted_from,
- const Location& next_from_here,
- const std::vector<const void*>& expected_trace,
- Closure task) {
- SCOPED_TRACE(StringPrintf("Callback Depth: %zu", expected_trace.size()));
-
- // Beyond depth + 1, the trace is nonsensical because there haven't been
- // enough nested tasks called.
- const PendingTask* current_pending_task =
- MessageLoop::current()->current_pending_task_;
- size_t window = std::min(current_pending_task->task_backtrace.size(),
- expected_trace.size());
-
- EXPECT_EQ(posted_from,
- MessageLoop::current()->current_pending_task_->posted_from);
- for (size_t i = 0; i < window; i++) {
- SCOPED_TRACE(StringPrintf("Trace frame: %zu", i));
- EXPECT_EQ(expected_trace[i], current_pending_task->task_backtrace[i]);
- }
- task_runner->PostTask(next_from_here, std::move(task));
- }
-
- static void RunTwo(Closure c1, Closure c2) {
- c1.Run();
- c2.Run();
- }
-};
-
-// Ensure the task backtrace populates correctly.
-TEST_F(PendingTaskTest, SingleThreadedSimple) {
- MessageLoop loop;
- const Location& location0 = FROM_HERE;
- const Location& location1 = FROM_HERE;
- const Location& location2 = FROM_HERE;
- const Location& location3 = FROM_HERE;
- const Location& location4 = FROM_HERE;
- const Location& location5 = FROM_HERE;
-
- Closure task5 = Bind(
- &PendingTaskTest::VerifyTraceAndPost, loop.task_runner(), location4,
- location5,
- ExpectedTrace({location3.program_counter(), location2.program_counter(),
- location1.program_counter(), location0.program_counter()}),
- DoNothing());
- Closure task4 = Bind(
- &PendingTaskTest::VerifyTraceAndPost, loop.task_runner(), location3,
- location4,
- ExpectedTrace({location2.program_counter(), location1.program_counter(),
- location0.program_counter(), nullptr}),
- task5);
- Closure task3 = Bind(
- &PendingTaskTest::VerifyTraceAndPost, loop.task_runner(), location2,
- location3, ExpectedTrace({location1.program_counter(),
- location0.program_counter(), nullptr, nullptr}),
- task4);
- Closure task2 =
- Bind(&PendingTaskTest::VerifyTraceAndPost, loop.task_runner(), location1,
- location2, ExpectedTrace({location0.program_counter()}), task3);
- Closure task1 = Bind(&PendingTaskTest::VerifyTraceAndPost, loop.task_runner(),
- location0, location1, ExpectedTrace({}), task2);
-
- loop.task_runner()->PostTask(location0, task1);
-
- RunLoop().RunUntilIdle();
-}
-
-// Post a task onto another thread. Ensure on the other thread, it has the
-// right stack trace.
-TEST_F(PendingTaskTest, MultipleThreads) {
- MessageLoop loop; // Implicitly "thread a."
- Thread thread_b("pt_test_b");
- Thread thread_c("pt_test_c");
- thread_b.StartAndWaitForTesting();
- thread_c.StartAndWaitForTesting();
-
- const Location& location_a0 = FROM_HERE;
- const Location& location_a1 = FROM_HERE;
- const Location& location_a2 = FROM_HERE;
- const Location& location_a3 = FROM_HERE;
-
- const Location& location_b0 = FROM_HERE;
- const Location& location_b1 = FROM_HERE;
-
- const Location& location_c0 = FROM_HERE;
-
- // On thread c, post a task back to thread a that verifies its trace
- // and terminates after one more self-post.
- Closure task_a2 =
- Bind(&PendingTaskTest::VerifyTraceAndPost, loop.task_runner(),
- location_a2, location_a3,
- ExpectedTrace(
- {location_c0.program_counter(), location_b0.program_counter(),
- location_a1.program_counter(), location_a0.program_counter()}),
- DoNothing());
- Closure task_c0 = Bind(&PendingTaskTest::VerifyTraceAndPost,
- loop.task_runner(), location_c0, location_a2,
- ExpectedTrace({location_b0.program_counter(),
- location_a1.program_counter(),
- location_a0.program_counter()}),
- task_a2);
-
- // On thread b run two tasks that conceptually come from the same location
- // (managed via RunTwo().) One will post back to thread b and another will
- // post to thread c to test spawning multiple tasks on different message
- // loops. The task posted to thread c will not get location b1 whereas the
- // one posted back to thread b will.
- Closure task_b0_fork =
- Bind(&PendingTaskTest::VerifyTraceAndPost,
- thread_c.message_loop()->task_runner(), location_b0, location_c0,
- ExpectedTrace({location_a1.program_counter(),
- location_a0.program_counter(), nullptr}),
- task_c0);
- Closure task_b0_local =
- Bind(&PendingTaskTest::VerifyTraceAndPost,
- thread_b.message_loop()->task_runner(), location_b0, location_b1,
- ExpectedTrace({location_a1.program_counter(),
- location_a0.program_counter(), nullptr}),
- DoNothing());
-
- // Push one frame onto the stack in thread a then pass to thread b.
- Closure task_a1 =
- Bind(&PendingTaskTest::VerifyTraceAndPost,
- thread_b.message_loop()->task_runner(), location_a1, location_b0,
- ExpectedTrace({location_a0.program_counter(), nullptr}),
- Bind(&PendingTaskTest::RunTwo, task_b0_local, task_b0_fork));
- Closure task_a0 =
- Bind(&PendingTaskTest::VerifyTraceAndPost, loop.task_runner(),
- location_a0, location_a1, ExpectedTrace({nullptr}), task_a1);
-
- loop.task_runner()->PostTask(location_a0, task_a0);
-
- RunLoop().RunUntilIdle();
-
- thread_b.FlushForTesting();
- thread_b.Stop();
-
- thread_c.FlushForTesting();
- thread_c.Stop();
-}
-
-} // namespace base
diff --git a/chromium/base/posix/unix_domain_socket.cc b/chromium/base/posix/unix_domain_socket.cc
index 578a53cb349..7c087a53b5c 100644
--- a/chromium/base/posix/unix_domain_socket.cc
+++ b/chromium/base/posix/unix_domain_socket.cc
@@ -189,6 +189,11 @@ ssize_t UnixDomainSocket::RecvMsgWithFlags(int fd,
}
if (msg.msg_flags & MSG_TRUNC || msg.msg_flags & MSG_CTRUNC) {
+ if (msg.msg_flags & MSG_CTRUNC) {
+ // Extraordinary case, not caller fixable. Log something.
+ LOG(ERROR) << "recvmsg returned MSG_CTRUNC flag, buffer len is "
+ << msg.msg_controllen;
+ }
for (unsigned i = 0; i < wire_fds_len; ++i)
close(wire_fds[i]);
errno = EMSGSIZE;
diff --git a/chromium/base/power_monitor/power_monitor_device_source_posix.cc b/chromium/base/power_monitor/power_monitor_device_source_stub.cc
index f24e5b23f0a..f24e5b23f0a 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_posix.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source_stub.cc
diff --git a/chromium/base/process/kill.cc b/chromium/base/process/kill.cc
index 9fa0a0e1039..0332ac0303e 100644
--- a/chromium/base/process/kill.cc
+++ b/chromium/base/process/kill.cc
@@ -4,7 +4,10 @@
#include "base/process/kill.h"
+#include "base/bind.h"
#include "base/process/process_iterator.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/time/time.h"
namespace base {
@@ -28,4 +31,31 @@ bool KillProcesses(const FilePath::StringType& executable_name,
return result;
}
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+// Common implementation for platforms under which |process| is a handle to
+// the process, rather than an identifier that must be "reaped".
+void EnsureProcessTerminated(Process process) {
+ DCHECK(!process.is_current());
+
+ if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
+ return;
+
+ PostDelayedTaskWithTraits(
+ FROM_HERE,
+ {TaskPriority::BACKGROUND, TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ BindOnce(
+ [](Process process) {
+ if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
+ return;
+#if defined(OS_WIN)
+ process.Terminate(win::kProcessKilledExitCode, false);
+#else
+ process.Terminate(-1, false);
+#endif
+ },
+ std::move(process)),
+ TimeDelta::FromSeconds(2));
+}
+#endif // defined(OS_WIN) || defined(OS_FUCHSIA)
+
} // namespace base
diff --git a/chromium/base/process/kill.h b/chromium/base/process/kill.h
index 524ed040d9b..005b72e2aa4 100644
--- a/chromium/base/process/kill.h
+++ b/chromium/base/process/kill.h
@@ -110,8 +110,22 @@ BASE_EXPORT TerminationStatus GetTerminationStatus(ProcessHandle handle,
//
BASE_EXPORT TerminationStatus GetKnownDeadTerminationStatus(
ProcessHandle handle, int* exit_code);
+
+#if defined(OS_LINUX)
+// Spawns a thread to wait asynchronously for the child |process| to exit
+// and then reaps it.
+BASE_EXPORT void EnsureProcessGetsReaped(Process process);
+#endif // defined(OS_LINUX)
#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+// Registers |process| to be asynchronously monitored for termination, forcibly
+// terminated if necessary, and reaped on exit. The caller should have signalled
+// |process| to exit before calling this API. The API will allow a couple of
+// seconds grace period before forcibly terminating |process|.
+// TODO(https://crbug.com/806451): The Mac implementation currently blocks the
+// calling thread for up to two seconds.
+BASE_EXPORT void EnsureProcessTerminated(Process process);
+
// These are only sparingly used, and not needed on Fuchsia. They could be
// implemented if necessary.
#if !defined(OS_FUCHSIA)
@@ -136,28 +150,6 @@ BASE_EXPORT bool CleanupProcesses(const FilePath::StringType& executable_name,
const ProcessFilter* filter);
#endif // !defined(OS_FUCHSIA)
-// This method ensures that the specified process eventually terminates, and
-// then it closes the given process handle.
-//
-// It assumes that the process has already been signalled to exit, and it
-// begins by waiting a small amount of time for it to exit. If the process
-// does not appear to have exited, then this function starts to become
-// aggressive about ensuring that the process terminates.
-//
-// On Linux this method does not block the calling thread.
-// On OS X and Fuchsia, this method may block for up to 2 seconds.
-//
-// NOTE: The process must have been opened with the PROCESS_TERMINATE and
-// SYNCHRONIZE permissions.
-//
-BASE_EXPORT void EnsureProcessTerminated(Process process);
-
-#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-// The nicer version of EnsureProcessTerminated() that is patient and will
-// wait for |pid| to finish and then reap it.
-BASE_EXPORT void EnsureProcessGetsReaped(ProcessId pid);
-#endif
-
} // namespace base
#endif // BASE_PROCESS_KILL_H_
diff --git a/chromium/base/process/kill_fuchsia.cc b/chromium/base/process/kill_fuchsia.cc
index 4edefe68c5e..a862fc308e2 100644
--- a/chromium/base/process/kill_fuchsia.cc
+++ b/chromium/base/process/kill_fuchsia.cc
@@ -50,20 +50,4 @@ TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
: TERMINATION_STATUS_ABNORMAL_TERMINATION;
}
-void EnsureProcessTerminated(Process process) {
- DCHECK(!process.is_current());
-
- // Wait for up to two seconds for the process to terminate, and then kill it
- // forcefully if it hasn't already exited.
- zx_signals_t signals;
- if (zx_object_wait_one(process.Handle(), ZX_TASK_TERMINATED,
- zx_deadline_after(ZX_SEC(2)), &signals) == ZX_OK) {
- DCHECK(signals & ZX_TASK_TERMINATED);
- // If already signaled, then the process is terminated.
- return;
- }
-
- process.Terminate(/*exit_code=*/1, /*wait=*/true);
-}
-
} // namespace base
diff --git a/chromium/base/process/kill_posix.cc b/chromium/base/process/kill_posix.cc
index 67c68c57eae..4b52d8b0ef4 100644
--- a/chromium/base/process/kill_posix.cc
+++ b/chromium/base/process/kill_posix.cc
@@ -12,12 +12,11 @@
#include "base/debug/activity_tracker.h"
#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_iterator.h"
-#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
@@ -136,81 +135,50 @@ bool CleanupProcesses(const FilePath::StringType& executable_name,
namespace {
-// A thread class which waits for the given child to exit and reaps it.
-// If the child doesn't exit within a couple of seconds, kill it.
class BackgroundReaper : public PlatformThread::Delegate {
public:
- BackgroundReaper(pid_t child, unsigned timeout)
- : child_(child),
- timeout_(timeout) {
- }
+ BackgroundReaper(base::Process child_process, const TimeDelta& wait_time)
+ : child_process_(std::move(child_process)), wait_time_(wait_time) {}
- // Overridden from PlatformThread::Delegate:
void ThreadMain() override {
- WaitForChildToDie();
- delete this;
- }
-
- void WaitForChildToDie() {
- // Wait forever case.
- if (timeout_ == 0) {
- pid_t r = HANDLE_EINTR(waitpid(child_, nullptr, 0));
- if (r != child_) {
- DPLOG(ERROR) << "While waiting for " << child_
- << " to terminate, we got the following result: " << r;
- }
- return;
- }
-
- // There's no good way to wait for a specific child to exit in a timed
- // fashion. (No kqueue on Linux), so we just loop and sleep.
-
- // Wait for 2 * timeout_ 500 milliseconds intervals.
- for (unsigned i = 0; i < 2 * timeout_; ++i) {
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));
- if (Process(child_).WaitForExitWithTimeout(TimeDelta(), nullptr))
- return;
- }
-
- if (kill(child_, SIGKILL) == 0) {
- // SIGKILL is uncatchable. Since the signal was delivered, we can
- // just wait for the process to die now in a blocking manner.
- Process(child_).WaitForExit(nullptr);
- } else {
- DLOG(ERROR) << "While waiting for " << child_ << " to terminate we"
- << " failed to deliver a SIGKILL signal (" << errno << ").";
+ if (!wait_time_.is_zero()) {
+ child_process_.WaitForExitWithTimeout(wait_time_, nullptr);
+ kill(child_process_.Handle(), SIGKILL);
}
+ child_process_.WaitForExit(nullptr);
+ delete this;
}
private:
- const pid_t child_;
- // Number of seconds to wait, if 0 then wait forever and do not attempt to
- // kill |child_|.
- const unsigned timeout_;
-
+ Process child_process_;
+ const TimeDelta wait_time_;
DISALLOW_COPY_AND_ASSIGN(BackgroundReaper);
};
} // namespace
void EnsureProcessTerminated(Process process) {
- // If the child is already dead, then there's nothing to do.
+ DCHECK(!process.is_current());
+
if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
return;
- const unsigned timeout = 2; // seconds
- BackgroundReaper* reaper = new BackgroundReaper(process.Pid(), timeout);
- PlatformThread::CreateNonJoinable(0, reaper);
+ PlatformThread::CreateNonJoinable(
+ 0, new BackgroundReaper(std::move(process), TimeDelta::FromSeconds(2)));
}
-void EnsureProcessGetsReaped(ProcessId pid) {
+#if defined(OS_LINUX)
+void EnsureProcessGetsReaped(Process process) {
+ DCHECK(!process.is_current());
+
// If the child is already dead, then there's nothing to do.
- if (Process(pid).WaitForExitWithTimeout(TimeDelta(), nullptr))
+ if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
return;
- BackgroundReaper* reaper = new BackgroundReaper(pid, 0);
- PlatformThread::CreateNonJoinable(0, reaper);
+ PlatformThread::CreateNonJoinable(
+ 0, new BackgroundReaper(std::move(process), TimeDelta()));
}
+#endif // defined(OS_LINUX)
#endif // !defined(OS_MACOSX)
#endif // !defined(OS_NACL_NONSFI)
diff --git a/chromium/base/process/kill_win.cc b/chromium/base/process/kill_win.cc
index fee61c1e796..7a664429bcd 100644
--- a/chromium/base/process/kill_win.cc
+++ b/chromium/base/process/kill_win.cc
@@ -4,40 +4,19 @@
#include "base/process/kill.h"
+#include <algorithm>
+
#include <windows.h>
#include <io.h>
#include <stdint.h>
-#include <algorithm>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/debug/activity_tracker.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/process/memory.h"
#include "base/process/process_iterator.h"
-#include "base/task_scheduler/post_task.h"
namespace base {
-namespace {
-
-bool CheckForProcessExitAndReport(const Process& process) {
- if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0) {
- int exit_code;
- TerminationStatus status =
- GetTerminationStatus(process.Handle(), &exit_code);
- DCHECK_NE(TERMINATION_STATUS_STILL_RUNNING, status);
- process.Exited(exit_code);
- return true;
- }
- return false;
-}
-
-} // namespace
-
TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
DCHECK(exit_code);
@@ -134,24 +113,4 @@ bool CleanupProcesses(const FilePath::StringType& executable_name,
return false;
}
-void EnsureProcessTerminated(Process process) {
- DCHECK(!process.is_current());
-
- // If already signaled, then we are done!
- if (CheckForProcessExitAndReport(process))
- return;
-
- PostDelayedTaskWithTraits(
- FROM_HERE,
- {TaskPriority::BACKGROUND, TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
- BindOnce(
- [](Process process) {
- if (CheckForProcessExitAndReport(process))
- return;
- process.Terminate(win::kProcessKilledExitCode, false);
- },
- std::move(process)),
- TimeDelta::FromSeconds(2));
-}
-
} // namespace base
diff --git a/chromium/base/process/launch_fuchsia.cc b/chromium/base/process/launch_fuchsia.cc
index c16a9600f67..d1edc473ede 100644
--- a/chromium/base/process/launch_fuchsia.cc
+++ b/chromium/base/process/launch_fuchsia.cc
@@ -16,6 +16,7 @@
#include "base/command_line.h"
#include "base/files/file_util.h"
#include "base/fuchsia/default_job.h"
+#include "base/fuchsia/fuchsia_logging.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/scoped_generic.h"
@@ -86,7 +87,7 @@ bool MapPathsToLaunchpad(const std::vector<std::string> paths_to_map,
uint32_t types[FDIO_MAX_HANDLES] = {};
status = fdio_transfer_fd(scoped_fd.get(), 0, handles, types);
if (status != ZX_OK) {
- LOG(ERROR) << "fdio_transfer_fd failed: " << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "fdio_transfer_fd";
return false;
}
ScopedZxHandle scoped_handle(handles[0]);
@@ -110,8 +111,7 @@ bool MapPathsToLaunchpad(const std::vector<std::string> paths_to_map,
status = launchpad_add_handle(lp, scoped_handle.release(),
PA_HND(PA_NS_DIR, paths_idx));
if (status != ZX_OK) {
- LOG(ERROR) << "launchpad_add_handle failed: "
- << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "launchpad_add_handle";
return false;
}
paths_c_str.push_back(next_path_str.c_str());
@@ -121,8 +121,7 @@ bool MapPathsToLaunchpad(const std::vector<std::string> paths_to_map,
status =
launchpad_set_nametable(lp, paths_c_str.size(), paths_c_str.data());
if (status != ZX_OK) {
- LOG(ERROR) << "launchpad_set_nametable failed: "
- << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "launchpad_set_nametable";
return false;
}
}
@@ -166,19 +165,18 @@ Process LaunchProcess(const std::vector<std::string>& argv,
ScopedLaunchpad lp;
zx_status_t status;
if ((status = launchpad_create(job, argv_cstr[0], lp.receive())) != ZX_OK) {
- LOG(ERROR) << "launchpad_create(job): " << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "launchpad_create(job)";
return Process();
}
if ((status = launchpad_load_from_file(lp.get(), argv_cstr[0])) != ZX_OK) {
- LOG(ERROR) << "launchpad_load_from_file(): "
- << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "launchpad_load_from_file(" << argv_cstr[0] << ")";
return Process();
}
if ((status = launchpad_set_args(lp.get(), argv.size(), argv_cstr.data())) !=
ZX_OK) {
- LOG(ERROR) << "launchpad_set_args(): " << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "launchpad_set_args";
return Process();
}
@@ -206,8 +204,7 @@ Process LaunchProcess(const std::vector<std::string>& argv,
zx_handle_t job_duplicate = ZX_HANDLE_INVALID;
if ((status = zx_handle_duplicate(job, ZX_RIGHT_SAME_RIGHTS,
&job_duplicate)) != ZX_OK) {
- LOG(ERROR) << "zx_handle_duplicate(job): "
- << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "zx_handle_duplicate";
return Process();
}
launchpad_add_handle(lp.get(), job_duplicate, PA_HND(PA_JOB_DEFAULT, 0));
@@ -257,8 +254,7 @@ Process LaunchProcess(const std::vector<std::string>& argv,
zx_handle_t process_handle;
const char* errmsg;
if ((status = launchpad_go(lp.get(), &process_handle, &errmsg)) != ZX_OK) {
- LOG(ERROR) << "launchpad_go failed: " << errmsg
- << ", status=" << zx_status_get_string(status);
+ ZX_LOG(ERROR, status) << "launchpad_go failed: " << errmsg;
return Process();
}
ignore_result(lp.release()); // launchpad_go() took ownership.
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index 90c2d830dbc..ec584883b96 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -33,6 +33,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process.h"
#include "base/process/process_metrics.h"
@@ -40,6 +41,8 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#if defined(OS_LINUX) || defined(OS_AIX)
@@ -297,6 +300,7 @@ Process LaunchProcess(const CommandLine& cmdline,
Process LaunchProcess(const std::vector<std::string>& argv,
const LaunchOptions& options) {
+ TRACE_EVENT0("base", "LaunchProcess");
#if defined(OS_MACOSX)
if (FeatureList::IsEnabled(kMacLaunchProcessPosixSpawn)) {
// TODO(rsesek): Do this unconditionally. There is one user for each of
@@ -335,6 +339,7 @@ Process LaunchProcess(const std::vector<std::string>& argv,
}
pid_t pid;
+ base::TimeTicks before_fork = TimeTicks::Now();
#if defined(OS_LINUX) || defined(OS_AIX)
if (options.clone_flags) {
// Signal handling in this function assumes the creation of a new
@@ -361,7 +366,11 @@ Process LaunchProcess(const std::vector<std::string>& argv,
// Always restore the original signal mask in the parent.
if (pid != 0) {
+ base::TimeTicks after_fork = TimeTicks::Now();
SetSignalMask(orig_sigmask);
+
+ base::TimeDelta fork_time = after_fork - before_fork;
+ UMA_HISTOGRAM_TIMES("MPArch.ForkTime", fork_time);
}
if (pid < 0) {
diff --git a/chromium/base/process/process_info_win.cc b/chromium/base/process/process_info_win.cc
index a33216b6935..23e93e335eb 100644
--- a/chromium/base/process/process_info_win.cc
+++ b/chromium/base/process/process_info_win.cc
@@ -5,6 +5,7 @@
#include "base/process/process_info.h"
#include <windows.h>
+#include <memory>
#include "base/logging.h"
#include "base/memory/ptr_util.h"
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index fc2c46d3022..3ae373f6963 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -41,58 +41,6 @@ namespace base {
// Full declaration is in process_metrics_iocounters.h.
struct IoCounters;
-// Working Set (resident) memory usage broken down by
-//
-// On Windows:
-// priv (private): These pages (kbytes) cannot be shared with any other process.
-// shareable: These pages (kbytes) can be shared with other processes under
-// the right circumstances.
-// shared : These pages (kbytes) are currently shared with at least one
-// other process.
-//
-// On Linux:
-// priv: Pages mapped only by this process.
-// shared: PSS or 0 if the kernel doesn't support this.
-// shareable: 0
-
-// On ChromeOS:
-// priv: Pages mapped only by this process.
-// shared: PSS or 0 if the kernel doesn't support this.
-// shareable: 0
-// swapped Pages swapped out to zram.
-//
-// On macOS:
-// priv: Resident size (RSS) including shared memory. Warning: This
-// does not include compressed size and does not always
-// accurately account for shared memory due to things like
-// copy-on-write. TODO(erikchen): Revamp this with something
-// more accurate.
-// shared: 0
-// shareable: 0
-//
-struct WorkingSetKBytes {
- WorkingSetKBytes() : priv(0), shareable(0), shared(0) {}
- size_t priv;
- size_t shareable;
- size_t shared;
-#if defined(OS_CHROMEOS)
- size_t swapped;
-#endif
-};
-
-// Committed (resident + paged) memory usage broken down by
-// private: These pages cannot be shared with any other process.
-// mapped: These pages are mapped into the view of a section (backed by
-// pagefile.sys)
-// image: These pages are mapped into the view of an image section (backed by
-// file system)
-struct CommittedKBytes {
- CommittedKBytes() : priv(0), mapped(0), image(0) {}
- size_t priv;
- size_t mapped;
- size_t image;
-};
-
#if defined(OS_LINUX) || defined(OS_ANDROID)
// Minor and major page fault counts since the process creation.
// Both counts are process-wide, and exclude child processes.
@@ -108,11 +56,20 @@ struct PageFaultCounts {
// Convert a POSIX timeval to microseconds.
BASE_EXPORT int64_t TimeValToMicroseconds(const struct timeval& tv);
-// Provides performance metrics for a specified process (CPU usage, memory and
-// IO counters). Use CreateCurrentProcessMetrics() to get an instance for the
+// Provides performance metrics for a specified process (CPU usage and IO
+// counters). Use CreateCurrentProcessMetrics() to get an instance for the
// current process, or CreateProcessMetrics() to get an instance for an
// arbitrary process. Then, access the information with the different get
// methods.
+//
+// This class exposes a few platform-specific APIs for parsing memory usage, but
+// these are not intended to generalize to other platforms, since the memory
+// models differ substantially.
+//
+// To obtain consistent memory metrics, use the memory_instrumentation service.
+//
+// For further documentation on memory, see
+// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/README.md
class BASE_EXPORT ProcessMetrics {
public:
~ProcessMetrics();
@@ -135,40 +92,27 @@ class BASE_EXPORT ProcessMetrics {
// convenience wrapper for CreateProcessMetrics().
static std::unique_ptr<ProcessMetrics> CreateCurrentProcessMetrics();
- // Returns the current space allocated for the pagefile, in bytes (these pages
- // may or may not be in memory). On Linux, this returns the total virtual
- // memory size.
- size_t GetPagefileUsage() const;
- // Returns the peak space allocated for the pagefile, in bytes.
- size_t GetPeakPagefileUsage() const;
- // Returns the current working set size, in bytes. On Linux, this returns
- // the resident set size.
- size_t GetWorkingSetSize() const;
- // Returns the peak working set size, in bytes.
- size_t GetPeakWorkingSetSize() const;
- // Returns private and sharedusage, in bytes. Private bytes is the amount of
- // memory currently allocated to a process that cannot be shared. Returns
- // false on platform specific error conditions. Note: |private_bytes|
- // returns 0 on unsupported OSes: prior to XP SP2.
- bool GetMemoryBytes(size_t* private_bytes, size_t* shared_bytes) const;
- // Fills a CommittedKBytes with both resident and paged
- // memory usage as per definition of CommittedBytes.
- void GetCommittedKBytes(CommittedKBytes* usage) const;
- // Fills a WorkingSetKBytes containing resident private and shared memory
- // usage in bytes, as per definition of WorkingSetBytes. Note that this
- // function is somewhat expensive on Windows (a few ms per process).
- bool GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const;
- // Computes pss (proportional set size) of a process. Note that this
- // function is somewhat expensive on Windows (a few ms per process).
- bool GetProportionalSetSizeBytes(uint64_t* pss_bytes) const;
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ // Resident Set Size is a Linux/Android specific memory concept. Do not
+ // attempt to extend this to other platforms.
+ BASE_EXPORT size_t GetResidentSetSize() const;
+#endif
+
+#if defined(OS_CHROMEOS)
+ // /proc/<pid>/totmaps is a syscall that returns memory summary statistics for
+ // the process.
+ // totmaps is a Linux specific concept, currently only being used on ChromeOS.
+ // Do not attempt to extend this to other platforms.
+ //
+ struct TotalsSummary {
+ size_t private_clean_kb;
+ size_t private_dirty_kb;
+ size_t swap_kb;
+ };
+ BASE_EXPORT TotalsSummary GetTotalsSummary() const;
+#endif
#if defined(OS_MACOSX)
- // Fills both CommitedKBytes and WorkingSetKBytes in a single operation. This
- // is more efficient on Mac OS X, as the two can be retrieved with a single
- // system call.
- bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
- WorkingSetKBytes* ws_usage) const;
-
struct TaskVMInfo {
// Only available on macOS 10.12+.
// Anonymous, non-discardable memory, including non-volatile IOKit.
@@ -183,14 +127,6 @@ class BASE_EXPORT ProcessMetrics {
uint64_t compressed = 0;
};
TaskVMInfo GetTaskVMInfo() const;
-
- // Returns private, shared, and total resident bytes. |locked_bytes| refers to
- // bytes that must stay resident. |locked_bytes| only counts bytes locked by
- // this task, not bytes locked by the kernel.
- bool GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes,
- size_t* resident_bytes,
- size_t* locked_bytes) const;
#endif
// Returns the percentage of time spent executing, across all threads of the
@@ -264,14 +200,6 @@ class BASE_EXPORT ProcessMetrics {
ProcessMetrics(ProcessHandle process, PortProvider* port_provider);
#endif // !defined(OS_MACOSX) || defined(OS_IOS)
-#if defined(OS_LINUX) || defined(OS_ANDROID) | defined(OS_AIX)
- bool GetWorkingSetKBytesStatm(WorkingSetKBytes* ws_usage) const;
-#endif
-
-#if defined(OS_CHROMEOS)
- bool GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage) const;
-#endif
-
#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
#endif
@@ -291,7 +219,9 @@ class BASE_EXPORT ProcessMetrics {
// Used to store the previous times and CPU usage counts so we can
// compute the CPU usage between calls.
TimeTicks last_cpu_time_;
+#if defined(OS_WIN) || defined(OS_MACOSX)
int64_t last_system_time_;
+#endif
#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
// Same thing for idle wakeups.
diff --git a/chromium/base/process/process_metrics_freebsd.cc b/chromium/base/process/process_metrics_freebsd.cc
index ebbaaaf369e..4dee9980292 100644
--- a/chromium/base/process/process_metrics_freebsd.cc
+++ b/chromium/base/process/process_metrics_freebsd.cc
@@ -17,7 +17,6 @@ namespace base {
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
- last_system_time_(0),
last_cpu_(0) {}
// static
@@ -26,63 +25,6 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
return WrapUnique(new ProcessMetrics(process));
}
-size_t ProcessMetrics::GetPagefileUsage() const {
- struct kinfo_proc info;
- int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process_ };
- size_t length = sizeof(info);
-
- if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
- return 0;
-
- return info.ki_size;
-}
-
-size_t ProcessMetrics::GetPeakPagefileUsage() const {
- return 0;
-}
-
-size_t ProcessMetrics::GetWorkingSetSize() const {
- struct kinfo_proc info;
- int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process_ };
- size_t length = sizeof(info);
-
- if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
- return 0;
-
- return info.ki_rssize * getpagesize();
-}
-
-size_t ProcessMetrics::GetPeakWorkingSetSize() const {
- return 0;
-}
-
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) const {
- WorkingSetKBytes ws_usage;
- if (!GetWorkingSetKBytes(&ws_usage))
- return false;
-
- if (private_bytes)
- *private_bytes = ws_usage.priv << 10;
-
- if (shared_bytes)
- *shared_bytes = ws_usage.shared * 1024;
-
- return true;
-}
-
-bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
-// TODO(bapt) be sure we can't be precise
- size_t priv = GetWorkingSetSize();
- if (!priv)
- return false;
- ws_usage->priv = priv / 1024;
- ws_usage->shareable = 0;
- ws_usage->shared = 0;
-
- return true;
-}
-
double ProcessMetrics::GetPlatformIndependentCPUUsage() {
struct kinfo_proc info;
int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process_ };
diff --git a/chromium/base/process/process_metrics_fuchsia.cc b/chromium/base/process/process_metrics_fuchsia.cc
index 5204383a0b7..3710c308408 100644
--- a/chromium/base/process/process_metrics_fuchsia.cc
+++ b/chromium/base/process/process_metrics_fuchsia.cc
@@ -30,26 +30,6 @@ double ProcessMetrics::GetPlatformIndependentCPUUsage() {
return 0.0;
}
-size_t ProcessMetrics::GetPagefileUsage() const {
- NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
- return 0;
-}
-
-size_t ProcessMetrics::GetWorkingSetSize() const {
- NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
- return 0;
-}
-
-size_t ProcessMetrics::GetPeakWorkingSetSize() const {
- NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
- return 0;
-}
-
-bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
- NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
- return false;
-}
-
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
return false;
diff --git a/chromium/base/process/process_metrics_ios.cc b/chromium/base/process/process_metrics_ios.cc
index 31c812dee03..794304f7658 100644
--- a/chromium/base/process/process_metrics_ios.cc
+++ b/chromium/base/process/process_metrics_ios.cc
@@ -15,19 +15,6 @@
namespace base {
-namespace {
-
-bool GetTaskInfo(task_basic_info_64* task_info_data) {
- mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
- kern_return_t kr = task_info(mach_task_self(),
- TASK_BASIC_INFO_64,
- reinterpret_cast<task_info_t>(task_info_data),
- &count);
- return kr == KERN_SUCCESS;
-}
-
-} // namespace
-
ProcessMetrics::ProcessMetrics(ProcessHandle process) {}
ProcessMetrics::~ProcessMetrics() {}
@@ -43,20 +30,6 @@ double ProcessMetrics::GetPlatformIndependentCPUUsage() {
return 0;
}
-size_t ProcessMetrics::GetPagefileUsage() const {
- task_basic_info_64 task_info_data;
- if (!GetTaskInfo(&task_info_data))
- return 0;
- return task_info_data.virtual_size;
-}
-
-size_t ProcessMetrics::GetWorkingSetSize() const {
- task_basic_info_64 task_info_data;
- if (!GetTaskInfo(&task_info_data))
- return 0;
- return task_info_data.resident_size;
-}
-
size_t GetMaxFds() {
static const rlim_t kSystemDefaultMaxFds = 256;
rlim_t max_fds;
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 5997713599b..e13f9e7a643 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -211,51 +211,11 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
return WrapUnique(new ProcessMetrics(process));
}
-// On Linux, return vsize.
-size_t ProcessMetrics::GetPagefileUsage() const {
- return internal::ReadProcStatsAndGetFieldAsSizeT(process_,
- internal::VM_VSIZE);
-}
-
-// On Linux, return the high water mark of vsize.
-size_t ProcessMetrics::GetPeakPagefileUsage() const {
- return ReadProcStatusAndGetFieldAsSizeT(process_, "VmPeak") * 1024;
-}
-
-// On Linux, return RSS.
-size_t ProcessMetrics::GetWorkingSetSize() const {
+size_t ProcessMetrics::GetResidentSetSize() const {
return internal::ReadProcStatsAndGetFieldAsSizeT(process_, internal::VM_RSS) *
getpagesize();
}
-// On Linux, return the high water mark of RSS.
-size_t ProcessMetrics::GetPeakWorkingSetSize() const {
- return ReadProcStatusAndGetFieldAsSizeT(process_, "VmHWM") * 1024;
-}
-
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) const {
- WorkingSetKBytes ws_usage;
- if (!GetWorkingSetKBytes(&ws_usage))
- return false;
-
- if (private_bytes)
- *private_bytes = ws_usage.priv * 1024;
-
- if (shared_bytes)
- *shared_bytes = ws_usage.shared * 1024;
-
- return true;
-}
-
-bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
-#if defined(OS_CHROMEOS)
- if (GetWorkingSetKBytesTotmaps(ws_usage))
- return true;
-#endif
- return GetWorkingSetKBytesStatm(ws_usage);
-}
-
double ProcessMetrics::GetPlatformIndependentCPUUsage() {
TimeTicks time = TimeTicks::Now();
@@ -398,7 +358,6 @@ int ProcessMetrics::GetOpenFdSoftLimit() const {
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
- last_system_time_(0),
#if defined(OS_LINUX) || defined(OS_AIX)
last_absolute_idle_wakeups_(0),
#endif
@@ -408,8 +367,7 @@ ProcessMetrics::ProcessMetrics(ProcessHandle process)
#if defined(OS_CHROMEOS)
// Private, Shared and Proportional working set sizes are obtained from
// /proc/<pid>/totmaps
-bool ProcessMetrics::GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage)
- const {
+ProcessMetrics::TotalsSummary ProcessMetrics::GetTotalsSummary() const {
// The format of /proc/<pid>/totmaps is:
//
// Rss: 6120 kB
@@ -423,7 +381,8 @@ bool ProcessMetrics::GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage)
// AnonHugePages: XXX kB
// Swap: XXX kB
// Locked: XXX kB
- const size_t kPssIndex = (1 * 3) + 1;
+ ProcessMetrics::TotalsSummary summary = {};
+
const size_t kPrivate_CleanIndex = (4 * 3) + 1;
const size_t kPrivate_DirtyIndex = (5 * 3) + 1;
const size_t kSwapIndex = (9 * 3) + 1;
@@ -434,85 +393,36 @@ bool ProcessMetrics::GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage)
ThreadRestrictions::ScopedAllowIO allow_io;
bool ret = ReadFileToString(totmaps_file, &totmaps_data);
if (!ret || totmaps_data.length() == 0)
- return false;
+ return summary;
}
std::vector<std::string> totmaps_fields = SplitString(
totmaps_data, kWhitespaceASCII, KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
- DCHECK_EQ("Pss:", totmaps_fields[kPssIndex-1]);
DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
DCHECK_EQ("Private_Dirty:", totmaps_fields[kPrivate_DirtyIndex - 1]);
DCHECK_EQ("Swap:", totmaps_fields[kSwapIndex-1]);
- int pss = 0;
- int private_clean = 0;
- int private_dirty = 0;
- int swap = 0;
- bool ret = true;
- ret &= StringToInt(totmaps_fields[kPssIndex], &pss);
- ret &= StringToInt(totmaps_fields[kPrivate_CleanIndex], &private_clean);
- ret &= StringToInt(totmaps_fields[kPrivate_DirtyIndex], &private_dirty);
- ret &= StringToInt(totmaps_fields[kSwapIndex], &swap);
-
- // On ChromeOS, swap goes to zram. Count this as private / shared, as
- // increased swap decreases available RAM to user processes, which would
- // otherwise create surprising results.
- ws_usage->priv = private_clean + private_dirty + swap;
- ws_usage->shared = pss + swap;
- ws_usage->shareable = 0;
- ws_usage->swapped = swap;
- return ret;
-}
-#endif
-
-// Private and Shared working set sizes are obtained from /proc/<pid>/statm.
-bool ProcessMetrics::GetWorkingSetKBytesStatm(WorkingSetKBytes* ws_usage)
- const {
- // Use statm instead of smaps because smaps is:
- // a) Large and slow to parse.
- // b) Unavailable in the SUID sandbox.
-
- // First get the page size, since everything is measured in pages.
- // For details, see: man 5 proc.
- const int page_size_kb = getpagesize() / 1024;
- if (page_size_kb <= 0)
- return false;
-
- std::string statm;
- {
- FilePath statm_file = internal::GetProcPidDir(process_).Append("statm");
- // Synchronously reading files in /proc does not hit the disk.
- ThreadRestrictions::ScopedAllowIO allow_io;
- bool ret = ReadFileToString(statm_file, &statm);
- if (!ret || statm.length() == 0)
- return false;
- }
-
- std::vector<StringPiece> statm_vec =
- SplitStringPiece(statm, " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
- if (statm_vec.size() != 7)
- return false; // Not the expected format.
-
- int statm_rss;
- int statm_shared;
- bool ret = true;
- ret &= StringToInt(statm_vec[1], &statm_rss);
- ret &= StringToInt(statm_vec[2], &statm_shared);
+ int private_clean_kb = 0;
+ int private_dirty_kb = 0;
+ int swap_kb = 0;
+ bool success = true;
+ success &=
+ StringToInt(totmaps_fields[kPrivate_CleanIndex], &private_clean_kb);
+ success &=
+ StringToInt(totmaps_fields[kPrivate_DirtyIndex], &private_dirty_kb);
+ success &= StringToInt(totmaps_fields[kSwapIndex], &swap_kb);
- ws_usage->priv = (statm_rss - statm_shared) * page_size_kb;
- ws_usage->shared = statm_shared * page_size_kb;
+ if (!success)
+ return summary;
- // Sharable is not calculated, as it does not provide interesting data.
- ws_usage->shareable = 0;
+ summary.private_clean_kb = private_clean_kb;
+ summary.private_dirty_kb = private_dirty_kb;
+ summary.swap_kb = swap_kb;
-#if defined(OS_CHROMEOS)
- // Can't get swapped memory from statm.
- ws_usage->swapped = 0;
-#endif
-
- return ret;
+ return summary;
}
+#endif
size_t GetSystemCommitCharge() {
SystemMemoryInfoKB meminfo;
diff --git a/chromium/base/process/process_metrics_mac.cc b/chromium/base/process/process_metrics_mac.cc
index 63c59e21453..148253f26d2 100644
--- a/chromium/base/process/process_metrics_mac.cc
+++ b/chromium/base/process/process_metrics_mac.cc
@@ -68,33 +68,6 @@ bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
return kr == KERN_SUCCESS;
}
-bool GetCPUType(cpu_type_t* cpu_type) {
- size_t len = sizeof(*cpu_type);
- int result = sysctlbyname("sysctl.proc_cputype",
- cpu_type,
- &len,
- NULL,
- 0);
- if (result != 0) {
- DPLOG(ERROR) << "sysctlbyname(""sysctl.proc_cputype"")";
- return false;
- }
-
- return true;
-}
-
-bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
- if (type == CPU_TYPE_I386) {
- return addr >= SHARED_REGION_BASE_I386 &&
- addr < (SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386);
- } else if (type == CPU_TYPE_X86_64) {
- return addr >= SHARED_REGION_BASE_X86_64 &&
- addr < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64);
- } else {
- return false;
- }
-}
-
MachVMRegionResult ParseOutputFromMachVMRegion(kern_return_t kr) {
if (kr == KERN_INVALID_ADDRESS) {
// We're at the end of the address space.
@@ -132,173 +105,6 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
return WrapUnique(new ProcessMetrics(process, port_provider));
}
-size_t ProcessMetrics::GetPagefileUsage() const {
- task_basic_info_64 task_info_data;
- if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
- return 0;
- return task_info_data.virtual_size;
-}
-
-size_t ProcessMetrics::GetPeakPagefileUsage() const {
- return 0;
-}
-
-size_t ProcessMetrics::GetWorkingSetSize() const {
- size_t resident_bytes = 0;
- if (!GetMemoryBytes(nullptr, nullptr, &resident_bytes, nullptr))
- return 0;
- return resident_bytes;
-}
-
-size_t ProcessMetrics::GetPeakWorkingSetSize() const {
- return 0;
-}
-
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) const {
- return GetMemoryBytes(private_bytes, shared_bytes, nullptr, nullptr);
-}
-
-// This is a rough approximation of the algorithm that libtop uses.
-// private_bytes is the size of private resident memory.
-// shared_bytes is the size of shared resident memory.
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes,
- size_t* resident_bytes,
- size_t* locked_bytes) const {
- size_t private_pages_count = 0;
- size_t shared_pages_count = 0;
- size_t wired_pages_count = 0;
-
- mach_port_t task = TaskForPid(process_);
- if (task == MACH_PORT_NULL) {
- DLOG(ERROR) << "Invalid process";
- return false;
- }
-
- cpu_type_t cpu_type;
- if (!GetCPUType(&cpu_type))
- return false;
-
- // The same region can be referenced multiple times. To avoid double counting
- // we need to keep track of which regions we've already counted.
- hash_set<int> seen_objects;
-
- // We iterate through each VM region in the task's address map. For shared
- // memory we add up all the pages that are marked as shared. Like libtop we
- // try to avoid counting pages that are also referenced by other tasks. Since
- // we don't have access to the VM regions of other tasks the only hint we have
- // is if the address is in the shared region area.
- //
- // Private memory is much simpler. We simply count the pages that are marked
- // as private or copy on write (COW).
- //
- // See libtop_update_vm_regions in
- // http://www.opensource.apple.com/source/top/top-67/libtop.c
- mach_vm_size_t size = 0;
- mach_vm_address_t address = MACH_VM_MIN_ADDRESS;
- while (true) {
- base::CheckedNumeric<mach_vm_address_t> next_address(address);
- next_address += size;
- if (!next_address.IsValid())
- return false;
- address = next_address.ValueOrDie();
-
- mach_vm_address_t address_copy = address;
- vm_region_top_info_data_t info;
- MachVMRegionResult result = GetTopInfo(task, &size, &address, &info);
- if (result == MachVMRegionResult::Error)
- return false;
- if (result == MachVMRegionResult::Finished)
- break;
-
- vm_region_basic_info_64 basic_info;
- mach_vm_size_t dummy_size = 0;
- result = GetBasicInfo(task, &dummy_size, &address_copy, &basic_info);
- if (result == MachVMRegionResult::Error)
- return false;
- if (result == MachVMRegionResult::Finished)
- break;
-
- bool is_wired = basic_info.user_wired_count > 0;
-
- if (IsAddressInSharedRegion(address, cpu_type) &&
- info.share_mode != SM_PRIVATE)
- continue;
-
- if (info.share_mode == SM_COW && info.ref_count == 1)
- info.share_mode = SM_PRIVATE;
-
- switch (info.share_mode) {
- case SM_LARGE_PAGE:
- case SM_PRIVATE:
- private_pages_count += info.private_pages_resident;
- private_pages_count += info.shared_pages_resident;
- break;
- case SM_COW:
- private_pages_count += info.private_pages_resident;
- FALLTHROUGH;
- case SM_SHARED:
- case SM_PRIVATE_ALIASED:
- case SM_TRUESHARED:
- case SM_SHARED_ALIASED:
- if (seen_objects.count(info.obj_id) == 0) {
- // Only count the first reference to this region.
- seen_objects.insert(info.obj_id);
- shared_pages_count += info.shared_pages_resident;
- }
- break;
- default:
- break;
- }
- if (is_wired) {
- wired_pages_count +=
- info.private_pages_resident + info.shared_pages_resident;
- }
- }
-
- if (private_bytes)
- *private_bytes = private_pages_count * PAGE_SIZE;
- if (shared_bytes)
- *shared_bytes = shared_pages_count * PAGE_SIZE;
- if (resident_bytes)
- *resident_bytes = (private_pages_count + shared_pages_count) * PAGE_SIZE;
- if (locked_bytes)
- *locked_bytes = wired_pages_count * PAGE_SIZE;
-
- return true;
-}
-
-void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
- WorkingSetKBytes unused;
- if (!GetCommittedAndWorkingSetKBytes(usage, &unused)) {
- *usage = CommittedKBytes();
- }
-}
-
-bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
- CommittedKBytes unused;
- return GetCommittedAndWorkingSetKBytes(&unused, ws_usage);
-}
-
-bool ProcessMetrics::GetCommittedAndWorkingSetKBytes(
- CommittedKBytes* usage,
- WorkingSetKBytes* ws_usage) const {
- task_basic_info_64 task_info_data;
- if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
- return false;
-
- usage->priv = task_info_data.virtual_size / 1024;
- usage->mapped = 0;
- usage->image = 0;
-
- ws_usage->priv = task_info_data.resident_size / 1024;
- ws_usage->shareable = 0;
- ws_usage->shared = 0;
-
- return true;
-}
-
ProcessMetrics::TaskVMInfo ProcessMetrics::GetTaskVMInfo() const {
TaskVMInfo info;
ChromeTaskVMInfo task_vm_info;
diff --git a/chromium/base/process/process_metrics_openbsd.cc b/chromium/base/process/process_metrics_openbsd.cc
index 77b43bd10b4..3bb1ac5c3bb 100644
--- a/chromium/base/process/process_metrics_openbsd.cc
+++ b/chromium/base/process/process_metrics_openbsd.cc
@@ -21,76 +21,6 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
return WrapUnique(new ProcessMetrics(process));
}
-size_t ProcessMetrics::GetPagefileUsage() const {
- struct kinfo_proc info;
- size_t length;
- int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process_,
- sizeof(struct kinfo_proc), 0 };
-
- if (sysctl(mib, arraysize(mib), NULL, &length, NULL, 0) < 0)
- return -1;
-
- mib[5] = (length / sizeof(struct kinfo_proc));
-
- if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
- return -1;
-
- return (info.p_vm_tsize + info.p_vm_dsize + info.p_vm_ssize);
-}
-
-size_t ProcessMetrics::GetPeakPagefileUsage() const {
- return 0;
-}
-
-size_t ProcessMetrics::GetWorkingSetSize() const {
- struct kinfo_proc info;
- size_t length;
- int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process_,
- sizeof(struct kinfo_proc), 0 };
-
- if (sysctl(mib, arraysize(mib), NULL, &length, NULL, 0) < 0)
- return -1;
-
- mib[5] = (length / sizeof(struct kinfo_proc));
-
- if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
- return -1;
-
- return info.p_vm_rssize * getpagesize();
-}
-
-size_t ProcessMetrics::GetPeakWorkingSetSize() const {
- return 0;
-}
-
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) const {
- WorkingSetKBytes ws_usage;
-
- if (!GetWorkingSetKBytes(&ws_usage))
- return false;
-
- if (private_bytes)
- *private_bytes = ws_usage.priv << 10;
-
- if (shared_bytes)
- *shared_bytes = ws_usage.shared * 1024;
-
- return true;
-}
-
-bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
- // TODO(bapt): be sure we can't be precise
- size_t priv = GetWorkingSetSize();
- if (!priv)
- return false;
- ws_usage->priv = priv / 1024;
- ws_usage->shareable = 0;
- ws_usage->shared = 0;
-
- return true;
-}
-
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
return false;
}
@@ -140,7 +70,6 @@ double ProcessMetrics::GetPlatformIndependentCPUUsage() {
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
- last_system_time_(0),
last_cpu_(0) {}
size_t GetSystemCommitCharge() {
diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc
index c622050a413..cb952950286 100644
--- a/chromium/base/process/process_metrics_unittest.cc
+++ b/chromium/base/process/process_metrics_unittest.cc
@@ -57,44 +57,6 @@ class SystemMetricsTest : public testing::Test {
DISALLOW_COPY_AND_ASSIGN(SystemMetricsTest);
};
-/////////////////////////////////////////////////////////////////////////////
-
-#if defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
-TEST_F(SystemMetricsTest, LockedBytes) {
- ProcessHandle handle = GetCurrentProcessHandle();
- std::unique_ptr<ProcessMetrics> metrics(
- ProcessMetrics::CreateProcessMetrics(handle, nullptr));
-
- size_t initial_locked_bytes;
- bool result =
- metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &initial_locked_bytes);
- ASSERT_TRUE(result);
-
- size_t size = 8 * 1024 * 1024;
- std::unique_ptr<char[]> memory(new char[size]);
- int r = mlock(memory.get(), size);
- ASSERT_EQ(0, r);
-
- size_t new_locked_bytes;
- result =
- metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
- ASSERT_TRUE(result);
-
- // There should be around |size| more locked bytes, but multi-threading might
- // cause noise.
- EXPECT_LT(initial_locked_bytes + size / 2, new_locked_bytes);
- EXPECT_GT(initial_locked_bytes + size * 1.5, new_locked_bytes);
-
- r = munlock(memory.get(), size);
- ASSERT_EQ(0, r);
-
- result =
- metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
- ASSERT_TRUE(result);
- EXPECT_EQ(initial_locked_bytes, new_locked_bytes);
-}
-#endif // defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
-
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST_F(SystemMetricsTest, IsValidDiskName) {
const char invalid_input1[] = "";
diff --git a/chromium/base/process/process_metrics_unittest_ios.cc b/chromium/base/process/process_metrics_unittest_ios.cc
deleted file mode 100644
index 22bd97e7911..00000000000
--- a/chromium/base/process/process_metrics_unittest_ios.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/process/process_metrics.h"
-
-#include <memory>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-TEST(ProcessMetricsTestIos, Memory) {
- std::unique_ptr<base::ProcessMetrics> process_metrics(
- base::ProcessMetrics::CreateProcessMetrics(
- base::GetCurrentProcessHandle()));
-
- ASSERT_NE(0u, process_metrics->GetWorkingSetSize());
-}
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index 61f0bf4ad06..faabdbf63a2 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -46,102 +46,6 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
return WrapUnique(new ProcessMetrics(process));
}
-size_t ProcessMetrics::GetPagefileUsage() const {
- PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
- return pmc.PagefileUsage;
- }
- return 0;
-}
-
-// Returns the peak space allocated for the pagefile, in bytes.
-size_t ProcessMetrics::GetPeakPagefileUsage() const {
- PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
- return pmc.PeakPagefileUsage;
- }
- return 0;
-}
-
-// Returns the current working set size, in bytes.
-size_t ProcessMetrics::GetWorkingSetSize() const {
- PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
- return pmc.WorkingSetSize;
- }
- return 0;
-}
-
-// Returns the peak working set size, in bytes.
-size_t ProcessMetrics::GetPeakWorkingSetSize() const {
- PROCESS_MEMORY_COUNTERS pmc;
- if (GetProcessMemoryInfo(process_.Get(), &pmc, sizeof(pmc))) {
- return pmc.PeakWorkingSetSize;
- }
- return 0;
-}
-
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
- size_t* shared_bytes) const {
- // PROCESS_MEMORY_COUNTERS_EX is not supported until XP SP2.
- // GetProcessMemoryInfo() will simply fail on prior OS. So the requested
- // information is simply not available. Hence, we will return 0 on unsupported
- // OSes. Unlike most Win32 API, we don't need to initialize the "cb" member.
- PROCESS_MEMORY_COUNTERS_EX pmcx;
- if (private_bytes &&
- GetProcessMemoryInfo(process_.Get(),
- reinterpret_cast<PROCESS_MEMORY_COUNTERS*>(&pmcx),
- sizeof(pmcx))) {
- *private_bytes = pmcx.PrivateUsage;
- }
-
- if (shared_bytes) {
- WorkingSetKBytes ws_usage;
- if (!GetWorkingSetKBytes(&ws_usage))
- return false;
-
- *shared_bytes = ws_usage.shared * 1024;
- }
-
- return true;
-}
-
-void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
- MEMORY_BASIC_INFORMATION mbi = {0};
- size_t committed_private = 0;
- size_t committed_mapped = 0;
- size_t committed_image = 0;
- void* base_address = NULL;
- while (VirtualQueryEx(process_.Get(), base_address, &mbi, sizeof(mbi)) ==
- sizeof(mbi)) {
- if (mbi.State == MEM_COMMIT) {
- if (mbi.Type == MEM_PRIVATE) {
- committed_private += mbi.RegionSize;
- } else if (mbi.Type == MEM_MAPPED) {
- committed_mapped += mbi.RegionSize;
- } else if (mbi.Type == MEM_IMAGE) {
- committed_image += mbi.RegionSize;
- } else {
- NOTREACHED();
- }
- }
- void* new_base = (static_cast<BYTE*>(mbi.BaseAddress)) + mbi.RegionSize;
- // Avoid infinite loop by weird MEMORY_BASIC_INFORMATION.
- // If we query 64bit processes in a 32bit process, VirtualQueryEx()
- // returns such data.
- if (new_base <= base_address) {
- usage->image = 0;
- usage->mapped = 0;
- usage->priv = 0;
- return;
- }
- base_address = new_base;
- }
- usage->image = committed_image / 1024;
- usage->mapped = committed_mapped / 1024;
- usage->priv = committed_private / 1024;
-}
-
namespace {
class WorkingSetInformationBuffer {
@@ -224,57 +128,6 @@ class WorkingSetInformationBuffer {
} // namespace
-bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
- size_t ws_private = 0;
- size_t ws_shareable = 0;
- size_t ws_shared = 0;
-
- DCHECK(ws_usage);
- memset(ws_usage, 0, sizeof(*ws_usage));
-
- WorkingSetInformationBuffer buffer;
- if (!buffer.QueryPageEntries(process_.Get()))
- return false;
-
- size_t num_page_entries = buffer.GetPageEntryCount();
- for (size_t i = 0; i < num_page_entries; i++) {
- if (buffer->WorkingSetInfo[i].Shared) {
- ws_shareable++;
- if (buffer->WorkingSetInfo[i].ShareCount > 1)
- ws_shared++;
- } else {
- ws_private++;
- }
- }
-
- ws_usage->priv = ws_private * PAGESIZE_KB;
- ws_usage->shareable = ws_shareable * PAGESIZE_KB;
- ws_usage->shared = ws_shared * PAGESIZE_KB;
-
- return true;
-}
-
-// This function calculates the proportional set size for a process.
-bool ProcessMetrics::GetProportionalSetSizeBytes(uint64_t* pss_bytes) const {
- double ws_pss = 0.0;
-
- WorkingSetInformationBuffer buffer;
- if (!buffer.QueryPageEntries(process_.Get()))
- return false;
-
- size_t num_page_entries = buffer.GetPageEntryCount();
- for (size_t i = 0; i < num_page_entries; i++) {
- if (buffer->WorkingSetInfo[i].Shared &&
- buffer->WorkingSetInfo[i].ShareCount > 0)
- ws_pss += 1.0 / buffer->WorkingSetInfo[i].ShareCount;
- else
- ws_pss += 1.0;
- }
-
- *pss_bytes = static_cast<uint64_t>(ws_pss * GetPageSize());
- return true;
-}
-
static uint64_t FileTimeToUTC(const FILETIME& ftime) {
LARGE_INTEGER li;
li.LowPart = ftime.dwLowDateTime;
@@ -325,11 +178,11 @@ bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
ProcessMetrics::ProcessMetrics(ProcessHandle process) : last_system_time_(0) {
if (process) {
- HANDLE duplicate_handle;
+ HANDLE duplicate_handle = INVALID_HANDLE_VALUE;
BOOL result = ::DuplicateHandle(::GetCurrentProcess(), process,
::GetCurrentProcess(), &duplicate_handle,
PROCESS_QUERY_INFORMATION, FALSE, 0);
- DCHECK(result);
+ DPCHECK(result);
process_.Set(duplicate_handle);
}
}
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index e7dc90ee499..7645b78b19b 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -201,8 +201,11 @@ bool WaitForExitWithTimeoutImpl(base::ProcessHandle handle,
}
int status;
- if (!WaitpidWithTimeout(handle, &status, timeout))
- return exited;
+ if (!WaitpidWithTimeout(handle, &status, timeout)) {
+ // If multiple threads wait on the same |handle| then one wait will succeed
+ // and the other will fail with errno set to ECHILD.
+ return exited || (errno == ECHILD);
+ }
if (WIFSIGNALED(status)) {
if (exit_code)
*exit_code = -1;
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 8e1a5342176..a2a5e9fc553 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -30,6 +30,7 @@
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/multiprocess_test.h"
+#include "base/test/scoped_task_environment.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
@@ -369,7 +370,7 @@ MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
// This test intentionally crashes, so we don't need to run it under
// AddressSanitizer.
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
#define MAYBE_GetTerminationStatusCrash DISABLED_GetTerminationStatusCrash
#else
#define MAYBE_GetTerminationStatusCrash GetTerminationStatusCrash
@@ -489,6 +490,47 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigTerm) {
}
#endif // defined(OS_POSIX)
+TEST_F(ProcessUtilTest, EnsureTerminationUndying) {
+ test::ScopedTaskEnvironment task_environment;
+
+ Process child_process = SpawnChild("process_util_test_never_die");
+ ASSERT_TRUE(child_process.IsValid());
+
+ EnsureProcessTerminated(child_process.Duplicate());
+
+ // Allow a generous timeout, to cope with slow/loaded test bots.
+ EXPECT_TRUE(child_process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), nullptr));
+}
+
+MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
+ while (1) {
+ PlatformThread::Sleep(TimeDelta::FromSeconds(500));
+ }
+ return kSuccess;
+}
+
+TEST_F(ProcessUtilTest, EnsureTerminationGracefulExit) {
+ test::ScopedTaskEnvironment task_environment;
+
+ Process child_process = SpawnChild("process_util_test_die_immediately");
+ ASSERT_TRUE(child_process.IsValid());
+
+ // Wait for the child process to actually exit.
+ child_process.Duplicate().WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), nullptr);
+
+ EnsureProcessTerminated(child_process.Duplicate());
+
+ // Verify that the process is really, truly gone.
+ EXPECT_TRUE(child_process.WaitForExitWithTimeout(
+ TestTimeouts::action_max_timeout(), nullptr));
+}
+
+MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
+ return kSuccess;
+}
+
#if defined(OS_WIN)
// TODO(estade): if possible, port this test.
TEST_F(ProcessUtilTest, GetAppOutput) {
@@ -821,7 +863,9 @@ TEST_F(ProcessUtilTest, FDRemappingIncludesStdio) {
EXPECT_EQ(0, exit_code);
}
-#if defined(OS_FUCHSIA)
+// TODO(https://crbug.com/793412): Disable on Debug/component builds due to
+// process launch taking too long and triggering timeouts.
+#if defined(OS_FUCHSIA) && defined(NDEBUG)
const uint16_t kStartupHandleId = 43;
MULTIPROCESS_TEST_MAIN(ProcessUtilsVerifyHandle) {
zx_handle_t handle =
@@ -856,11 +900,11 @@ TEST_F(ProcessUtilTest, LaunchWithHandleTransfer) {
// Read from the pipe to verify that the child received it.
zx_signals_t signals = 0;
result = zx_object_wait_one(
- handles[1], ZX_SOCKET_READABLE,
+ handles[1], ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED,
(base::TimeTicks::Now() + TestTimeouts::action_timeout()).ToZxTime(),
&signals);
- EXPECT_EQ(ZX_OK, result);
- EXPECT_TRUE(signals & ZX_SOCKET_READABLE);
+ ASSERT_EQ(ZX_OK, result);
+ ASSERT_TRUE(signals & ZX_SOCKET_READABLE);
size_t bytes_read = 0;
char buf[16] = {0};
@@ -876,7 +920,7 @@ TEST_F(ProcessUtilTest, LaunchWithHandleTransfer) {
&exit_code));
EXPECT_EQ(0, exit_code);
}
-#endif // defined(OS_FUCHSIA)
+#endif // defined(OS_FUCHSIA) && defined(NDEBUG)
namespace {
@@ -1054,60 +1098,6 @@ TEST_F(ProcessUtilTest, GetParentProcessId) {
}
#endif // !defined(OS_FUCHSIA)
-// TODO(port): port those unit tests.
-bool IsProcessDead(ProcessHandle child) {
-#if defined(OS_FUCHSIA)
- // ProcessHandle is an zx_handle_t, not a pid on Fuchsia, so waitpid() doesn't
- // make sense.
- zx_signals_t signals;
- // Timeout of 0 to check for termination, but non-blocking.
- if (zx_object_wait_one(child, ZX_TASK_TERMINATED, 0, &signals) == ZX_OK) {
- DCHECK(signals & ZX_TASK_TERMINATED);
- return true;
- }
- return false;
-#else
- // waitpid() will actually reap the process which is exactly NOT what we
- // want to test for. The good thing is that if it can't find the process
- // we'll get a nice value for errno which we can test for.
- const pid_t result = HANDLE_EINTR(waitpid(child, nullptr, WNOHANG));
- return result == -1 && errno == ECHILD;
-#endif
-}
-
-TEST_F(ProcessUtilTest, DelayedTermination) {
- Process child_process = SpawnChild("process_util_test_never_die");
- ASSERT_TRUE(child_process.IsValid());
- EnsureProcessTerminated(child_process.Duplicate());
- int exit_code;
- child_process.WaitForExitWithTimeout(TimeDelta::FromSeconds(5), &exit_code);
-
- // Check that process was really killed.
- EXPECT_TRUE(IsProcessDead(child_process.Handle()));
-}
-
-MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
- while (1) {
- sleep(500);
- }
- return kSuccess;
-}
-
-TEST_F(ProcessUtilTest, ImmediateTermination) {
- Process child_process = SpawnChild("process_util_test_die_immediately");
- ASSERT_TRUE(child_process.IsValid());
- // Give it time to die.
- sleep(2);
- EnsureProcessTerminated(child_process.Duplicate());
-
- // Check that process was really killed.
- EXPECT_TRUE(IsProcessDead(child_process.Handle()));
-}
-
-MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
- return kSuccess;
-}
-
#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
class WriteToPipeDelegate : public LaunchOptions::PreExecDelegate {
public:
diff --git a/chromium/base/profiler/native_stack_sampler_mac.cc b/chromium/base/profiler/native_stack_sampler_mac.cc
index e72283e1a5a..93f944e1f74 100644
--- a/chromium/base/profiler/native_stack_sampler_mac.cc
+++ b/chromium/base/profiler/native_stack_sampler_mac.cc
@@ -13,6 +13,7 @@
#include <mach/kern_return.h>
#include <mach/mach.h>
#include <mach/thread_act.h>
+#include <mach/vm_map.h>
#include <pthread.h>
#include <sys/resource.h>
#include <sys/syslimits.h>
@@ -95,6 +96,18 @@ std::string GetUniqueId(const void* module_addr) {
return std::string();
}
+// Returns the size of the _TEXT segment of the module loaded at |module_addr|.
+size_t GetModuleTextSize(const void* module_addr) {
+ const mach_header_64* mach_header =
+ reinterpret_cast<const mach_header_64*>(module_addr);
+ DCHECK_EQ(MH_MAGIC_64, mach_header->magic);
+
+ unsigned long module_size;
+ getsegmentdata(mach_header, SEG_TEXT, &module_size);
+
+ return module_size;
+}
+
// Gets the index for the Module containing |instruction_pointer| in
// |modules|, adding it if it's not already present. Returns
// StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
@@ -122,16 +135,11 @@ size_t GetModuleIndex(const uintptr_t instruction_pointer,
base::FilePath(inf.dli_fname));
modules->push_back(module);
- const mach_header_64* mach_header =
- reinterpret_cast<const mach_header_64*>(inf.dli_fbase);
- DCHECK_EQ(MH_MAGIC_64, mach_header->magic);
-
- unsigned long module_size;
- getsegmentdata(mach_header, SEG_TEXT, &module_size);
- uintptr_t base_module_address = reinterpret_cast<uintptr_t>(mach_header);
+ uintptr_t base_module_address = reinterpret_cast<uintptr_t>(inf.dli_fbase);
size_t index = modules->size() - 1;
- profile_module_index->emplace_back(base_module_address,
- base_module_address + module_size, index);
+ profile_module_index->emplace_back(
+ base_module_address,
+ base_module_address + GetModuleTextSize(inf.dli_fbase), index);
return index;
}
@@ -228,6 +236,39 @@ uint32_t GetFrameOffset(int compact_unwind_info) {
(((1 << __builtin_popcount(UNWIND_X86_64_RBP_FRAME_OFFSET))) - 1));
}
+// True if the unwind from |leaf_frame_rip| may trigger a crash bug in
+// unw_init_local. If so, the stack walk should be aborted at the leaf frame.
+bool MayTriggerUnwInitLocalCrash(uint64_t leaf_frame_rip) {
+ // The issue here is a bug in unw_init_local that, in some unwinds, results in
+ // attempts to access memory at the address immediately following the address
+ // range of the library. When the library is the last of the mapped libraries
+ // that address is in a different memory region. Starting with 10.13.4 beta
+ // releases it appears that this region is sometimes either unmapped or mapped
+ // without read access, resulting in crashes on the attempted access. It's not
+ // clear what circumstances result in this situation; attempts to reproduce on
+ // a 10.13.4 beta did not trigger the issue.
+ //
+ // The workaround is to check if the memory address that would be accessed is
+ // readable, and if not, abort the stack walk before calling unw_init_local.
+ // As of 2018/03/19 about 0.1% of non-idle stacks on the UI and GPU main
+ // threads have a leaf frame in the last library. Since the issue appears to
+ // only occur some of the time it's expected that the quantity of lost samples
+ // will be lower than 0.1%, possibly significantly lower.
+ //
+ // TODO(lgrey): Add references above to LLVM/Radar bugs on unw_init_local once
+ // filed.
+ Dl_info info;
+ if (dladdr(reinterpret_cast<const void*>(leaf_frame_rip), &info) == 0)
+ return false;
+ uint64_t unused;
+ vm_size_t size = sizeof(unused);
+ return vm_read_overwrite(current_task(),
+ reinterpret_cast<vm_address_t>(info.dli_fbase) +
+ GetModuleTextSize(info.dli_fbase),
+ sizeof(unused),
+ reinterpret_cast<vm_address_t>(&unused), &size) != 0;
+}
+
// Walks the stack represented by |unwind_context|, calling back to the provided
// lambda for each frame. Returns false if an error occurred, otherwise returns
// true.
@@ -557,11 +598,20 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
auto* current_modules = current_modules_;
auto* profile_module_index = &profile_module_index_;
- // Unwinding sigtramp remotely is very fragile. It's a complex DWARF unwind
- // that needs to restore the entire thread context which was saved by the
- // kernel when the interrupt occurred. Bail instead of risking a crash.
+ // Check for two execution cases where we're unable to unwind, and if found,
+ // record the first frame and and bail:
+ //
+ // 1. In sigtramp: Unwinding this from another thread is very fragile. It's a
+ // complex DWARF unwind that needs to restore the entire thread context which
+ // was saved by the kernel when the interrupt occurred.
+ //
+ // 2. In the last mapped module and the memory past the module is
+ // inaccessible: unw_init_local has a bug where it attempts to access the
+ // memory immediately after the module, resulting in crashes. See
+ // MayTriggerUnwInitLocalCrash for details.
uintptr_t ip = thread_state.__rip;
- if (ip >= sigtramp_start_ && ip < sigtramp_end_) {
+ if ((ip >= sigtramp_start_ && ip < sigtramp_end_) ||
+ MayTriggerUnwInitLocalCrash(ip)) {
sample->frames.emplace_back(
ip, GetModuleIndex(ip, current_modules, profile_module_index));
return;
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index 467e2d8337a..30d8c4ea1fe 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -103,9 +103,6 @@ RunLoop::RunLoop(Type type)
DCHECK(delegate_) << "A RunLoop::Delegate must be bound to this thread prior "
"to using RunLoop.";
DCHECK(origin_task_runner_);
-
- DCHECK(IsNestingAllowedOnCurrentThread() ||
- type_ != Type::kNestableTasksAllowed);
}
RunLoop::~RunLoop() {
@@ -219,7 +216,6 @@ bool RunLoop::IsNestedOnCurrentThread() {
void RunLoop::AddNestingObserverOnCurrentThread(NestingObserver* observer) {
Delegate* delegate = tls_delegate.Get().Get();
DCHECK(delegate);
- CHECK(delegate->allow_nesting_);
delegate->nesting_observers_.AddObserver(observer);
}
@@ -227,21 +223,10 @@ void RunLoop::AddNestingObserverOnCurrentThread(NestingObserver* observer) {
void RunLoop::RemoveNestingObserverOnCurrentThread(NestingObserver* observer) {
Delegate* delegate = tls_delegate.Get().Get();
DCHECK(delegate);
- CHECK(delegate->allow_nesting_);
delegate->nesting_observers_.RemoveObserver(observer);
}
// static
-bool RunLoop::IsNestingAllowedOnCurrentThread() {
- return tls_delegate.Get().Get()->allow_nesting_;
-}
-
-// static
-void RunLoop::DisallowNestingOnCurrentThread() {
- tls_delegate.Get().Get()->allow_nesting_ = false;
-}
-
-// static
void RunLoop::QuitCurrentDeprecated() {
DCHECK(IsRunningOnCurrentThread());
tls_delegate.Get().Get()->active_run_loops_.top()->Quit();
@@ -301,7 +286,6 @@ bool RunLoop::BeforeRun() {
const bool is_nested = active_run_loops_.size() > 1;
if (is_nested) {
- CHECK(delegate_->allow_nesting_);
for (auto& observer : delegate_->nesting_observers_)
observer.OnBeginNestedRunLoop();
if (type_ == Type::kNestableTasksAllowed)
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index b7c594e16c1..d3858fcede5 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -50,9 +50,7 @@ class BASE_EXPORT RunLoop {
// recursive task processing is disabled.
//
// In general, nestable RunLoops are to be avoided. They are dangerous and
- // difficult to get right, so please use with extreme caution. To further
- // protect this: kNestableTasksAllowed RunLoops are only allowed on threads
- // where IsNestingAllowedOnCurrentThread().
+ // difficult to get right, so please use with extreme caution.
//
// A specific example where this makes a difference is:
// - The thread is running a RunLoop.
@@ -145,13 +143,6 @@ class BASE_EXPORT RunLoop {
static void AddNestingObserverOnCurrentThread(NestingObserver* observer);
static void RemoveNestingObserverOnCurrentThread(NestingObserver* observer);
- // Returns true if nesting is allowed on this thread.
- static bool IsNestingAllowedOnCurrentThread();
-
- // Disallow nesting. After this is called, running a nested RunLoop or calling
- // Add/RemoveNestingObserverOnCurrentThread() on this thread will crash.
- static void DisallowNestingOnCurrentThread();
-
// A RunLoop::Delegate is a generic interface that allows RunLoop to be
// separate from the underlying implementation of the message loop for this
// thread. It holds private state used by RunLoops on its associated thread.
@@ -207,7 +198,6 @@ class BASE_EXPORT RunLoop {
// have more than a few entries.
using RunLoopStack = base::stack<RunLoop*, std::vector<RunLoop*>>;
- bool allow_nesting_ = true;
RunLoopStack active_run_loops_;
ObserverList<RunLoop::NestingObserver> nesting_observers_;
diff --git a/chromium/base/run_loop_unittest.cc b/chromium/base/run_loop_unittest.cc
index 96060f4a660..ee12ea5fa7e 100644
--- a/chromium/base/run_loop_unittest.cc
+++ b/chromium/base/run_loop_unittest.cc
@@ -596,8 +596,6 @@ class MockTask {
} // namespace
TEST_P(RunLoopTest, NestingObservers) {
- EXPECT_TRUE(RunLoop::IsNestingAllowedOnCurrentThread());
-
testing::StrictMock<MockNestingObserver> nesting_observer;
testing::StrictMock<MockTask> mock_task_a;
testing::StrictMock<MockTask> mock_task_b;
@@ -606,10 +604,6 @@ TEST_P(RunLoopTest, NestingObservers) {
const RepeatingClosure run_nested_loop = Bind([]() {
RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce([]() {
- EXPECT_TRUE(RunLoop::IsNestingAllowedOnCurrentThread());
- }));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
nested_run_loop.QuitClosure());
nested_run_loop.Run();
@@ -617,7 +611,9 @@ TEST_P(RunLoopTest, NestingObservers) {
// Generate a stack of nested RunLoops. OnBeginNestedRunLoop() is expected
// when beginning each nesting depth and OnExitNestedRunLoop() is expected
- // when exiting each nesting depth.
+ // when exiting each nesting depth. Each one of these tasks is ahead of the
+ // QuitClosures as those are only posted at the end of the queue when
+ // |run_nested_loop| is executed.
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_nested_loop);
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
@@ -640,21 +636,6 @@ TEST_P(RunLoopTest, NestingObservers) {
RunLoop::RemoveNestingObserverOnCurrentThread(&nesting_observer);
}
-// Disabled on Android per http://crbug.com/643760.
-#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-TEST_P(RunLoopTest, DisallowNestingDeathTest) {
- EXPECT_TRUE(RunLoop::IsNestingAllowedOnCurrentThread());
- RunLoop::DisallowNestingOnCurrentThread();
- EXPECT_FALSE(RunLoop::IsNestingAllowedOnCurrentThread());
-
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce([]() {
- RunLoop nested_run_loop;
- nested_run_loop.RunUntilIdle();
- }));
- EXPECT_DEATH({ run_loop_.RunUntilIdle(); }, "");
-}
-#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
TEST_P(RunLoopTest, DisallowRunningForTesting) {
RunLoop::ScopedDisallowRunningForTesting disallow_running;
EXPECT_DCHECK_DEATH({ run_loop_.Run(); });
diff --git a/chromium/base/safe_numerics_unittest.cc b/chromium/base/safe_numerics_unittest.cc
index dac79b468e6..44675cf72cc 100644
--- a/chromium/base/safe_numerics_unittest.cc
+++ b/chromium/base/safe_numerics_unittest.cc
@@ -18,6 +18,13 @@
#pragma warning(disable : 4293) // Invalid shift.
#endif
+// This may not need to come before the base/numerics headers, but let's keep
+// it close to the MSVC equivalent.
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winteger-overflow"
+#endif
+
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
@@ -1625,5 +1632,9 @@ TEST(SafeNumerics, VariadicNumericOperations) {
}
}
+#if defined(__clang__)
+#pragma clang diagnostic pop // -Winteger-overflow
+#endif
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
index 92165ee427f..65e858fbf9c 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
@@ -11,11 +11,12 @@
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/atomicops.h"
-#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
+#include "base/macros.h"
#include "base/no_destructor.h"
#include "base/partition_alloc_buildflags.h"
#include "base/rand_util.h"
+#include "base/threading/thread_local_storage.h"
#include "build/build_config.h"
namespace base {
@@ -46,20 +47,12 @@ Atomic32 g_operations_in_flight;
// When set to true, threads should not enter lock-free paths.
Atomic32 g_fast_path_is_closed;
-// Number of bytes left to form the sample being collected.
-AtomicWord g_bytes_left;
-
-// Current sample size to be accumulated. Basically:
-// <bytes accumulated toward sample> == g_current_interval - g_bytes_left
-AtomicWord g_current_interval;
-
// Sampling interval parameter, the mean value for intervals between samples.
AtomicWord g_sampling_interval = kDefaultSamplingIntervalBytes;
// Last generated sample ordinal number.
uint32_t g_last_sample_ordinal = 0;
-SamplingHeapProfiler* g_sampling_heap_profiler_instance;
void (*g_hooks_install_callback)();
Atomic32 g_hooks_installed;
@@ -167,6 +160,12 @@ void PartitionFreeHook(void* address) {
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
+ThreadLocalStorage::Slot& AccumulatedBytesTLS() {
+ static base::NoDestructor<base::ThreadLocalStorage::Slot>
+ accumulated_bytes_tls;
+ return *accumulated_bytes_tls;
+}
+
} // namespace
SamplingHeapProfiler::Sample::Sample(size_t size,
@@ -178,14 +177,23 @@ SamplingHeapProfiler::Sample::Sample(const Sample&) = default;
SamplingHeapProfiler::Sample::~Sample() = default;
+SamplingHeapProfiler* SamplingHeapProfiler::instance_;
+
SamplingHeapProfiler::SamplingHeapProfiler() {
- g_sampling_heap_profiler_instance = this;
+ instance_ = this;
+}
+
+// static
+void SamplingHeapProfiler::InitTLSSlot() {
+ // Preallocate the TLS slot early, so it can't cause reentracy issues
+ // when sampling is started.
+ ignore_result(AccumulatedBytesTLS().Get());
}
// static
void SamplingHeapProfiler::InstallAllocatorHooksOnce() {
static bool hook_installed = InstallAllocatorHooks();
- base::debug::Alias(&hook_installed);
+ ignore_result(hook_installed);
}
// static
@@ -193,7 +201,7 @@ bool SamplingHeapProfiler::InstallAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
base::allocator::InsertAllocatorDispatch(&g_allocator_dispatch);
#else
- base::debug::Alias(&g_allocator_dispatch);
+ ignore_result(g_allocator_dispatch);
DLOG(WARNING)
<< "base::allocator shims are not available for memory sampling.";
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
@@ -225,10 +233,6 @@ void SamplingHeapProfiler::SetHooksInstallCallback(
uint32_t SamplingHeapProfiler::Start() {
InstallAllocatorHooksOnce();
- size_t next_interval =
- GetNextSampleInterval(base::subtle::Acquire_Load(&g_sampling_interval));
- base::subtle::Release_Store(&g_current_interval, next_interval);
- base::subtle::Release_Store(&g_bytes_left, next_interval);
base::subtle::Barrier_AtomicIncrement(&g_running, 1);
return g_last_sample_ordinal;
}
@@ -275,40 +279,32 @@ void SamplingHeapProfiler::RecordAlloc(void* address,
uint32_t skip_frames) {
if (UNLIKELY(!base::subtle::NoBarrier_Load(&g_running)))
return;
-
- // Lock-free algorithm decreases number of bytes left to form a sample.
- // The thread that makes it to reach zero is responsible for recording
- // a sample.
- AtomicWord bytes_left = base::subtle::NoBarrier_AtomicIncrement(
- &g_bytes_left, -static_cast<AtomicWord>(size));
- if (LIKELY(bytes_left > 0))
+ if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
return;
- // Return if g_bytes_left was already zero or below before we decreased it.
- // That basically means that another thread in fact crossed the threshold.
- if (LIKELY(bytes_left + static_cast<AtomicWord>(size) <= 0))
- return;
+ // TODO(alph): On MacOS it may call the hook several times for a single
+ // allocation. Handle the case.
- // Only one thread that crossed the threshold is running the code below.
- // It is going to be recording the sample.
+ intptr_t accumulated_bytes =
+ reinterpret_cast<intptr_t>(AccumulatedBytesTLS().Get());
+ accumulated_bytes += size;
+ if (LIKELY(accumulated_bytes < 0)) {
+ AccumulatedBytesTLS().Set(reinterpret_cast<void*>(accumulated_bytes));
+ return;
+ }
- size_t accumulated = base::subtle::Acquire_Load(&g_current_interval);
- size_t next_interval =
- GetNextSampleInterval(base::subtle::NoBarrier_Load(&g_sampling_interval));
+ size_t mean_interval = base::subtle::NoBarrier_Load(&g_sampling_interval);
+ size_t samples = accumulated_bytes / mean_interval;
+ accumulated_bytes %= mean_interval;
- // Make sure g_current_interval is set before updating g_bytes_left.
- base::subtle::Release_Store(&g_current_interval, next_interval);
+ do {
+ accumulated_bytes -= GetNextSampleInterval(mean_interval);
+ ++samples;
+ } while (accumulated_bytes >= 0);
- // Put the next sampling interval to g_bytes_left, thus allowing threads to
- // start accumulating bytes towards the next sample.
- // Simultaneously extract the current value (which is negative or zero)
- // and take it into account when calculating the number of bytes
- // accumulated for the current sample.
- accumulated -=
- base::subtle::NoBarrier_AtomicExchange(&g_bytes_left, next_interval);
+ AccumulatedBytesTLS().Set(reinterpret_cast<void*>(accumulated_bytes));
- g_sampling_heap_profiler_instance->DoRecordAlloc(accumulated, size, address,
- kSkipBaseAllocatorFrames);
+ instance_->DoRecordAlloc(samples * mean_interval, size, address, skip_frames);
}
void SamplingHeapProfiler::RecordStackTrace(Sample* sample,
@@ -331,8 +327,6 @@ void SamplingHeapProfiler::DoRecordAlloc(size_t total_allocated,
size_t size,
void* address,
uint32_t skip_frames) {
- // TODO(alph): It's better to use a recursive mutex and move the check
- // inside the critical section.
if (entered_.Get())
return;
base::AutoLock lock(mutex_);
@@ -364,13 +358,15 @@ void SamplingHeapProfiler::RecordFree(void* address) {
bool maybe_sampled = true; // Pessimistically assume allocation was sampled.
base::subtle::Barrier_AtomicIncrement(&g_operations_in_flight, 1);
if (LIKELY(!base::subtle::NoBarrier_Load(&g_fast_path_is_closed)))
- maybe_sampled = g_sampling_heap_profiler_instance->samples_.count(address);
+ maybe_sampled = instance_->samples_.count(address);
base::subtle::Barrier_AtomicIncrement(&g_operations_in_flight, -1);
if (maybe_sampled)
- g_sampling_heap_profiler_instance->DoRecordFree(address);
+ instance_->DoRecordFree(address);
}
void SamplingHeapProfiler::DoRecordFree(void* address) {
+ if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
+ return;
if (entered_.Get())
return;
base::AutoLock lock(mutex_);
@@ -398,15 +394,19 @@ void SamplingHeapProfiler::SuppressRandomnessForTest(bool suppress) {
void SamplingHeapProfiler::AddSamplesObserver(SamplesObserver* observer) {
base::AutoLock lock(mutex_);
CHECK(!entered_.Get());
+ entered_.Set(true);
observers_.push_back(observer);
+ entered_.Set(false);
}
void SamplingHeapProfiler::RemoveSamplesObserver(SamplesObserver* observer) {
base::AutoLock lock(mutex_);
CHECK(!entered_.Get());
+ entered_.Set(true);
auto it = std::find(observers_.begin(), observers_.end(), observer);
CHECK(it != observers_.end());
observers_.erase(it);
+ entered_.Set(false);
}
std::vector<SamplingHeapProfiler::Sample> SamplingHeapProfiler::GetSamples(
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
index e528a896666..b0573b886de 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
@@ -49,6 +49,10 @@ class BASE_EXPORT SamplingHeapProfiler {
virtual void SampleRemoved(uint32_t id) = 0;
};
+ // Must be called early during the process initialization. It creates and
+ // reserves a TLS slot.
+ static void InitTLSSlot();
+
// This is an entry point for plugging in an external allocator.
// Profiler will invoke the provided callback upon initialization.
// The callback should install hooks onto the corresponding memory allocator
@@ -94,6 +98,8 @@ class BASE_EXPORT SamplingHeapProfiler {
std::unordered_map<void*, Sample> samples_;
std::vector<SamplesObserver*> observers_;
+ static SamplingHeapProfiler* instance_;
+
friend class base::NoDestructor<SamplingHeapProfiler>;
DISALLOW_COPY_AND_ASSIGN(SamplingHeapProfiler);
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc b/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
index 1e7972b33fb..6602e6c5a60 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
@@ -5,6 +5,7 @@
#include "base/sampling_heap_profiler/sampling_heap_profiler.h"
#include <stdlib.h>
+#include <cinttypes>
#include "base/allocator/allocator_shim.h"
#include "base/debug/alias.h"
@@ -46,6 +47,7 @@ class SamplesCollector : public SamplingHeapProfiler::SamplesObserver {
};
TEST_F(SamplingHeapProfilerTest, CollectSamples) {
+ SamplingHeapProfiler::InitTLSSlot();
SamplesCollector collector(10000);
SamplingHeapProfiler* profiler = SamplingHeapProfiler::GetInstance();
profiler->SuppressRandomnessForTest(true);
@@ -96,10 +98,14 @@ class MyThread2 : public SimpleThread {
};
void CheckAllocationPattern(void (*allocate_callback)()) {
+ SamplingHeapProfiler::InitTLSSlot();
SamplingHeapProfiler* profiler = SamplingHeapProfiler::GetInstance();
profiler->SuppressRandomnessForTest(false);
profiler->SetSamplingInterval(10240);
- for (int i = 0; i < 40; ++i) {
+ base::TimeTicks t0 = base::TimeTicks::Now();
+ std::map<size_t, size_t> sums;
+ const int iterations = 40;
+ for (int i = 0; i < iterations; ++i) {
uint32_t id = profiler->Start();
allocate_callback();
std::vector<SamplingHeapProfiler::Sample> samples =
@@ -110,11 +116,23 @@ void CheckAllocationPattern(void (*allocate_callback)()) {
buckets[sample.size] += sample.total;
}
for (auto& it : buckets) {
- if (it.first == 400 || it.first == 700 || it.first == 20480)
- printf("%u,", static_cast<uint32_t>(it.second));
+ if (it.first != 400 && it.first != 700 && it.first != 20480)
+ continue;
+ sums[it.first] += it.second;
+ printf("%zu,", it.second);
}
printf("\n");
}
+
+ printf("Time taken %" PRIu64 "ms\n",
+ (base::TimeTicks::Now() - t0).InMilliseconds());
+
+ for (auto sum : sums) {
+ intptr_t expected = sum.first * kNumberOfAllocations;
+ intptr_t actual = sum.second / iterations;
+ printf("%zu:\tmean: %zu\trelative error: %.2f%%\n", sum.first, actual,
+ 100. * (actual - expected) / expected);
+ }
}
// Manual tests to check precision of the sampling profiler.
diff --git a/chromium/base/security_unittest.cc b/chromium/base/security_unittest.cc
index 8515179be38..13e9594e2d2 100644
--- a/chromium/base/security_unittest.cc
+++ b/chromium/base/security_unittest.cc
@@ -72,7 +72,7 @@ void OverflowTestsSoftExpectTrue(bool overflow_detected) {
}
}
-#if defined(OS_IOS) || defined(ADDRESS_SANITIZER) || \
+#if defined(OS_IOS) || defined(OS_FUCHSIA) || defined(ADDRESS_SANITIZER) || \
defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
#define MAYBE_NewOverflow DISABLED_NewOverflow
#else
@@ -80,6 +80,8 @@ void OverflowTestsSoftExpectTrue(bool overflow_detected) {
#endif
// Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
// IOS doesn't honor nothrow, so disable the test there.
+// TODO(https://crbug.com/828229): Fuchsia SDK exports an incorrect new[] that
+// gets picked up in Debug/component builds, breaking this test.
// Disabled under XSan because asan aborts when new returns nullptr,
// https://bugs.chromium.org/p/chromium/issues/detail?id=690271#c15
TEST(SecurityTest, MAYBE_NewOverflow) {
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index 186bf12b440..6d47bc3a609 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -21,6 +21,7 @@
#include <vector>
#include "base/logging.h"
+#include "base/optional.h"
namespace base {
@@ -336,6 +337,17 @@ class IsNotIn {
const typename Collection::const_iterator end_;
};
+// Helper for returning the optional value's address, or nullptr.
+template <class T>
+T* OptionalOrNullptr(base::Optional<T>& optional) {
+ return optional.has_value() ? &optional.value() : nullptr;
+}
+
+template <class T>
+const T* OptionalOrNullptr(const base::Optional<T>& optional) {
+ return optional.has_value() ? &optional.value() : nullptr;
+}
+
} // namespace base
#endif // BASE_STL_UTIL_H_
diff --git a/chromium/base/stl_util_unittest.cc b/chromium/base/stl_util_unittest.cc
index 8d7364fd4bd..be4a736e0f4 100644
--- a/chromium/base/stl_util_unittest.cc
+++ b/chromium/base/stl_util_unittest.cc
@@ -438,5 +438,14 @@ TEST(ContainsValue, OrdinaryArrays) {
EXPECT_TRUE(ContainsValue(allowed_chars_including_nul, 0));
}
+TEST(STLUtilTest, OptionalOrNullptr) {
+ Optional<float> optional;
+ EXPECT_EQ(nullptr, base::OptionalOrNullptr(optional));
+
+ optional = 0.1f;
+ EXPECT_EQ(&optional.value(), base::OptionalOrNullptr(optional));
+ EXPECT_NE(nullptr, base::OptionalOrNullptr(optional));
+}
+
} // namespace
} // namespace base
diff --git a/chromium/base/strings/old_utf_string_conversions.cc b/chromium/base/strings/old_utf_string_conversions.cc
new file mode 100644
index 00000000000..5cab038ab48
--- /dev/null
+++ b/chromium/base/strings/old_utf_string_conversions.cc
@@ -0,0 +1,262 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/old_utf_string_conversions.h"
+
+#include <stdint.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "build/build_config.h"
+
+namespace base_old {
+
+using base::IsStringASCII;
+using base::ReadUnicodeCharacter;
+using base::WriteUnicodeCharacter;
+
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src,
+ size_t src_len,
+ std::string* output) {
+ output->clear();
+ if (src_len == 0)
+ return;
+ if (src[0] < 0x80) {
+ // Assume that the entire input will be ASCII.
+ output->reserve(src_len);
+ } else {
+ // Assume that the entire input is non-ASCII and will have 3 bytes per char.
+ output->reserve(src_len * 3);
+ }
+}
+
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src,
+ size_t src_len,
+ STRING* output) {
+ output->clear();
+ if (src_len == 0)
+ return;
+ if (static_cast<unsigned char>(src[0]) < 0x80) {
+ // Assume the input is all ASCII, which means 1:1 correspondence.
+ output->reserve(src_len);
+ } else {
+ // Otherwise assume that the UTF-8 sequences will have 2 bytes for each
+ // character.
+ output->reserve(src_len / 2);
+ }
+}
+
+namespace {
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Converts the given source Unicode character type to the given destination
+// Unicode character type as a STL string. The given input buffer and size
+// determine the source, and the given output STL string will be replaced by
+// the result.
+template <typename SRC_CHAR, typename DEST_STRING>
+bool ConvertUnicode(const SRC_CHAR* src, size_t src_len, DEST_STRING* output) {
+ // ICU requires 32-bit numbers.
+ bool success = true;
+ int32_t src_len32 = static_cast<int32_t>(src_len);
+ for (int32_t i = 0; i < src_len32; i++) {
+ uint32_t code_point;
+ if (ReadUnicodeCharacter(src, src_len32, &i, &code_point)) {
+ WriteUnicodeCharacter(code_point, output);
+ } else {
+ WriteUnicodeCharacter(0xFFFD, output);
+ success = false;
+ }
+ }
+
+ return success;
+}
+
+} // namespace
+
+// UTF-8 <-> Wide --------------------------------------------------------------
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+ if (IsStringASCII(std::wstring(src, src_len))) {
+ output->assign(src, src + src_len);
+ return true;
+ } else {
+ PrepareForUTF8Output(src, src_len, output);
+ return ConvertUnicode(src, src_len, output);
+ }
+}
+
+std::string WideToUTF8(const std::wstring& wide) {
+ if (IsStringASCII(wide)) {
+ return std::string(wide.data(), wide.data() + wide.length());
+ }
+
+ std::string ret;
+ PrepareForUTF8Output(wide.data(), wide.length(), &ret);
+ ConvertUnicode(wide.data(), wide.length(), &ret);
+ return ret;
+}
+
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
+ if (IsStringASCII(StringPiece(src, src_len))) {
+ output->assign(src, src + src_len);
+ return true;
+ } else {
+ PrepareForUTF16Or32Output(src, src_len, output);
+ return ConvertUnicode(src, src_len, output);
+ }
+}
+
+std::wstring UTF8ToWide(StringPiece utf8) {
+ if (IsStringASCII(utf8)) {
+ return std::wstring(utf8.begin(), utf8.end());
+ }
+
+ std::wstring ret;
+ PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+ ConvertUnicode(utf8.data(), utf8.length(), &ret);
+ return ret;
+}
+
+// UTF-16 <-> Wide -------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF16)
+
+// When wide == UTF-16, then conversions are a NOP.
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+ output->assign(src, src_len);
+ return true;
+}
+
+string16 WideToUTF16(const std::wstring& wide) {
+ return wide;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+ output->assign(src, src_len);
+ return true;
+}
+
+std::wstring UTF16ToWide(const string16& utf16) {
+ return utf16;
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+ output->clear();
+ // Assume that normally we won't have any non-BMP characters so the counts
+ // will be the same.
+ output->reserve(src_len);
+ return ConvertUnicode(src, src_len, output);
+}
+
+string16 WideToUTF16(const std::wstring& wide) {
+ string16 ret;
+ WideToUTF16(wide.data(), wide.length(), &ret);
+ return ret;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+ output->clear();
+ // Assume that normally we won't have any non-BMP characters so the counts
+ // will be the same.
+ output->reserve(src_len);
+ return ConvertUnicode(src, src_len, output);
+}
+
+std::wstring UTF16ToWide(const string16& utf16) {
+ std::wstring ret;
+ UTF16ToWide(utf16.data(), utf16.length(), &ret);
+ return ret;
+}
+
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// UTF16 <-> UTF8 --------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF32)
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+ if (IsStringASCII(StringPiece(src, src_len))) {
+ output->assign(src, src + src_len);
+ return true;
+ } else {
+ PrepareForUTF16Or32Output(src, src_len, output);
+ return ConvertUnicode(src, src_len, output);
+ }
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+ if (IsStringASCII(utf8)) {
+ return string16(utf8.begin(), utf8.end());
+ }
+
+ string16 ret;
+ PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
+ ConvertUnicode(utf8.data(), utf8.length(), &ret);
+ return ret;
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+ if (IsStringASCII(StringPiece16(src, src_len))) {
+ output->assign(src, src + src_len);
+ return true;
+ } else {
+ PrepareForUTF8Output(src, src_len, output);
+ return ConvertUnicode(src, src_len, output);
+ }
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+ std::string ret;
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
+ UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
+ return ret;
+}
+
+#elif defined(WCHAR_T_IS_UTF16)
+// Easy case since we can use the "wide" versions we already wrote above.
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+ return UTF8ToWide(src, src_len, output);
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+ return UTF8ToWide(utf8);
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+ return WideToUTF8(src, src_len, output);
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+ if (IsStringASCII(utf16))
+ return std::string(utf16.data(), utf16.data() + utf16.length());
+
+ std::string ret;
+ PrepareForUTF8Output(utf16.data(), utf16.length(), &ret);
+ ConvertUnicode(utf16.data(), utf16.length(), &ret);
+ return ret;
+}
+
+#endif
+
+string16 ASCIIToUTF16(StringPiece ascii) {
+ DCHECK(IsStringASCII(ascii)) << ascii;
+ return string16(ascii.begin(), ascii.end());
+}
+
+std::string UTF16ToASCII(StringPiece16 utf16) {
+ DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);
+ return std::string(utf16.begin(), utf16.end());
+}
+
+} // namespace base_old
diff --git a/chromium/base/strings/old_utf_string_conversions.h b/chromium/base/strings/old_utf_string_conversions.h
new file mode 100644
index 00000000000..2f0c6c51a60
--- /dev/null
+++ b/chromium/base/strings/old_utf_string_conversions.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_OLD_UTF_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_OLD_UTF_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base_old {
+
+using base::char16;
+using base::string16;
+using base::StringPiece16;
+using base::StringPiece;
+
+// These convert between UTF-8, -16, and -32 strings. They are potentially slow,
+// so avoid unnecessary conversions. The low-level versions return a boolean
+// indicating whether the conversion was 100% valid. In this case, it will still
+// do the best it can and put the result in the output buffer. The versions that
+// return strings ignore this error and just return the best conversion
+// possible.
+BASE_EXPORT bool WideToUTF8(const wchar_t* src,
+ size_t src_len,
+ std::string* output);
+BASE_EXPORT std::string WideToUTF8(const std::wstring& wide);
+BASE_EXPORT bool UTF8ToWide(const char* src,
+ size_t src_len,
+ std::wstring* output);
+BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8);
+
+BASE_EXPORT bool WideToUTF16(const wchar_t* src,
+ size_t src_len,
+ string16* output);
+BASE_EXPORT string16 WideToUTF16(const std::wstring& wide);
+BASE_EXPORT bool UTF16ToWide(const char16* src,
+ size_t src_len,
+ std::wstring* output);
+BASE_EXPORT std::wstring UTF16ToWide(const string16& utf16);
+
+BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
+BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8);
+BASE_EXPORT bool UTF16ToUTF8(const char16* src,
+ size_t src_len,
+ std::string* output);
+BASE_EXPORT std::string UTF16ToUTF8(StringPiece16 utf16);
+
+// This converts an ASCII string, typically a hardcoded constant, to a UTF16
+// string.
+BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii);
+
+// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
+// beforehand.
+BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16);
+
+} // namespace base_old
+
+#endif // BASE_STRINGS_OLD_UTF_STRING_CONVERSIONS_H_
diff --git a/chromium/base/strings/strcat.h b/chromium/base/strings/strcat.h
index b249d4919e8..44c6211d6b5 100644
--- a/chromium/base/strings/strcat.h
+++ b/chromium/base/strings/strcat.h
@@ -11,6 +11,12 @@
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+// To resolve a conflict with Win32 API StrCat macro.
+#include "base/win/windows_types.h"
+#endif
namespace base {
diff --git a/chromium/base/strings/string_number_conversions_fuzzer.cc b/chromium/base/strings/string_number_conversions_fuzzer.cc
new file mode 100644
index 00000000000..2fed7de9c55
--- /dev/null
+++ b/chromium/base/strings/string_number_conversions_fuzzer.cc
@@ -0,0 +1,67 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/strings/string_number_conversions.h"
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ base::StringPiece string_piece_input(reinterpret_cast<const char*>(data),
+ size);
+ std::string string_input(reinterpret_cast<const char*>(data), size);
+
+ int out_int;
+ base::StringToInt(string_piece_input, &out_int);
+ unsigned out_uint;
+ base::StringToUint(string_piece_input, &out_uint);
+ int64_t out_int64;
+ base::StringToInt64(string_piece_input, &out_int64);
+ uint64_t out_uint64;
+ base::StringToUint64(string_piece_input, &out_uint64);
+ size_t out_size;
+ base::StringToSizeT(string_piece_input, &out_size);
+
+ // Test for StringPiece16 if size is even.
+ if (size % 2 == 0) {
+ base::StringPiece16 string_piece_input16(
+ reinterpret_cast<const base::char16*>(data), size / 2);
+
+ base::StringToInt(string_piece_input16, &out_int);
+ base::StringToUint(string_piece_input16, &out_uint);
+ base::StringToInt64(string_piece_input16, &out_int64);
+ base::StringToUint64(string_piece_input16, &out_uint64);
+ base::StringToSizeT(string_piece_input16, &out_size);
+ }
+
+ double out_double;
+ base::StringToDouble(string_input, &out_double);
+
+ base::HexStringToInt(string_piece_input, &out_int);
+ base::HexStringToUInt(string_piece_input, &out_uint);
+ base::HexStringToInt64(string_piece_input, &out_int64);
+ base::HexStringToUInt64(string_piece_input, &out_uint64);
+ std::vector<uint8_t> out_bytes;
+ base::HexStringToBytes(string_piece_input, &out_bytes);
+
+ base::HexEncode(data, size);
+
+ // Convert the numbers back to strings.
+ base::NumberToString(out_int);
+ base::NumberToString16(out_int);
+ base::NumberToString(out_uint);
+ base::NumberToString16(out_uint);
+ base::NumberToString(out_int64);
+ base::NumberToString16(out_int64);
+ base::NumberToString(out_uint64);
+ base::NumberToString16(out_uint64);
+ base::NumberToString(out_double);
+ base::NumberToString16(out_double);
+
+ return 0;
+}
diff --git a/chromium/base/strings/string_piece.h b/chromium/base/strings/string_piece.h
index d6236d13a07..8a7d0d8710b 100644
--- a/chromium/base/strings/string_piece.h
+++ b/chromium/base/strings/string_piece.h
@@ -219,16 +219,31 @@ template <typename STRING_TYPE> class BasicStringPiece {
length_ = str ? STRING_TYPE::traits_type::length(str) : 0;
}
- constexpr value_type operator[](size_type i) const { return ptr_[i]; }
- value_type front() const { return ptr_[0]; }
- value_type back() const { return ptr_[length_ - 1]; }
+ constexpr value_type operator[](size_type i) const {
+ CHECK(i < length_);
+ return ptr_[i];
+ }
+
+ value_type front() const {
+ CHECK_NE(0UL, length_);
+ return ptr_[0];
+ }
+
+ value_type back() const {
+ CHECK_NE(0UL, length_);
+ return ptr_[length_ - 1];
+ }
constexpr void remove_prefix(size_type n) {
+ CHECK(n <= length_);
ptr_ += n;
length_ -= n;
}
- constexpr void remove_suffix(size_type n) { length_ -= n; }
+ constexpr void remove_suffix(size_type n) {
+ CHECK(n <= length_);
+ length_ -= n;
+ }
int compare(const BasicStringPiece<STRING_TYPE>& x) const {
int r = wordmemcmp(
@@ -357,7 +372,7 @@ template <typename STRING_TYPE> class BasicStringPiece {
protected:
const value_type* ptr_;
- size_type length_;
+ size_type length_;
};
template <typename STRING_TYPE>
@@ -456,6 +471,11 @@ struct StringPiece16Hash {
HASH_STRING_PIECE(StringPiece16, sp16);
}
};
+struct WStringPieceHash {
+ std::size_t operator()(const WStringPiece& wsp) const {
+ HASH_STRING_PIECE(WStringPiece, wsp);
+ }
+};
} // namespace base
diff --git a/chromium/base/strings/string_piece_forward.h b/chromium/base/strings/string_piece_forward.h
index 86c1d5fbd5b..b50b9806c9b 100644
--- a/chromium/base/strings/string_piece_forward.h
+++ b/chromium/base/strings/string_piece_forward.h
@@ -17,6 +17,7 @@ template <typename STRING_TYPE>
class BasicStringPiece;
typedef BasicStringPiece<std::string> StringPiece;
typedef BasicStringPiece<string16> StringPiece16;
+typedef BasicStringPiece<std::wstring> WStringPiece;
} // namespace base
diff --git a/chromium/base/strings/string_tokenizer.h b/chromium/base/strings/string_tokenizer.h
index 8defbac3b8d..72fc01650fb 100644
--- a/chromium/base/strings/string_tokenizer.h
+++ b/chromium/base/strings/string_tokenizer.h
@@ -17,14 +17,6 @@ namespace base {
// refer to the next token in the input string. The user may optionally
// configure the tokenizer to return delimiters.
//
-// Warning: be careful not to pass a C string into the 2-arg constructor:
-// StringTokenizer t("this is a test", " "); // WRONG
-// This will create a temporary std::string, save the begin() and end()
-// iterators, and then the string will be freed before we actually start
-// tokenizing it.
-// Instead, use a std::string or use the 3 arg constructor of CStringTokenizer.
-//
-//
// EXAMPLE 1:
//
// char input[] = "this is a test";
@@ -99,13 +91,19 @@ class StringTokenizerT {
RETURN_DELIMS = 1 << 0,
};
- // The string object must live longer than the tokenizer. (In particular this
- // should not be constructed with a temporary.)
+ // The string object must live longer than the tokenizer. In particular, this
+ // should not be constructed with a temporary. The deleted rvalue constructor
+ // blocks the most obvious instances of this (e.g. passing a string literal to
+ // the constructor), but caution must still be exercised.
StringTokenizerT(const str& string,
const str& delims) {
Init(string.begin(), string.end(), delims);
}
+ // Don't allow temporary strings to be used with string tokenizer, since
+ // Init() would otherwise save iterators to a temporary string.
+ StringTokenizerT(str&&, const str& delims) = delete;
+
StringTokenizerT(const_iterator string_begin,
const_iterator string_end,
const str& delims) {
@@ -147,9 +145,9 @@ class StringTokenizerT {
const_iterator token_begin() const { return token_begin_; }
const_iterator token_end() const { return token_end_; }
str token() const { return str(token_begin_, token_end_); }
- base::StringPiece token_piece() const {
- return base::StringPiece(&*token_begin_,
- std::distance(token_begin_, token_end_));
+ BasicStringPiece<str> token_piece() const {
+ return BasicStringPiece<str>(&*token_begin_,
+ std::distance(token_begin_, token_end_));
}
private:
diff --git a/chromium/base/strings/string_tokenizer_fuzzer.cc b/chromium/base/strings/string_tokenizer_fuzzer.cc
new file mode 100644
index 00000000000..917041bd7b0
--- /dev/null
+++ b/chromium/base/strings/string_tokenizer_fuzzer.cc
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/strings/string_tokenizer.h"
+
+void GetAllTokens(base::StringTokenizer& t) {
+ while (t.GetNext()) {
+ (void)t.token();
+ }
+}
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ uint8_t size_t_bytes = sizeof(size_t);
+ if (size < size_t_bytes + 1) {
+ return 0;
+ }
+
+ // Calculate pattern size based on remaining bytes, otherwise fuzzing is
+ // inefficient with bailouts in most cases.
+ size_t pattern_size =
+ *reinterpret_cast<const size_t*>(data) % (size - size_t_bytes);
+
+ std::string pattern(reinterpret_cast<const char*>(data + size_t_bytes),
+ pattern_size);
+ std::string input(
+ reinterpret_cast<const char*>(data + size_t_bytes + pattern_size),
+ size - pattern_size - size_t_bytes);
+
+ // Allow quote_chars and options to be set. Otherwise full coverage
+ // won't be possible since IsQuote, FullGetNext and other functions
+ // won't be called.
+ base::StringTokenizer t(input, pattern);
+ GetAllTokens(t);
+
+ base::StringTokenizer t_quote(input, pattern);
+ t_quote.set_quote_chars("\"");
+ GetAllTokens(t_quote);
+
+ base::StringTokenizer t_options(input, pattern);
+ t_options.set_options(base::StringTokenizer::RETURN_DELIMS);
+ GetAllTokens(t_options);
+
+ base::StringTokenizer t_quote_and_options(input, pattern);
+ t_quote_and_options.set_quote_chars("\"");
+ t_quote_and_options.set_options(base::StringTokenizer::RETURN_DELIMS);
+ GetAllTokens(t_quote_and_options);
+
+ return 0;
+}
diff --git a/chromium/base/strings/string_util.cc b/chromium/base/strings/string_util.cc
index 33398f692cd..32e5ff2b72b 100644
--- a/chromium/base/strings/string_util.cc
+++ b/chromium/base/strings/string_util.cc
@@ -505,12 +505,8 @@ bool IsStringASCII(StringPiece16 str) {
return DoIsStringASCII(str.data(), str.length());
}
-bool IsStringASCII(const string16& str) {
- return DoIsStringASCII(str.data(), str.length());
-}
-
#if defined(WCHAR_T_IS_UTF32)
-bool IsStringASCII(const std::wstring& str) {
+bool IsStringASCII(WStringPiece str) {
return DoIsStringASCII(str.data(), str.length());
}
#endif
diff --git a/chromium/base/strings/string_util.h b/chromium/base/strings/string_util.h
index a4cba6330bd..1e69413b90d 100644
--- a/chromium/base/strings/string_util.h
+++ b/chromium/base/strings/string_util.h
@@ -285,9 +285,8 @@ BASE_EXPORT bool ContainsOnlyChars(StringPiece16 input,
BASE_EXPORT bool IsStringUTF8(StringPiece str);
BASE_EXPORT bool IsStringASCII(StringPiece str);
BASE_EXPORT bool IsStringASCII(StringPiece16 str);
-BASE_EXPORT bool IsStringASCII(const string16& str);
#if defined(WCHAR_T_IS_UTF32)
-BASE_EXPORT bool IsStringASCII(const std::wstring& str);
+BASE_EXPORT bool IsStringASCII(WStringPiece str);
#endif
// Compare the lower-case form of the given string against the given
diff --git a/chromium/base/strings/utf_string_conversion_utils.cc b/chromium/base/strings/utf_string_conversion_utils.cc
index 3101a602888..f7682c1be9d 100644
--- a/chromium/base/strings/utf_string_conversion_utils.cc
+++ b/chromium/base/strings/utf_string_conversion_utils.cc
@@ -5,6 +5,7 @@
#include "base/strings/utf_string_conversion_utils.h"
#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
namespace base {
@@ -121,7 +122,10 @@ void PrepareForUTF8Output(const CHAR* src,
}
// Instantiate versions we know callers will need.
+#if !defined(OS_WIN)
+// wchar_t and char16 are the same thing on Windows.
template void PrepareForUTF8Output(const wchar_t*, size_t, std::string*);
+#endif
template void PrepareForUTF8Output(const char16*, size_t, std::string*);
template<typename STRING>
@@ -142,7 +146,10 @@ void PrepareForUTF16Or32Output(const char* src,
}
// Instantiate versions we know callers will need.
+#if !defined(OS_WIN)
+// std::wstring and string16 are the same thing on Windows.
template void PrepareForUTF16Or32Output(const char*, size_t, std::wstring*);
+#endif
template void PrepareForUTF16Or32Output(const char*, size_t, string16*);
} // namespace base
diff --git a/chromium/base/strings/utf_string_conversions.cc b/chromium/base/strings/utf_string_conversions.cc
index 85450c6566d..89acc3806c8 100644
--- a/chromium/base/strings/utf_string_conversions.cc
+++ b/chromium/base/strings/utf_string_conversions.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,96 +9,235 @@
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversion_utils.h"
+#include "base/third_party/icu/icu_utf.h"
#include "build/build_config.h"
namespace base {
namespace {
-// Generalized Unicode converter -----------------------------------------------
-
-// Converts the given source Unicode character type to the given destination
-// Unicode character type as a STL string. The given input buffer and size
-// determine the source, and the given output STL string will be replaced by
-// the result.
-template<typename SRC_CHAR, typename DEST_STRING>
-bool ConvertUnicode(const SRC_CHAR* src,
- size_t src_len,
- DEST_STRING* output) {
- // ICU requires 32-bit numbers.
+constexpr int32_t kErrorCodePoint = 0xFFFD;
+
+// Size coefficient ----------------------------------------------------------
+// The maximum number of codeunits in the destination encoding corresponding to
+// one codeunit in the source encoding.
+
+template <typename SrcChar, typename DestChar>
+struct SizeCoefficient {
+ static_assert(sizeof(SrcChar) < sizeof(DestChar),
+ "Default case: from a smaller encoding to the bigger one");
+
+ // ASCII symbols are encoded by one codeunit in all encodings.
+ static constexpr int value = 1;
+};
+
+template <>
+struct SizeCoefficient<char16, char> {
+ // One UTF-16 codeunit corresponds to at most 3 codeunits in UTF-8.
+ static constexpr int value = 3;
+};
+
+#if defined(WCHAR_T_IS_UTF32)
+template <>
+struct SizeCoefficient<wchar_t, char> {
+ // UTF-8 uses at most 4 codeunits per character.
+ static constexpr int value = 4;
+};
+
+template <>
+struct SizeCoefficient<wchar_t, char16> {
+ // UTF-16 uses at most 2 codeunits per character.
+ static constexpr int value = 2;
+};
+#endif // defined(WCHAR_T_IS_UTF32)
+
+template <typename SrcChar, typename DestChar>
+constexpr int size_coefficient_v =
+ SizeCoefficient<std::decay_t<SrcChar>, std::decay_t<DestChar>>::value;
+
+// UnicodeAppendUnsafe --------------------------------------------------------
+// Function overloads that write code_point to the output string. Output string
+// has to have enough space for the codepoint.
+
+void UnicodeAppendUnsafe(char* out, int32_t* size, uint32_t code_point) {
+ CBU8_APPEND_UNSAFE(out, *size, code_point);
+}
+
+void UnicodeAppendUnsafe(char16* out, int32_t* size, uint32_t code_point) {
+ CBU16_APPEND_UNSAFE(out, *size, code_point);
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+
+void UnicodeAppendUnsafe(wchar_t* out, int32_t* size, uint32_t code_point) {
+ out[(*size)++] = code_point;
+}
+
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// DoUTFConversion ------------------------------------------------------------
+// Main driver of UTFConversion specialized for different Src encodings.
+// dest has to have enough room for the converted text.
+
+template <typename DestChar>
+bool DoUTFConversion(const char* src,
+ int32_t src_len,
+ DestChar* dest,
+ int32_t* dest_len) {
bool success = true;
- int32_t src_len32 = static_cast<int32_t>(src_len);
- for (int32_t i = 0; i < src_len32; i++) {
- uint32_t code_point;
- if (ReadUnicodeCharacter(src, src_len32, &i, &code_point)) {
- WriteUnicodeCharacter(code_point, output);
- } else {
- WriteUnicodeCharacter(0xFFFD, output);
+
+ for (int32_t i = 0; i < src_len;) {
+ int32_t code_point;
+ CBU8_NEXT(src, i, src_len, code_point);
+
+ if (!IsValidCodepoint(code_point)) {
success = false;
+ code_point = kErrorCodePoint;
}
+
+ UnicodeAppendUnsafe(dest, dest_len, code_point);
}
return success;
}
-} // namespace
+template <typename DestChar>
+bool DoUTFConversion(const char16* src,
+ int32_t src_len,
+ DestChar* dest,
+ int32_t* dest_len) {
+ bool success = true;
-// UTF-8 <-> Wide --------------------------------------------------------------
+ auto ConvertSingleChar = [&success](char16 in) -> int32_t {
+ if (!CBU16_IS_SINGLE(in) || !IsValidCodepoint(in)) {
+ success = false;
+ return kErrorCodePoint;
+ }
+ return in;
+ };
+
+ int32_t i = 0;
+
+ // Always have another symbol in order to avoid checking boundaries in the
+ // middle of the surrogate pair.
+ while (i < src_len - 1) {
+ int32_t code_point;
+
+ if (CBU16_IS_LEAD(src[i]) && CBU16_IS_TRAIL(src[i + 1])) {
+ code_point = CBU16_GET_SUPPLEMENTARY(src[i], src[i + 1]);
+ if (!IsValidCodepoint(code_point)) {
+ code_point = kErrorCodePoint;
+ success = false;
+ }
+ i += 2;
+ } else {
+ code_point = ConvertSingleChar(src[i]);
+ ++i;
+ }
-bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
- if (IsStringASCII(std::wstring(src, src_len))) {
- output->assign(src, src + src_len);
- return true;
- } else {
- PrepareForUTF8Output(src, src_len, output);
- return ConvertUnicode(src, src_len, output);
+ UnicodeAppendUnsafe(dest, dest_len, code_point);
}
+
+ if (i < src_len)
+ UnicodeAppendUnsafe(dest, dest_len, ConvertSingleChar(src[i]));
+
+ return success;
}
-std::string WideToUTF8(const std::wstring& wide) {
- if (IsStringASCII(wide)) {
- return std::string(wide.data(), wide.data() + wide.length());
+#if defined(WCHAR_T_IS_UTF32)
+
+template <typename DestChar>
+bool DoUTFConversion(const wchar_t* src,
+ int32_t src_len,
+ DestChar* dest,
+ int32_t* dest_len) {
+ bool success = true;
+
+ for (int32_t i = 0; i < src_len; ++i) {
+ int32_t code_point = src[i];
+
+ if (!IsValidCodepoint(code_point)) {
+ success = false;
+ code_point = kErrorCodePoint;
+ }
+
+ UnicodeAppendUnsafe(dest, dest_len, code_point);
}
- std::string ret;
- PrepareForUTF8Output(wide.data(), wide.length(), &ret);
- ConvertUnicode(wide.data(), wide.length(), &ret);
- return ret;
+ return success;
}
-bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
- if (IsStringASCII(StringPiece(src, src_len))) {
- output->assign(src, src + src_len);
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// UTFConversion --------------------------------------------------------------
+// Function template for generating all UTF conversions.
+
+template <typename InputString, typename DestString>
+bool UTFConversion(const InputString& src_str, DestString* dest_str) {
+ if (IsStringASCII(src_str)) {
+ dest_str->assign(src_str.begin(), src_str.end());
return true;
- } else {
- PrepareForUTF16Or32Output(src, src_len, output);
- return ConvertUnicode(src, src_len, output);
}
+
+ dest_str->resize(src_str.length() *
+ size_coefficient_v<typename InputString::value_type,
+ typename DestString::value_type>);
+
+ // Empty string is ASCII => it OK to call operator[].
+ auto* dest = &(*dest_str)[0];
+
+ // ICU requires 32 bit numbers.
+ int32_t src_len32 = static_cast<int32_t>(src_str.length());
+ int32_t dest_len32 = 0;
+
+ bool res = DoUTFConversion(src_str.data(), src_len32, dest, &dest_len32);
+
+ dest_str->resize(dest_len32);
+ dest_str->shrink_to_fit();
+
+ return res;
}
-std::wstring UTF8ToWide(StringPiece utf8) {
- if (IsStringASCII(utf8)) {
- return std::wstring(utf8.begin(), utf8.end());
- }
+} // namespace
- std::wstring ret;
- PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
- ConvertUnicode(utf8.data(), utf8.length(), &ret);
+// UTF16 <-> UTF8 --------------------------------------------------------------
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+ return UTFConversion(StringPiece(src, src_len), output);
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+ string16 ret;
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
+ UTF8ToUTF16(utf8.data(), utf8.size(), &ret);
+ return ret;
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+ return UTFConversion(StringPiece16(src, src_len), output);
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+ std::string ret;
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
+ UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
return ret;
}
// UTF-16 <-> Wide -------------------------------------------------------------
#if defined(WCHAR_T_IS_UTF16)
+// When wide == UTF-16 the conversions are a NOP.
-// When wide == UTF-16, then conversions are a NOP.
bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
output->assign(src, src_len);
return true;
}
-string16 WideToUTF16(const std::wstring& wide) {
- return wide;
+string16 WideToUTF16(WStringPiece wide) {
+ return wide.as_string();
}
bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
@@ -106,113 +245,80 @@ bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
return true;
}
-std::wstring UTF16ToWide(const string16& utf16) {
- return utf16;
+std::wstring UTF16ToWide(StringPiece16 utf16) {
+ return utf16.as_string();
}
#elif defined(WCHAR_T_IS_UTF32)
bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
- output->clear();
- // Assume that normally we won't have any non-BMP characters so the counts
- // will be the same.
- output->reserve(src_len);
- return ConvertUnicode(src, src_len, output);
+ return UTFConversion(base::WStringPiece(src, src_len), output);
}
-string16 WideToUTF16(const std::wstring& wide) {
+string16 WideToUTF16(WStringPiece wide) {
string16 ret;
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
WideToUTF16(wide.data(), wide.length(), &ret);
return ret;
}
bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
- output->clear();
- // Assume that normally we won't have any non-BMP characters so the counts
- // will be the same.
- output->reserve(src_len);
- return ConvertUnicode(src, src_len, output);
+ return UTFConversion(StringPiece16(src, src_len), output);
}
-std::wstring UTF16ToWide(const string16& utf16) {
+std::wstring UTF16ToWide(StringPiece16 utf16) {
std::wstring ret;
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
UTF16ToWide(utf16.data(), utf16.length(), &ret);
return ret;
}
#endif // defined(WCHAR_T_IS_UTF32)
-// UTF16 <-> UTF8 --------------------------------------------------------------
+// UTF-8 <-> Wide --------------------------------------------------------------
-#if defined(WCHAR_T_IS_UTF32)
+// UTF8ToWide is the same code, regardless of whether wide is 16 or 32 bits
-bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
- if (IsStringASCII(StringPiece(src, src_len))) {
- output->assign(src, src + src_len);
- return true;
- } else {
- PrepareForUTF16Or32Output(src, src_len, output);
- return ConvertUnicode(src, src_len, output);
- }
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
+ return UTFConversion(StringPiece(src, src_len), output);
}
-string16 UTF8ToUTF16(StringPiece utf8) {
- if (IsStringASCII(utf8)) {
- return string16(utf8.begin(), utf8.end());
- }
-
- string16 ret;
- PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+std::wstring UTF8ToWide(StringPiece utf8) {
+ std::wstring ret;
// Ignore the success flag of this call, it will do the best it can for
// invalid input, which is what we want here.
- ConvertUnicode(utf8.data(), utf8.length(), &ret);
+ UTF8ToWide(utf8.data(), utf8.length(), &ret);
return ret;
}
-bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
- if (IsStringASCII(StringPiece16(src, src_len))) {
- output->assign(src, src + src_len);
- return true;
- } else {
- PrepareForUTF8Output(src, src_len, output);
- return ConvertUnicode(src, src_len, output);
- }
-}
+#if defined(WCHAR_T_IS_UTF16)
+// Easy case since we can use the "utf" versions we already wrote above.
-std::string UTF16ToUTF8(StringPiece16 utf16) {
- std::string ret;
- // Ignore the success flag of this call, it will do the best it can for
- // invalid input, which is what we want here.
- UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
- return ret;
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+ return UTF16ToUTF8(src, src_len, output);
}
-#elif defined(WCHAR_T_IS_UTF16)
-// Easy case since we can use the "wide" versions we already wrote above.
-
-bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
- return UTF8ToWide(src, src_len, output);
+std::string WideToUTF8(WStringPiece wide) {
+ return UTF16ToUTF8(wide);
}
-string16 UTF8ToUTF16(StringPiece utf8) {
- return UTF8ToWide(utf8);
-}
+#elif defined(WCHAR_T_IS_UTF32)
-bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
- return WideToUTF8(src, src_len, output);
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+ return UTFConversion(WStringPiece(src, src_len), output);
}
-std::string UTF16ToUTF8(StringPiece16 utf16) {
- if (IsStringASCII(utf16))
- return std::string(utf16.data(), utf16.data() + utf16.length());
-
+std::string WideToUTF8(WStringPiece wide) {
std::string ret;
- PrepareForUTF8Output(utf16.data(), utf16.length(), &ret);
- ConvertUnicode(utf16.data(), utf16.length(), &ret);
+ // Ignore the success flag of this call, it will do the best it can for
+ // invalid input, which is what we want here.
+ WideToUTF8(wide.data(), wide.length(), &ret);
return ret;
}
-#endif
+#endif // defined(WCHAR_T_IS_UTF32)
string16 ASCIIToUTF16(StringPiece ascii) {
DCHECK(IsStringASCII(ascii)) << ascii;
diff --git a/chromium/base/strings/utf_string_conversions.h b/chromium/base/strings/utf_string_conversions.h
index 2995f4cbcf6..14f94ac9679 100644
--- a/chromium/base/strings/utf_string_conversions.h
+++ b/chromium/base/strings/utf_string_conversions.h
@@ -23,17 +23,17 @@ namespace base {
// possible.
BASE_EXPORT bool WideToUTF8(const wchar_t* src, size_t src_len,
std::string* output);
-BASE_EXPORT std::string WideToUTF8(const std::wstring& wide);
+BASE_EXPORT std::string WideToUTF8(WStringPiece wide);
BASE_EXPORT bool UTF8ToWide(const char* src, size_t src_len,
std::wstring* output);
BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8);
BASE_EXPORT bool WideToUTF16(const wchar_t* src, size_t src_len,
string16* output);
-BASE_EXPORT string16 WideToUTF16(const std::wstring& wide);
+BASE_EXPORT string16 WideToUTF16(WStringPiece wide);
BASE_EXPORT bool UTF16ToWide(const char16* src, size_t src_len,
std::wstring* output);
-BASE_EXPORT std::wstring UTF16ToWide(const string16& utf16);
+BASE_EXPORT std::wstring UTF16ToWide(StringPiece16 utf16);
BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8);
diff --git a/chromium/base/strings/utf_string_conversions_fuzzer.cc b/chromium/base/strings/utf_string_conversions_fuzzer.cc
new file mode 100644
index 00000000000..37d4be215d0
--- /dev/null
+++ b/chromium/base/strings/utf_string_conversions_fuzzer.cc
@@ -0,0 +1,56 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+
+std::string output_std_string;
+std::wstring output_std_wstring;
+base::string16 output_string16;
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ base::StringPiece string_piece_input(reinterpret_cast<const char*>(data),
+ size);
+
+ base::UTF8ToWide(string_piece_input);
+ base::UTF8ToWide(reinterpret_cast<const char*>(data), size,
+ &output_std_wstring);
+ base::UTF8ToUTF16(string_piece_input);
+ base::UTF8ToUTF16(reinterpret_cast<const char*>(data), size,
+ &output_string16);
+
+ // Test for char16.
+ if (size % 2 == 0) {
+ base::StringPiece16 string_piece_input16(
+ reinterpret_cast<const base::char16*>(data), size / 2);
+ base::UTF16ToWide(output_string16);
+ base::UTF16ToWide(reinterpret_cast<const base::char16*>(data), size / 2,
+ &output_std_wstring);
+ base::UTF16ToUTF8(string_piece_input16);
+ base::UTF16ToUTF8(reinterpret_cast<const base::char16*>(data), size / 2,
+ &output_std_string);
+ }
+
+ // Test for wchar_t.
+ size_t wchar_t_size = sizeof(wchar_t);
+ if (size % wchar_t_size == 0) {
+ base::WideToUTF8(output_std_wstring);
+ base::WideToUTF8(reinterpret_cast<const wchar_t*>(data),
+ size / wchar_t_size, &output_std_string);
+ base::WideToUTF16(output_std_wstring);
+ base::WideToUTF16(reinterpret_cast<const wchar_t*>(data),
+ size / wchar_t_size, &output_string16);
+ }
+
+ // Test for ASCII. This condition is needed to avoid hitting instant CHECK
+ // failures.
+ if (base::IsStringASCII(string_piece_input)) {
+ output_string16 = base::ASCIIToUTF16(string_piece_input);
+ base::StringPiece16 string_piece_input16(output_string16);
+ base::UTF16ToASCII(string_piece_input16);
+ }
+
+ return 0;
+}
diff --git a/chromium/base/strings/utf_string_conversions_regression_fuzzer.cc b/chromium/base/strings/utf_string_conversions_regression_fuzzer.cc
new file mode 100644
index 00000000000..ca6b4a27a31
--- /dev/null
+++ b/chromium/base/strings/utf_string_conversions_regression_fuzzer.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/strings/old_utf_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace {
+
+void UTF8ToCheck(const uint8_t* data, size_t size) {
+ const auto* src = reinterpret_cast<const char*>(data);
+ const size_t src_len = size;
+
+ // UTF16
+ {
+ base::string16 new_out;
+ bool new_res = base::UTF8ToUTF16(src, src_len, &new_out);
+
+ base::string16 old_out;
+ bool old_res = base_old::UTF8ToUTF16(src, src_len, &old_out);
+
+ CHECK(new_res == old_res);
+ CHECK(new_out == old_out);
+ }
+
+ // Wide
+ {
+ std::wstring new_out;
+ bool new_res = base::UTF8ToWide(src, src_len, &new_out);
+
+ std::wstring old_out;
+ bool old_res = base_old::UTF8ToWide(src, src_len, &old_out);
+
+ CHECK(new_res == old_res);
+ CHECK(new_out == old_out);
+ }
+}
+
+void UTF16ToCheck(const uint8_t* data, size_t size) {
+ const auto* src = reinterpret_cast<const base::char16*>(data);
+ const size_t src_len = size / 2;
+
+ // UTF8
+ {
+ std::string new_out;
+ bool new_res = base::UTF16ToUTF8(src, src_len, &new_out);
+
+ std::string old_out;
+ bool old_res = base_old::UTF16ToUTF8(src, src_len, &old_out);
+
+ CHECK(new_res == old_res);
+ CHECK(new_out == old_out);
+ }
+
+ // Wide
+ {
+ std::wstring new_out;
+ bool new_res = base::UTF16ToWide(src, src_len, &new_out);
+
+ std::wstring old_out;
+ bool old_res = base_old::UTF16ToWide(src, src_len, &old_out);
+
+ CHECK(new_res == old_res);
+ CHECK(new_out == old_out);
+ }
+}
+
+void WideToCheck(const uint8_t* data, size_t size) {
+ const auto* src = reinterpret_cast<const wchar_t*>(data);
+  const size_t src_len = size / 4;  // This is OK even if wchar_t is 16-bit.
+
+ // UTF8
+ {
+ std::string new_out;
+ bool new_res = base::WideToUTF8(src, src_len, &new_out);
+
+ std::string old_out;
+ bool old_res = base_old::WideToUTF8(src, src_len, &old_out);
+
+ CHECK(new_res == old_res);
+ CHECK(new_out == old_out);
+ }
+
+ // UTF16
+ {
+ base::string16 new_out;
+ bool new_res = base::WideToUTF16(src, src_len, &new_out);
+
+ base::string16 old_out;
+ bool old_res = base_old::WideToUTF16(src, src_len, &old_out);
+
+ CHECK(new_res == old_res);
+ CHECK(new_out == old_out);
+ }
+}
+
+} // namespace
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ UTF8ToCheck(data, size);
+ UTF16ToCheck(data, size);
+ WideToCheck(data, size);
+ return 0;
+}
diff --git a/chromium/base/synchronization/waitable_event_win.cc b/chromium/base/synchronization/waitable_event_win.cc
index 3008a4e6e6c..d04a5a6db3b 100644
--- a/chromium/base/synchronization/waitable_event_win.cc
+++ b/chromium/base/synchronization/waitable_event_win.cc
@@ -61,7 +61,8 @@ void WaitableEvent::Wait() {
DWORD result = WaitForSingleObject(handle_.Get(), INFINITE);
// It is most unexpected that this should ever fail. Help consumers learn
// about it if it should ever fail.
- DCHECK_EQ(WAIT_OBJECT_0, result) << "WaitForSingleObject failed";
+ DPCHECK(result != WAIT_FAILED);
+ DCHECK_EQ(WAIT_OBJECT_0, result);
}
namespace {
diff --git a/chromium/base/sys_info.cc b/chromium/base/sys_info.cc
index 645605a37c9..379d7f26f95 100644
--- a/chromium/base/sys_info.cc
+++ b/chromium/base/sys_info.cc
@@ -44,6 +44,15 @@ int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
return AmountOfAvailablePhysicalMemoryImpl();
}
+bool SysInfo::IsLowEndDevice() {
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableLowEndDeviceMode)) {
+ return true;
+ }
+
+ return IsLowEndDeviceImpl();
+}
+
#if !defined(OS_ANDROID)
bool DetectLowEndDevice() {
@@ -62,7 +71,7 @@ static LazyInstance<
g_lazy_low_end_device = LAZY_INSTANCE_INITIALIZER;
// static
-bool SysInfo::IsLowEndDevice() {
+bool SysInfo::IsLowEndDeviceImpl() {
return g_lazy_low_end_device.Get().value();
}
#endif
diff --git a/chromium/base/sys_info.h b/chromium/base/sys_info.h
index d435d58e0ed..6e58715bf5e 100644
--- a/chromium/base/sys_info.h
+++ b/chromium/base/sys_info.h
@@ -172,6 +172,7 @@ class BASE_EXPORT SysInfo {
static int64_t AmountOfPhysicalMemoryImpl();
static int64_t AmountOfAvailablePhysicalMemoryImpl();
+ static bool IsLowEndDeviceImpl();
#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
static int64_t AmountOfAvailablePhysicalMemory(
diff --git a/chromium/base/sys_info_android.cc b/chromium/base/sys_info_android.cc
index 1d1710c72db..77047962f1a 100644
--- a/chromium/base/sys_info_android.cc
+++ b/chromium/base/sys_info_android.cc
@@ -222,7 +222,7 @@ static base::LazyInstance<
android::SysUtils::IsLowEndDeviceFromJni> >::Leaky
g_lazy_low_end_device = LAZY_INSTANCE_INITIALIZER;
-bool SysInfo::IsLowEndDevice() {
+bool SysInfo::IsLowEndDeviceImpl() {
// This code might be used in some environments
// which might not have a Java environment.
// Note that we need to call the Java version here.
diff --git a/chromium/base/task_scheduler/delayed_task_manager.cc b/chromium/base/task_scheduler/delayed_task_manager.cc
index eec40a88f6b..86a67219e46 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager.cc
@@ -14,7 +14,8 @@
namespace base {
namespace internal {
-DelayedTaskManager::DelayedTaskManager(std::unique_ptr<TickClock> tick_clock)
+DelayedTaskManager::DelayedTaskManager(
+ std::unique_ptr<const TickClock> tick_clock)
: tick_clock_(std::move(tick_clock)) {
DCHECK(tick_clock_);
}
diff --git a/chromium/base/task_scheduler/delayed_task_manager.h b/chromium/base/task_scheduler/delayed_task_manager.h
index 2d6babbc5a7..c48aeb1e6b1 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.h
+++ b/chromium/base/task_scheduler/delayed_task_manager.h
@@ -36,7 +36,7 @@ class BASE_EXPORT DelayedTaskManager {
using PostTaskNowCallback = OnceCallback<void(Task task)>;
// |tick_clock| can be specified for testing.
- DelayedTaskManager(std::unique_ptr<TickClock> tick_clock =
+ DelayedTaskManager(std::unique_ptr<const TickClock> tick_clock =
std::make_unique<DefaultTickClock>());
~DelayedTaskManager();
@@ -57,7 +57,7 @@ class BASE_EXPORT DelayedTaskManager {
TimeDelta delay,
PostTaskNowCallback post_task_now_callback);
- const std::unique_ptr<TickClock> tick_clock_;
+ const std::unique_ptr<const TickClock> tick_clock_;
AtomicFlag started_;
diff --git a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
index 8cfe04a2a2e..b485a6c80be 100644
--- a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
@@ -34,7 +34,8 @@ void RunTask(Task task) {
class TaskSchedulerDelayedTaskManagerTest : public testing::Test {
protected:
TaskSchedulerDelayedTaskManagerTest()
- : delayed_task_manager_(service_thread_task_runner_->GetMockTickClock()),
+ : delayed_task_manager_(
+ service_thread_task_runner_->DeprecatedGetMockTickClock()),
task_(FROM_HERE,
BindOnce(&MockTask::Run, Unretained(&mock_task_)),
TaskTraits(),
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
index fd6dc7746d1..dff554f96c8 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
@@ -386,6 +386,10 @@ SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunnerManager(
arraysize(shared_scheduler_workers_),
"The size of |shared_com_scheduler_workers_| must match "
"|shared_scheduler_workers_|");
+ static_assert(arraysize(shared_com_scheduler_workers_[0]) ==
+ arraysize(shared_scheduler_workers_[0]),
+ "The size of |shared_com_scheduler_workers_| must match "
+ "|shared_scheduler_workers_|");
#endif // defined(OS_WIN)
DCHECK(!g_manager_is_alive);
g_manager_is_alive = true;
@@ -431,6 +435,15 @@ SchedulerSingleThreadTaskRunnerManager::CreateCOMSTATaskRunnerWithTraits(
}
#endif // defined(OS_WIN)
+// static
+SchedulerSingleThreadTaskRunnerManager::ContinueOnShutdown
+SchedulerSingleThreadTaskRunnerManager::TraitsToContinueOnShutdown(
+ const TaskTraits& traits) {
+ if (traits.shutdown_behavior() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN)
+ return IS_CONTINUE_ON_SHUTDOWN;
+ return IS_NOT_CONTINUE_ON_SHUTDOWN;
+}
+
template <typename DelegateType>
scoped_refptr<
SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner>
@@ -541,7 +554,8 @@ template <>
SchedulerWorker*&
SchedulerSingleThreadTaskRunnerManager::GetSharedSchedulerWorkerForTraits<
SchedulerWorkerDelegate>(const TaskTraits& traits) {
- return shared_scheduler_workers_[GetEnvironmentIndexForTraits(traits)];
+ return shared_scheduler_workers_[GetEnvironmentIndexForTraits(traits)]
+ [TraitsToContinueOnShutdown(traits)];
}
#if defined(OS_WIN)
@@ -549,7 +563,8 @@ template <>
SchedulerWorker*&
SchedulerSingleThreadTaskRunnerManager::GetSharedSchedulerWorkerForTraits<
SchedulerWorkerCOMDelegate>(const TaskTraits& traits) {
- return shared_com_scheduler_workers_[GetEnvironmentIndexForTraits(traits)];
+ return shared_com_scheduler_workers_[GetEnvironmentIndexForTraits(traits)]
+ [TraitsToContinueOnShutdown(traits)];
}
#endif // defined(OS_WIN)
@@ -585,23 +600,28 @@ void SchedulerSingleThreadTaskRunnerManager::ReleaseSharedSchedulerWorkers() {
{
AutoSchedulerLock auto_lock(lock_);
for (size_t i = 0; i < arraysize(shared_scheduler_workers_); ++i) {
- local_shared_scheduler_workers[i] = shared_scheduler_workers_[i];
- shared_scheduler_workers_[i] = nullptr;
+ for (size_t j = 0; j < arraysize(shared_scheduler_workers_[i]); ++j) {
+ local_shared_scheduler_workers[i][j] = shared_scheduler_workers_[i][j];
+ shared_scheduler_workers_[i][j] = nullptr;
#if defined(OS_WIN)
- local_shared_com_scheduler_workers[i] = shared_com_scheduler_workers_[i];
- shared_com_scheduler_workers_[i] = nullptr;
+ local_shared_com_scheduler_workers[i][j] =
+ shared_com_scheduler_workers_[i][j];
+ shared_com_scheduler_workers_[i][j] = nullptr;
#endif
}
+ }
}
for (size_t i = 0; i < arraysize(local_shared_scheduler_workers); ++i) {
- if (local_shared_scheduler_workers[i])
- UnregisterSchedulerWorker(local_shared_scheduler_workers[i]);
+ for (size_t j = 0; j < arraysize(local_shared_scheduler_workers[i]); ++j) {
+ if (local_shared_scheduler_workers[i][j])
+ UnregisterSchedulerWorker(local_shared_scheduler_workers[i][j]);
#if defined(OS_WIN)
- if (local_shared_com_scheduler_workers[i])
- UnregisterSchedulerWorker(local_shared_com_scheduler_workers[i]);
+ if (local_shared_com_scheduler_workers[i][j])
+ UnregisterSchedulerWorker(local_shared_com_scheduler_workers[i][j]);
#endif
}
+ }
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
index 1153a7b0c37..610b24f0bd1 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
@@ -80,6 +80,15 @@ class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
private:
class SchedulerSingleThreadTaskRunner;
+ enum ContinueOnShutdown {
+ IS_CONTINUE_ON_SHUTDOWN,
+ IS_NOT_CONTINUE_ON_SHUTDOWN,
+ CONTINUE_ON_SHUTDOWN_COUNT,
+ };
+
+ static ContinueOnShutdown TraitsToContinueOnShutdown(
+ const TaskTraits& traits);
+
template <typename DelegateType>
scoped_refptr<SchedulerSingleThreadTaskRunner> CreateTaskRunnerWithTraitsImpl(
const TaskTraits& traits,
@@ -110,10 +119,17 @@ class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
std::vector<scoped_refptr<SchedulerWorker>> workers_;
int next_worker_id_ = 0;
- SchedulerWorker* shared_scheduler_workers_[ENVIRONMENT_COUNT] = {};
-
+ // Workers for SingleThreadTaskRunnerThreadMode::SHARED tasks. It is
+ // important to have separate threads for CONTINUE_ON_SHUTDOWN and non-
+ // CONTINUE_ON_SHUTDOWN to avoid being in a situation where a
+ // CONTINUE_ON_SHUTDOWN task effectively blocks shutdown by preventing a
+  // BLOCK_SHUTDOWN task from being scheduled. https://crbug.com/829786
+ SchedulerWorker* shared_scheduler_workers_[ENVIRONMENT_COUNT]
+ [CONTINUE_ON_SHUTDOWN_COUNT] = {};
#if defined(OS_WIN)
- SchedulerWorker* shared_com_scheduler_workers_[ENVIRONMENT_COUNT] = {};
+ SchedulerWorker* shared_com_scheduler_workers_[ENVIRONMENT_COUNT]
+ [CONTINUE_ON_SHUTDOWN_COUNT] =
+ {};
#endif // defined(OS_WIN)
// Set to true when Start() is called.
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
index add274e1b73..bd7c134b953 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
@@ -20,6 +20,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_WIN)
@@ -194,6 +195,51 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
});
}
+// Regression test for https://crbug.com/829786
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
+ ContinueOnShutdownDoesNotBlockBlockShutdown) {
+ WaitableEvent task_has_started(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_can_continue(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ // Post a CONTINUE_ON_SHUTDOWN task that waits on
+ // |task_can_continue| to a shared SingleThreadTaskRunner.
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ {TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::SHARED)
+ ->PostTask(FROM_HERE, base::BindOnce(
+ [](WaitableEvent* task_has_started,
+ WaitableEvent* task_can_continue) {
+ task_has_started->Signal();
+ ScopedAllowBaseSyncPrimitivesForTesting
+ allow_base_sync_primitives;
+ task_can_continue->Wait();
+ },
+ Unretained(&task_has_started),
+ Unretained(&task_can_continue)));
+
+ task_has_started.Wait();
+
+ // Post a BLOCK_SHUTDOWN task to a shared SingleThreadTaskRunner.
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::SHARED)
+ ->PostTask(FROM_HERE, DoNothing());
+
+ // Shutdown should not hang even though the first task hasn't finished.
+ task_tracker_.Shutdown();
+
+ // Let the first task finish.
+ task_can_continue.Signal();
+
+ // Tear down from the test body to prevent accesses to |task_can_continue|
+ // after it goes out of scope.
+ TearDownSingleThreadTaskRunnerManager();
+}
+
namespace {
class TaskSchedulerSingleThreadTaskRunnerManagerCommonTest
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index a78e40aa61a..f72a3021ec1 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -8,7 +8,9 @@
#include <string>
#include <vector>
+#include "base/base_switches.h"
#include "base/callback.h"
+#include "base/command_line.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
@@ -105,6 +107,19 @@ void RecordNumBlockShutdownTasksPostedDuringShutdown(
kMaxBlockShutdownTasksPostedDuringShutdown, 50);
}
+// Returns the maximum number of TaskPriority::BACKGROUND sequences that can be
+// scheduled concurrently based on command line flags.
+int GetMaxNumScheduledBackgroundSequences() {
+ // The CommandLine might not be initialized if TaskScheduler is initialized
+ // in a dynamic library which doesn't have access to argc/argv.
+ if (CommandLine::InitializedForCurrentProcess() &&
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableBackgroundTasks)) {
+ return 0;
+ }
+ return std::numeric_limits<int>::max();
+}
+
} // namespace
// Atomic internal state used by TaskTracker. Sequential consistency shouldn't
@@ -226,6 +241,9 @@ struct TaskTracker::PreemptedBackgroundSequence {
DISALLOW_COPY_AND_ASSIGN(PreemptedBackgroundSequence);
};
+TaskTracker::TaskTracker(StringPiece histogram_label)
+ : TaskTracker(histogram_label, GetMaxNumScheduledBackgroundSequences()) {}
+
TaskTracker::TaskTracker(StringPiece histogram_label,
int max_num_scheduled_background_sequences)
: state_(new State),
diff --git a/chromium/base/task_scheduler/task_tracker.h b/chromium/base/task_scheduler/task_tracker.h
index 2da33ff780d..37de12864d9 100644
--- a/chromium/base/task_scheduler/task_tracker.h
+++ b/chromium/base/task_scheduler/task_tracker.h
@@ -87,12 +87,14 @@ namespace internal {
class BASE_EXPORT TaskTracker {
public:
// |histogram_label| is used as a suffix for histograms, it must not be empty.
- // |max_num_scheduled_background_sequences| is the maximum number of
- // background sequences that can be scheduled concurrently during normal
- // execution (ignored during shutdown).
+ // The first constructor sets the maximum number of TaskPriority::BACKGROUND
+ // sequences that can be scheduled concurrently to 0 if the
+ // --disable-background-tasks flag is specified, max() otherwise. The second
+ // constructor sets it to |max_num_scheduled_background_sequences|.
+ TaskTracker(StringPiece histogram_label);
TaskTracker(StringPiece histogram_label,
- int max_num_scheduled_background_sequences =
- std::numeric_limits<int>::max());
+ int max_num_scheduled_background_sequences);
+
virtual ~TaskTracker();
// Synchronously shuts down the scheduler. Once this is called, only tasks
diff --git a/chromium/base/task_scheduler/task_tracker_posix.cc b/chromium/base/task_scheduler/task_tracker_posix.cc
index d929aac670e..8289d909dc4 100644
--- a/chromium/base/task_scheduler/task_tracker_posix.cc
+++ b/chromium/base/task_scheduler/task_tracker_posix.cc
@@ -11,9 +11,7 @@
namespace base {
namespace internal {
-TaskTrackerPosix::TaskTrackerPosix(StringPiece name,
- int max_num_scheduled_background_sequences)
- : TaskTracker(name, max_num_scheduled_background_sequences) {}
+TaskTrackerPosix::TaskTrackerPosix(StringPiece name) : TaskTracker(name) {}
TaskTrackerPosix::~TaskTrackerPosix() = default;
void TaskTrackerPosix::RunOrSkipTask(Task task,
diff --git a/chromium/base/task_scheduler/task_tracker_posix.h b/chromium/base/task_scheduler/task_tracker_posix.h
index 60b7f37009e..4689f7a13e5 100644
--- a/chromium/base/task_scheduler/task_tracker_posix.h
+++ b/chromium/base/task_scheduler/task_tracker_posix.h
@@ -5,7 +5,6 @@
#ifndef BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
#define BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
-#include <limits>
#include <memory>
#include "base/base_export.h"
@@ -28,10 +27,7 @@ struct Task;
// TaskTracker can run tasks.
class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
public:
- // This must match the signature of TaskTracker() to allow interchangeability.
- TaskTrackerPosix(StringPiece name,
- int max_num_scheduled_background_sequences =
- std::numeric_limits<int>::max());
+ TaskTrackerPosix(StringPiece name);
~TaskTrackerPosix() override;
// Sets the MessageLoopForIO with which to setup FileDescriptorWatcher in the
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index b64b9840338..73977c8f8ad 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/compiled_action.gni")
import("//build/config/ui.gni")
import("//build/config/nacl/config.gni")
@@ -177,13 +178,24 @@ static_library("test_support") {
"//third_party/libxml",
]
- if (!is_posix) {
+ if (!is_posix && !is_fuchsia) {
sources -= [
"scoped_locale.cc",
"scoped_locale.h",
]
}
+ if (is_linux) {
+ public_deps += [ ":fontconfig_util_linux" ]
+ data_deps = [
+ "//third_party/test_fonts",
+ ]
+ if (current_toolchain == host_toolchain) {
+ data_deps += [ ":do_generate_fontconfig_caches" ]
+ data += [ "$root_out_dir/fontconfig_caches/" ]
+ }
+ }
+
if (is_ios) {
set_sources_assignment_filter([])
sources += [ "test_file_util_mac.cc" ]
@@ -317,6 +329,43 @@ static_library("run_all_base_unittests") {
}
if (is_linux) {
+ source_set("fontconfig_util_linux") {
+ sources = [
+ "fontconfig_util_linux.cc",
+ "fontconfig_util_linux.h",
+ ]
+ deps = [
+ "//base",
+ "//third_party/fontconfig",
+ ]
+ }
+
+ if (current_toolchain == host_toolchain) {
+ executable("generate_fontconfig_caches") {
+ testonly = true
+ sources = [
+ "generate_fontconfig_caches.cc",
+ ]
+ deps = [
+ ":fontconfig_util_linux",
+ "//base",
+ "//build/config:exe_and_shlib_deps",
+ ]
+ }
+
+ compiled_action("do_generate_fontconfig_caches") {
+ testonly = true
+ tool = ":generate_fontconfig_caches"
+ data_deps = [
+ "//third_party/test_fonts",
+ ]
+ args = []
+ outputs = [
+ "$root_out_dir/fontconfig_caches/STAMP",
+ ]
+ }
+ }
+
shared_library("malloc_wrapper") {
testonly = true
sources = [
diff --git a/chromium/base/third_party/symbolize/BUILD.gn b/chromium/base/third_party/symbolize/BUILD.gn
index 3351cdebbf1..0dc7c2f3e39 100644
--- a/chromium/base/third_party/symbolize/BUILD.gn
+++ b/chromium/base/third_party/symbolize/BUILD.gn
@@ -2,6 +2,17 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/compiler/compiler.gni")
+
+declare_args() {
+ # Stack traces will not include function names. Instead they will contain
+ # file and offset information that can be used with
+ # tools/valgrind/asan/asan_symbolize.py. By piping stderr through this script,
+ # and also enabling symbol_level = 2, you can get much more detailed stack
+ # traces with file names and line numbers, even in non-ASAN builds.
+ print_unsymbolized_stack_traces = is_asan || is_lsan || is_msan || is_tsan
+}
+
static_library("symbolize") {
visibility = [ "//base/*" ]
sources = [
@@ -15,6 +26,11 @@ static_library("symbolize") {
"utilities.h",
]
+ defines = []
+ if (print_unsymbolized_stack_traces) {
+ defines += [ "PRINT_UNSYMBOLIZED_STACK_TRACES" ]
+ }
+
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
}
diff --git a/chromium/base/third_party/symbolize/symbolize.cc b/chromium/base/third_party/symbolize/symbolize.cc
index 41b53bd5e37..e6fbb84fd4d 100644
--- a/chromium/base/third_party/symbolize/symbolize.cc
+++ b/chromium/base/third_party/symbolize/symbolize.cc
@@ -779,8 +779,7 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
}
// Check whether a file name was returned.
-#if !defined(ADDRESS_SANITIZER) && !defined(LEAK_SANITIZER) && \
- !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER)
+#if !defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
if (object_fd < 0) {
#endif
if (out[1]) {
@@ -796,8 +795,7 @@ static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
}
// Failed to determine the object file containing PC. Bail out.
return false;
-#if !defined(ADDRESS_SANITIZER) && !defined(LEAK_SANITIZER) && \
- !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER)
+#if !defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
}
#endif
FileDescriptor wrapped_object_fd(object_fd);
diff --git a/chromium/base/threading/platform_thread_android.cc b/chromium/base/threading/platform_thread_android.cc
index d4c9a04856b..fd90d35102f 100644
--- a/chromium/base/threading/platform_thread_android.cc
+++ b/chromium/base/threading/platform_thread_android.cc
@@ -60,7 +60,7 @@ bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
} // namespace internal
void PlatformThread::SetName(const std::string& name) {
- ThreadIdNameManager::GetInstance()->SetName(CurrentId(), name);
+ ThreadIdNameManager::GetInstance()->SetName(name);
// Like linux, on android we can get the thread names to show up in the
// debugger by setting the process name for the LWP.
diff --git a/chromium/base/threading/platform_thread_fuchsia.cc b/chromium/base/threading/platform_thread_fuchsia.cc
index 1939f820912..eb06795c685 100644
--- a/chromium/base/threading/platform_thread_fuchsia.cc
+++ b/chromium/base/threading/platform_thread_fuchsia.cc
@@ -27,8 +27,7 @@ void PlatformThread::SetName(const std::string& name) {
name.data(), name.size());
DCHECK_EQ(status, ZX_OK);
- ThreadIdNameManager::GetInstance()->SetName(PlatformThread::CurrentId(),
- name);
+ ThreadIdNameManager::GetInstance()->SetName(name);
}
// static
diff --git a/chromium/base/threading/platform_thread_linux.cc b/chromium/base/threading/platform_thread_linux.cc
index 9917a7b1fd6..190acedf7c5 100644
--- a/chromium/base/threading/platform_thread_linux.cc
+++ b/chromium/base/threading/platform_thread_linux.cc
@@ -126,7 +126,7 @@ bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
// static
void PlatformThread::SetName(const std::string& name) {
- ThreadIdNameManager::GetInstance()->SetName(CurrentId(), name);
+ ThreadIdNameManager::GetInstance()->SetName(name);
#if !defined(OS_NACL) && !defined(OS_AIX)
// On linux we can get the thread names to show up in the debugger by setting
diff --git a/chromium/base/threading/platform_thread_mac.mm b/chromium/base/threading/platform_thread_mac.mm
index ea180618522..39d979d6600 100644
--- a/chromium/base/threading/platform_thread_mac.mm
+++ b/chromium/base/threading/platform_thread_mac.mm
@@ -49,7 +49,7 @@ void InitThreading() {
// static
void PlatformThread::SetName(const std::string& name) {
- ThreadIdNameManager::GetInstance()->SetName(CurrentId(), name);
+ ThreadIdNameManager::GetInstance()->SetName(name);
// Mac OS X does not expose the length limit of the name, so
// hardcode it.
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index c53d24e57b5..daccc0e72d0 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -175,7 +175,7 @@ void PlatformThread::Sleep(TimeDelta duration) {
// static
void PlatformThread::SetName(const std::string& name) {
- ThreadIdNameManager::GetInstance()->SetName(CurrentId(), name);
+ ThreadIdNameManager::GetInstance()->SetName(name);
// The SetThreadDescription API works even if no debugger is attached.
auto set_thread_description_func =
@@ -188,9 +188,7 @@ void PlatformThread::SetName(const std::string& name) {
// The debugger needs to be around to catch the name in the exception. If
// there isn't a debugger, we are just needlessly throwing an exception.
- // If this image file is instrumented, we raise the exception anyway
- // to provide the profiler with human-readable thread names.
- if (!::IsDebuggerPresent() && !base::debug::IsBinaryInstrumented())
+ if (!::IsDebuggerPresent())
return;
SetNameInternal(CurrentId(), name.c_str());
diff --git a/chromium/base/threading/post_task_and_reply_impl.cc b/chromium/base/threading/post_task_and_reply_impl.cc
index 4eba45a1c07..5aacdada670 100644
--- a/chromium/base/threading/post_task_and_reply_impl.cc
+++ b/chromium/base/threading/post_task_and_reply_impl.cc
@@ -10,7 +10,6 @@
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
-#include "base/sequence_checker.h"
#include "base/sequenced_task_runner.h"
#include "base/threading/sequenced_task_runner_handle.h"
@@ -18,56 +17,93 @@ namespace base {
namespace {
-// This relay class remembers the sequence that it was created on, and ensures
-// that both the |task| and |reply| Closures are deleted on this same sequence.
-// Also, |task| is guaranteed to be deleted before |reply| is run or deleted.
-//
-// If RunReplyAndSelfDestruct() doesn't run because the originating execution
-// context is no longer available, then the |task| and |reply| Closures are
-// leaked. Leaking is considered preferable to having a thread-safetey
-// violations caused by invoking the Closure destructor on the wrong sequence.
class PostTaskAndReplyRelay {
public:
PostTaskAndReplyRelay(const Location& from_here,
OnceClosure task,
OnceClosure reply)
- : sequence_checker_(),
- from_here_(from_here),
- origin_task_runner_(SequencedTaskRunnerHandle::Get()),
- reply_(std::move(reply)),
- task_(std::move(task)) {}
+ : from_here_(from_here),
+ task_(std::move(task)),
+ reply_(std::move(reply)) {}
+ PostTaskAndReplyRelay(PostTaskAndReplyRelay&&) = default;
~PostTaskAndReplyRelay() {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ if (reply_) {
+ // This can run:
+ // 1) On origin sequence, when:
+ // 1a) Posting |task_| fails.
+ // 1b) |reply_| is cancelled before running.
+ // 1c) The DeleteSoon() below is scheduled.
+ // 2) On destination sequence, when:
+ // 2a) |task_| is cancelled before running.
+ // 2b) Posting |reply_| fails.
+
+ if (!reply_task_runner_->RunsTasksInCurrentSequence()) {
+ // Case 2a) or 2b).
+ //
+ // Destroy callbacks asynchronously on |reply_task_runner| since their
+ // destructors can rightfully be affine to it. As always, DeleteSoon()
+ // might leak its argument if the target execution environment is
+ // shutdown (e.g. MessageLoop deleted, TaskScheduler shutdown).
+ //
+ // Note: while it's obvious why |reply_| can be affine to
+ // |reply_task_runner|, the reason that |task_| can also be affine to it
+ // is that it if neither tasks ran, |task_| may still hold an object
+ // which was intended to be moved to |reply_| when |task_| ran (such an
+ // object's destruction can be affine to |reply_task_runner_| -- e.g.
+ // https://crbug.com/829122).
+ auto relay_to_delete =
+ std::make_unique<PostTaskAndReplyRelay>(std::move(*this));
+ ANNOTATE_LEAKING_OBJECT_PTR(relay_to_delete.get());
+ reply_task_runner_->DeleteSoon(from_here_, std::move(relay_to_delete));
+ }
+
+ // Case 1a), 1b), 1c).
+ //
+ // Callbacks will be destroyed synchronously at the end of this scope.
+ } else {
+ // This can run when both callbacks have run or have been moved to another
+ // PostTaskAndReplyRelay instance. If |reply_| is null, |task_| must be
+ // null too.
+ DCHECK(!task_);
+ }
}
- void RunTaskAndPostReply() {
- std::move(task_).Run();
- origin_task_runner_->PostTask(
- from_here_, BindOnce(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
- base::Unretained(this)));
- }
+ // No assignment operator because of const members.
+ PostTaskAndReplyRelay& operator=(PostTaskAndReplyRelay&&) = delete;
- private:
- void RunReplyAndSelfDestruct() {
- DCHECK(sequence_checker_.CalledOnValidSequence());
+ // Static function is used because it is not possible to bind a method call to
+ // a non-pointer type.
+ static void RunTaskAndPostReply(PostTaskAndReplyRelay relay) {
+ DCHECK(relay.task_);
+ std::move(relay.task_).Run();
- // Ensure |task_| has already been released before |reply_| to ensure that
- // no one accidentally depends on |task_| keeping one of its arguments alive
- // while |reply_| is executing.
- DCHECK(!task_);
+ // Keep a reference to the reply TaskRunner for the PostTask() call before
+ // |relay| is moved into a callback.
+ scoped_refptr<SequencedTaskRunner> reply_task_runner =
+ relay.reply_task_runner_;
- std::move(reply_).Run();
+ reply_task_runner->PostTask(
+ relay.from_here_,
+ BindOnce(&PostTaskAndReplyRelay::RunReply, std::move(relay)));
+ }
- // Cue mission impossible theme.
- delete this;
+ private:
+ // Static function is used because it is not possible to bind a method call to
+ // a non-pointer type.
+ static void RunReply(PostTaskAndReplyRelay relay) {
+ DCHECK(!relay.task_);
+ DCHECK(relay.reply_);
+ std::move(relay.reply_).Run();
}
- const SequenceChecker sequence_checker_;
const Location from_here_;
- const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
- OnceClosure reply_;
OnceClosure task_;
+ OnceClosure reply_;
+ const scoped_refptr<SequencedTaskRunner> reply_task_runner_ =
+ SequencedTaskRunnerHandle::Get();
+
+ DISALLOW_COPY_AND_ASSIGN(PostTaskAndReplyRelay);
};
} // namespace
@@ -77,23 +113,13 @@ namespace internal {
bool PostTaskAndReplyImpl::PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply) {
- DCHECK(!task.is_null()) << from_here.ToString();
- DCHECK(!reply.is_null()) << from_here.ToString();
- PostTaskAndReplyRelay* relay =
- new PostTaskAndReplyRelay(from_here, std::move(task), std::move(reply));
- // PostTaskAndReplyRelay self-destructs after executing |reply|. On the flip
- // side though, it is intentionally leaked if the |task| doesn't complete
- // before the origin sequence stops executing tasks. Annotate |relay| as leaky
- // to avoid having to suppress every callsite which happens to flakily trigger
- // this race.
- ANNOTATE_LEAKING_OBJECT_PTR(relay);
- if (!PostTask(from_here, BindOnce(&PostTaskAndReplyRelay::RunTaskAndPostReply,
- Unretained(relay)))) {
- delete relay;
- return false;
- }
+ DCHECK(task) << from_here.ToString();
+ DCHECK(reply) << from_here.ToString();
- return true;
+ return PostTask(from_here,
+ BindOnce(&PostTaskAndReplyRelay::RunTaskAndPostReply,
+ PostTaskAndReplyRelay(from_here, std::move(task),
+ std::move(reply))));
}
} // namespace internal
diff --git a/chromium/base/threading/post_task_and_reply_impl.h b/chromium/base/threading/post_task_and_reply_impl.h
index 696a655db0b..54038ceecd1 100644
--- a/chromium/base/threading/post_task_and_reply_impl.h
+++ b/chromium/base/threading/post_task_and_reply_impl.h
@@ -18,17 +18,20 @@ namespace internal {
// custom execution context.
//
// If you're looking for a concrete implementation of PostTaskAndReply, you
-// probably want base::TaskRunner.
-//
-// TODO(fdoray): Move this to the anonymous namespace of base/task_runner.cc.
+// probably want base::TaskRunner or base/task_scheduler/post_task.h.
class BASE_EXPORT PostTaskAndReplyImpl {
public:
virtual ~PostTaskAndReplyImpl() = default;
- // Posts |task| by calling PostTask(). On completion, |reply| is posted to the
- // sequence or thread that called this. Can only be called when
- // SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
- // to be deleted on the sequence or thread that called this.
+ // Posts |task| by calling PostTask(). On completion, posts |reply| to the
+ // origin sequence. Can only be called when
+ // SequencedTaskRunnerHandle::IsSet(). Each callback is deleted synchronously
+ // after running, or scheduled for asynchronous deletion on the origin
+ // sequence if it can't run (e.g. if a TaskRunner skips it on shutdown). See
+ // SequencedTaskRunner::DeleteSoon() for when objects scheduled for
+ // asynchronous deletion can be leaked. Note: All //base task posting APIs
+ // require callbacks to support deletion on the posting sequence if they can't
+ // be scheduled.
bool PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply);
diff --git a/chromium/base/threading/post_task_and_reply_impl_unittest.cc b/chromium/base/threading/post_task_and_reply_impl_unittest.cc
index 6678c95a8d2..319327dfea8 100644
--- a/chromium/base/threading/post_task_and_reply_impl_unittest.cc
+++ b/chromium/base/threading/post_task_and_reply_impl_unittest.cc
@@ -6,12 +6,12 @@
#include <utility>
+#include "base/auto_reset.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/test/test_simple_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/test/test_mock_time_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -57,53 +57,141 @@ class MockObject {
MockObject() = default;
MOCK_METHOD1(Task, void(scoped_refptr<ObjectToDelete>));
- MOCK_METHOD0(Reply, void());
+ MOCK_METHOD1(Reply, void(scoped_refptr<ObjectToDelete>));
private:
DISALLOW_COPY_AND_ASSIGN(MockObject);
};
+class MockRunsTasksInCurrentSequenceTaskRunner : public TestMockTimeTaskRunner {
+ public:
+ MockRunsTasksInCurrentSequenceTaskRunner(
+ TestMockTimeTaskRunner::Type type =
+ TestMockTimeTaskRunner::Type::kStandalone)
+ : TestMockTimeTaskRunner(type) {}
+
+ void RunUntilIdleWithRunsTasksInCurrentSequence() {
+ AutoReset<bool> reset(&runs_tasks_in_current_sequence_, true);
+ RunUntilIdle();
+ }
+
+ void ClearPendingTasksWithRunsTasksInCurrentSequence() {
+ AutoReset<bool> reset(&runs_tasks_in_current_sequence_, true);
+ ClearPendingTasks();
+ }
+
+ // TestMockTimeTaskRunner:
+ bool RunsTasksInCurrentSequence() const override {
+ return runs_tasks_in_current_sequence_;
+ }
+
+ private:
+ ~MockRunsTasksInCurrentSequenceTaskRunner() override = default;
+
+ bool runs_tasks_in_current_sequence_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(MockRunsTasksInCurrentSequenceTaskRunner);
+};
+
+class PostTaskAndReplyImplTest : public testing::Test {
+ protected:
+ PostTaskAndReplyImplTest() = default;
+
+ void PostTaskAndReplyToMockObject() {
+ // Expect the post to succeed.
+ EXPECT_TRUE(
+ PostTaskAndReplyTaskRunner(post_runner_.get())
+ .PostTaskAndReply(
+ FROM_HERE,
+ BindOnce(&MockObject::Task, Unretained(&mock_object_),
+ MakeRefCounted<ObjectToDelete>(&delete_task_flag_)),
+ BindOnce(&MockObject::Reply, Unretained(&mock_object_),
+ MakeRefCounted<ObjectToDelete>(&delete_reply_flag_))));
+
+ // Expect the first task to be posted to |post_runner_|.
+ EXPECT_TRUE(post_runner_->HasPendingTask());
+ EXPECT_FALSE(reply_runner_->HasPendingTask());
+ EXPECT_FALSE(delete_task_flag_);
+ EXPECT_FALSE(delete_reply_flag_);
+ }
+
+ scoped_refptr<MockRunsTasksInCurrentSequenceTaskRunner> post_runner_ =
+ MakeRefCounted<MockRunsTasksInCurrentSequenceTaskRunner>();
+ scoped_refptr<MockRunsTasksInCurrentSequenceTaskRunner> reply_runner_ =
+ MakeRefCounted<MockRunsTasksInCurrentSequenceTaskRunner>(
+ TestMockTimeTaskRunner::Type::kBoundToThread);
+ testing::StrictMock<MockObject> mock_object_;
+ bool delete_task_flag_ = false;
+ bool delete_reply_flag_ = false;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PostTaskAndReplyImplTest);
+};
+
} // namespace
-TEST(PostTaskAndReplyImplTest, PostTaskAndReply) {
- scoped_refptr<TestSimpleTaskRunner> post_runner(new TestSimpleTaskRunner);
- scoped_refptr<TestSimpleTaskRunner> reply_runner(new TestSimpleTaskRunner);
- ThreadTaskRunnerHandle task_runner_handle(reply_runner);
-
- testing::StrictMock<MockObject> mock_object;
- bool delete_flag = false;
-
- EXPECT_TRUE(PostTaskAndReplyTaskRunner(post_runner.get())
- .PostTaskAndReply(
- FROM_HERE,
- BindOnce(&MockObject::Task, Unretained(&mock_object),
- MakeRefCounted<ObjectToDelete>(&delete_flag)),
- BindOnce(&MockObject::Reply, Unretained(&mock_object))));
-
- // Expect the task to be posted to |post_runner|.
- EXPECT_TRUE(post_runner->HasPendingTask());
- EXPECT_FALSE(reply_runner->HasPendingTask());
- EXPECT_FALSE(delete_flag);
-
- EXPECT_CALL(mock_object, Task(_));
- post_runner->RunUntilIdle();
- testing::Mock::VerifyAndClear(&mock_object);
-
- // |task| should have been deleted right after being run.
- EXPECT_TRUE(delete_flag);
-
- // Expect the reply to be posted to |reply_runner|.
- EXPECT_FALSE(post_runner->HasPendingTask());
- EXPECT_TRUE(reply_runner->HasPendingTask());
-
- EXPECT_CALL(mock_object, Reply());
- reply_runner->RunUntilIdle();
- testing::Mock::VerifyAndClear(&mock_object);
- EXPECT_TRUE(delete_flag);
-
- // Expect no pending task in |post_runner| and |reply_runner|.
- EXPECT_FALSE(post_runner->HasPendingTask());
- EXPECT_FALSE(reply_runner->HasPendingTask());
+TEST_F(PostTaskAndReplyImplTest, PostTaskAndReply) {
+ PostTaskAndReplyToMockObject();
+
+ EXPECT_CALL(mock_object_, Task(_));
+ post_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+ testing::Mock::VerifyAndClear(&mock_object_);
+ // The task should have been deleted right after being run.
+ EXPECT_TRUE(delete_task_flag_);
+ EXPECT_FALSE(delete_reply_flag_);
+
+ // Expect the reply to be posted to |reply_runner_|.
+ EXPECT_FALSE(post_runner_->HasPendingTask());
+ EXPECT_TRUE(reply_runner_->HasPendingTask());
+
+ EXPECT_CALL(mock_object_, Reply(_));
+ reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+ testing::Mock::VerifyAndClear(&mock_object_);
+ EXPECT_TRUE(delete_task_flag_);
+ // The reply should have been deleted right after being run.
+ EXPECT_TRUE(delete_reply_flag_);
+
+ // Expect no pending task in |post_runner_| and |reply_runner_|.
+ EXPECT_FALSE(post_runner_->HasPendingTask());
+ EXPECT_FALSE(reply_runner_->HasPendingTask());
+}
+
+TEST_F(PostTaskAndReplyImplTest, TaskDoesNotRun) {
+ PostTaskAndReplyToMockObject();
+
+ // Clear the |post_runner_|. Both callbacks should be scheduled for deletion
+ // on the |reply_runner_|.
+ post_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
+ EXPECT_FALSE(post_runner_->HasPendingTask());
+ EXPECT_TRUE(reply_runner_->HasPendingTask());
+ EXPECT_FALSE(delete_task_flag_);
+ EXPECT_FALSE(delete_reply_flag_);
+
+ // Run the |reply_runner_|. Both callbacks should be deleted.
+ reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+ EXPECT_TRUE(delete_task_flag_);
+ EXPECT_TRUE(delete_reply_flag_);
+}
+
+TEST_F(PostTaskAndReplyImplTest, ReplyDoesNotRun) {
+ PostTaskAndReplyToMockObject();
+
+ EXPECT_CALL(mock_object_, Task(_));
+ post_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+ testing::Mock::VerifyAndClear(&mock_object_);
+ // The task should have been deleted right after being run.
+ EXPECT_TRUE(delete_task_flag_);
+ EXPECT_FALSE(delete_reply_flag_);
+
+ // Expect the reply to be posted to |reply_runner_|.
+ EXPECT_FALSE(post_runner_->HasPendingTask());
+ EXPECT_TRUE(reply_runner_->HasPendingTask());
+
+ // Clear the |reply_runner_| queue without running tasks. The reply callback
+ // should be deleted.
+ reply_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
+ EXPECT_TRUE(delete_task_flag_);
+ EXPECT_TRUE(delete_reply_flag_);
}
} // namespace internal
diff --git a/chromium/base/threading/scoped_blocking_call.h b/chromium/base/threading/scoped_blocking_call.h
index c8c4b36222c..e376c308c56 100644
--- a/chromium/base/threading/scoped_blocking_call.h
+++ b/chromium/base/threading/scoped_blocking_call.h
@@ -24,14 +24,54 @@ namespace internal {
class BlockingObserver;
}
-// This class can be instantiated in a scope where a a blocking call (which
-// isn't using local computing resources -- e.g. a synchronous network request)
-// is made. Instantiation will hint the BlockingObserver for this thread about
-// the scope of the blocking operation.
+// This class must be instantiated in every scope where a blocking call is made.
+// CPU usage should be minimal within that scope. //base APIs that block
+// instantiate their own ScopedBlockingCall; it is not necessary to instantiate
+// another ScopedBlockingCall in the scope where these APIs are used.
//
-// In particular, when instantiated from a TaskScheduler parallel or sequenced
-// task, this will allow the thread to be replaced in its pool (more or less
-// aggressively depending on BlockingType).
+// Good:
+// Data data;
+// {
+// ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+// data = GetDataFromNetwork();
+// }
+// CPUIntensiveProcessing(data);
+//
+// Bad:
+// ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+// Data data = GetDataFromNetwork();
+// CPUIntensiveProcessing(data); // CPU usage within a ScopedBlockingCall.
+//
+// Good:
+// Data a;
+// Data b;
+// {
+// ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+// a = GetDataFromMemoryCacheOrNetwork();
+// b = GetDataFromMemoryCacheOrNetwork();
+// }
+// CPUIntensiveProcessing(a);
+// CPUIntensiveProcessing(b);
+//
+// Bad:
+// ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+// Data a = GetDataFromMemoryCacheOrNetwork();
+// Data b = GetDataFromMemoryCacheOrNetwork();
+// CPUIntensiveProcessing(a); // CPU usage within a ScopedBlockingCall.
+// CPUIntensiveProcessing(b); // CPU usage within a ScopedBlockingCall.
+//
+// Good:
+// base::WaitableEvent waitable_event(...);
+// waitable_event.Wait();
+//
+// Bad:
+// base::WaitableEvent waitable_event(...);
+// ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+// waitable_event.Wait(); // Wait() instantiates its own ScopedBlockingCall.
+//
+// When a ScopedBlockingCall is instantiated from a TaskScheduler parallel or
+// sequenced task, the thread pool size is incremented to compensate for the
+// blocked thread (more or less aggressively depending on BlockingType).
class BASE_EXPORT ScopedBlockingCall {
public:
ScopedBlockingCall(BlockingType blocking_type);
diff --git a/chromium/base/threading/thread.cc b/chromium/base/threading/thread.cc
index 183f27e0a1d..97e160f91e8 100644
--- a/chromium/base/threading/thread.cc
+++ b/chromium/base/threading/thread.cc
@@ -308,9 +308,8 @@ void Thread::ThreadMain() {
// Allow threads running a MessageLoopForIO to use FileDescriptorWatcher API.
std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher;
if (MessageLoopForIO::IsCurrent()) {
- DCHECK_EQ(message_loop_, MessageLoopForIO::current());
- file_descriptor_watcher.reset(
- new FileDescriptorWatcher(MessageLoopForIO::current()));
+ file_descriptor_watcher.reset(new FileDescriptorWatcher(
+ static_cast<MessageLoopForIO*>(message_loop_)));
}
#endif
diff --git a/chromium/base/threading/thread.h b/chromium/base/threading/thread.h
index 8237a23a57c..a88d70f76fa 100644
--- a/chromium/base/threading/thread.h
+++ b/chromium/base/threading/thread.h
@@ -271,6 +271,9 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
static bool GetThreadWasQuitProperly();
// Bind this Thread to an existing MessageLoop instead of starting a new one.
+ // TODO(gab): Remove this after ios/ has undergone the same surgery as
+ // BrowserThreadImpl (ref.
+ // https://chromium-review.googlesource.com/c/chromium/src/+/969104).
void SetMessageLoop(MessageLoop* message_loop);
bool using_external_message_loop() const {
diff --git a/chromium/base/threading/thread_id_name_manager.cc b/chromium/base/threading/thread_id_name_manager.cc
index ebcc3ce13f8..ca1979d1557 100644
--- a/chromium/base/threading/thread_id_name_manager.cc
+++ b/chromium/base/threading/thread_id_name_manager.cc
@@ -9,7 +9,9 @@
#include "base/logging.h"
#include "base/memory/singleton.h"
+#include "base/no_destructor.h"
#include "base/strings/string_util.h"
+#include "base/threading/thread_local.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
namespace base {
@@ -18,6 +20,10 @@ namespace {
static const char kDefaultName[] = "";
static std::string* g_default_name;
+ThreadLocalStorage::Slot& GetThreadNameTLS() {
+ static base::NoDestructor<base::ThreadLocalStorage::Slot> thread_name_tls;
+ return *thread_name_tls;
+}
}
ThreadIdNameManager::ThreadIdNameManager()
@@ -52,8 +58,8 @@ void ThreadIdNameManager::InstallSetNameCallback(SetNameCallback callback) {
set_name_callback_ = std::move(callback);
}
-void ThreadIdNameManager::SetName(PlatformThreadId id,
- const std::string& name) {
+void ThreadIdNameManager::SetName(const std::string& name) {
+ PlatformThreadId id = PlatformThread::CurrentId();
std::string* leaked_str = nullptr;
{
AutoLock locked(lock_);
@@ -68,6 +74,7 @@ void ThreadIdNameManager::SetName(PlatformThreadId id,
ThreadIdToHandleMap::iterator id_to_handle_iter =
thread_id_to_handle_.find(id);
+ GetThreadNameTLS().Set(const_cast<char*>(leaked_str->c_str()));
if (set_name_callback_) {
set_name_callback_.Run(leaked_str->c_str());
}
@@ -107,6 +114,11 @@ const char* ThreadIdNameManager::GetName(PlatformThreadId id) {
return handle_to_name_iter->second->c_str();
}
+const char* ThreadIdNameManager::GetNameForCurrentThread() {
+ const char* name = reinterpret_cast<const char*>(GetThreadNameTLS().Get());
+ return name ? name : kDefaultName;
+}
+
void ThreadIdNameManager::RemoveName(PlatformThreadHandle::Handle handle,
PlatformThreadId id) {
AutoLock locked(lock_);
diff --git a/chromium/base/threading/thread_id_name_manager.h b/chromium/base/threading/thread_id_name_manager.h
index d0717b09bdb..f17dc1a4e84 100644
--- a/chromium/base/threading/thread_id_name_manager.h
+++ b/chromium/base/threading/thread_id_name_manager.h
@@ -34,12 +34,15 @@ class BASE_EXPORT ThreadIdNameManager {
using SetNameCallback = base::RepeatingCallback<void(const char* name)>;
void InstallSetNameCallback(SetNameCallback callback);
- // Set the name for the given id.
- void SetName(PlatformThreadId id, const std::string& name);
+ // Set the name for the current thread.
+ void SetName(const std::string& name);
// Get the name for the given id.
const char* GetName(PlatformThreadId id);
+  // Unlike |GetName|, this method uses TLS and avoids touching |lock_|.
+ const char* GetNameForCurrentThread();
+
// Remove the name for the given id.
void RemoveName(PlatformThreadHandle::Handle handle, PlatformThreadId id);
diff --git a/chromium/base/threading/thread_local_storage.cc b/chromium/base/threading/thread_local_storage.cc
index de5fd26bad0..dae54fbb9a1 100644
--- a/chromium/base/threading/thread_local_storage.cc
+++ b/chromium/base/threading/thread_local_storage.cc
@@ -69,6 +69,45 @@ namespace {
base::subtle::Atomic32 g_native_tls_key =
PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
+// The OS TLS slot has three states:
+// * kUninitialized: Any call to Slot::Get()/Set() will create the base
+// per-thread TLS state. On POSIX, kUninitialized must be 0.
+// * [Memory Address]: Raw pointer to the base per-thread TLS state.
+// * kDestroyed: The base per-thread TLS state has been freed.
+//
+// Final States:
+// * Windows: kDestroyed. Windows does not iterate through the OS TLS to clean
+// up the values.
+// * POSIX: kUninitialized. POSIX iterates through TLS until all slots contain
+// nullptr.
+//
+// More details on this design:
+// We need some type of thread-local state to indicate that the TLS system has
+// been destroyed. To do so, we leverage the multi-pass nature of destruction
+// of pthread_key.
+//
+// a) After destruction of TLS system, we set the pthread_key to a sentinel
+// kDestroyed.
+// b) All calls to Slot::Get() DCHECK that the state is not kDestroyed, and
+// any system which might potentially invoke Slot::Get() after destruction
+// of TLS must check ThreadLocalStorage::HasBeenDestroyed().
+// c) After a full pass of the pthread_keys, on the next invocation of
+// ConstructTlsVector(), we'll then set the key to nullptr.
+// d) At this stage, the TLS system is back in its uninitialized state.
+// e) If in the second pass of destruction of pthread_keys something were to
+// re-initialize TLS [this should never happen! Since the only code which
+// uses Chrome TLS is Chrome controlled, we should really be striving for
+// single-pass destruction], then TLS will be re-initialized and then go
+// through the 2-pass destruction system again. Everything should just
+// work (TM).
+
+// The consumers of kUninitialized and kDestroyed expect void*, since that's
+// what the API exposes on both POSIX and Windows.
+void* const kUninitialized = nullptr;
+
+// A sentinel value to indicate that the TLS system has been destroyed.
+void* const kDestroyed = reinterpret_cast<void*>(-3);
+
// The maximum number of slots in our thread local storage stack.
constexpr int kThreadLocalStorageSize = 256;
@@ -139,7 +178,7 @@ TlsVectorEntry* ConstructTlsVector() {
key = base::subtle::NoBarrier_Load(&g_native_tls_key);
}
}
- CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
+ CHECK_EQ(PlatformThreadLocalStorage::GetTLSValue(key), kUninitialized);
// Some allocators, such as TCMalloc, make use of thread local storage. As a
// result, any attempt to call new (or malloc) will lazily cause such a system
@@ -162,6 +201,16 @@ TlsVectorEntry* ConstructTlsVector() {
}
void OnThreadExitInternal(TlsVectorEntry* tls_data) {
+ // This branch is for POSIX, where this function is called twice. The first
+ // pass calls dtors and sets state to kDestroyed. The second pass sets
+ // kDestroyed to kUninitialized.
+ if (tls_data == kDestroyed) {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ PlatformThreadLocalStorage::SetTLSValue(key, kUninitialized);
+ return;
+ }
+
DCHECK(tls_data);
// Some allocators, such as TCMalloc, use TLS. As a result, when a thread
// terminates, one of the destructor calls we make may be to shut down an
@@ -221,7 +270,7 @@ void OnThreadExitInternal(TlsVectorEntry* tls_data) {
}
// Remove our stack allocated vector.
- PlatformThreadLocalStorage::SetTLSValue(key, nullptr);
+ PlatformThreadLocalStorage::SetTLSValue(key, kDestroyed);
}
} // namespace
@@ -237,8 +286,13 @@ void PlatformThreadLocalStorage::OnThreadExit() {
if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
return;
void *tls_data = GetTLSValue(key);
+
+ // On Windows, thread destruction callbacks are only invoked once per module,
+ // so there should be no way that this could be invoked twice.
+ DCHECK_NE(tls_data, kDestroyed);
+
// Maybe we have never initialized TLS for this thread.
- if (!tls_data)
+ if (tls_data == kUninitialized)
return;
OnThreadExitInternal(static_cast<TlsVectorEntry*>(tls_data));
}
@@ -250,11 +304,19 @@ void PlatformThreadLocalStorage::OnThreadExit(void* value) {
} // namespace internal
+bool ThreadLocalStorage::HasBeenDestroyed() {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
+ return false;
+ return PlatformThreadLocalStorage::GetTLSValue(key) == kDestroyed;
+}
+
void ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
PlatformThreadLocalStorage::TLSKey key =
base::subtle::NoBarrier_Load(&g_native_tls_key);
if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
- !PlatformThreadLocalStorage::GetTLSValue(key)) {
+ PlatformThreadLocalStorage::GetTLSValue(key) == kUninitialized) {
ConstructTlsVector();
}
@@ -300,8 +362,9 @@ void* ThreadLocalStorage::Slot::Get() const {
TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
PlatformThreadLocalStorage::GetTLSValue(
base::subtle::NoBarrier_Load(&g_native_tls_key)));
+ DCHECK_NE(tls_data, kDestroyed);
if (!tls_data)
- tls_data = ConstructTlsVector();
+ return nullptr;
DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
// Version mismatches means this slot was previously freed.
@@ -314,6 +377,7 @@ void ThreadLocalStorage::Slot::Set(void* value) {
TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
PlatformThreadLocalStorage::GetTLSValue(
base::subtle::NoBarrier_Load(&g_native_tls_key)));
+ DCHECK_NE(tls_data, kDestroyed);
if (!tls_data)
tls_data = ConstructTlsVector();
DCHECK_NE(slot_, kInvalidSlotValue);
diff --git a/chromium/base/threading/thread_local_storage.h b/chromium/base/threading/thread_local_storage.h
index 4f2b7f18ff0..844f4e742b6 100644
--- a/chromium/base/threading/thread_local_storage.h
+++ b/chromium/base/threading/thread_local_storage.h
@@ -18,10 +18,22 @@
#include <pthread.h>
#endif
+namespace heap_profiling {
+class MemlogAllocatorShimInternal;
+} // namespace heap_profiling
+
namespace base {
+class SamplingHeapProfiler;
+
+namespace trace_event {
+class MallocDumpProvider;
+} // namespace trace_event
+
namespace internal {
+class ThreadLocalStorageTestInternal;
+
// WARNING: You should *NOT* use this class directly.
// PlatformThreadLocalStorage is a low-level abstraction of the OS's TLS
// interface. Instead, you should use one of the following:
@@ -90,7 +102,6 @@ class BASE_EXPORT PlatformThreadLocalStorage {
// an API for portability.
class BASE_EXPORT ThreadLocalStorage {
public:
-
// Prototype for the TLS destructor function, which can be optionally used to
// cleanup thread local storage on thread exit. 'value' is the data that is
// stored in thread local storage.
@@ -134,6 +145,20 @@ class BASE_EXPORT ThreadLocalStorage {
};
private:
+ // In most cases, most callers should not need access to HasBeenDestroyed().
+ // If you are working in code that runs during thread destruction, contact the
+ // base OWNERs for advice and then make a friend request.
+ //
+ // Returns |true| if Chrome's implementation of TLS has been destroyed during
+ // thread destruction. Attempting to call Slot::Get() during destruction is
+ // disallowed and will hit a DCHECK. Any code that relies on TLS during thread
+ // destruction must first check this method before calling Slot::Get().
+ friend class base::SamplingHeapProfiler;
+ friend class base::internal::ThreadLocalStorageTestInternal;
+ friend class base::trace_event::MallocDumpProvider;
+ friend class heap_profiling::MemlogAllocatorShimInternal;
+ static bool HasBeenDestroyed();
+
DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
};
diff --git a/chromium/base/threading/thread_local_storage_unittest.cc b/chromium/base/threading/thread_local_storage_unittest.cc
index 02794b4b666..9062ff0c7fa 100644
--- a/chromium/base/threading/thread_local_storage_unittest.cc
+++ b/chromium/base/threading/thread_local_storage_unittest.cc
@@ -23,6 +23,22 @@
namespace base {
+#if defined(OS_POSIX)
+
+namespace internal {
+
+// This class is friended by ThreadLocalStorage.
+class ThreadLocalStorageTestInternal {
+ public:
+ static bool HasBeenDestroyed() {
+ return ThreadLocalStorage::HasBeenDestroyed();
+ }
+};
+
+} // namespace internal
+
+#endif // defined(OS_POSIX)
+
namespace {
const int kInitialTlsValue = 0x5555;
@@ -80,6 +96,105 @@ void ThreadLocalStorageCleanup(void *value) {
TLSSlot().Set(value);
}
+#if defined(OS_POSIX)
+constexpr intptr_t kDummyValue = 0xABCD;
+constexpr size_t kKeyCount = 20;
+
+// The order in which pthread keys are destructed is not specified by the POSIX
+// specification. Hopefully, of the 20 keys we create, at least some will be
+// destroyed after the TLS key is destroyed.
+class UseTLSDuringDestructionRunner {
+ public:
+ UseTLSDuringDestructionRunner() = default;
+
+ // The order in which pthread_key destructors are called is not well defined.
+  // Hopefully, by creating 10 keys both before and after initializing TLS on
+  // the thread, at least one will be called after TLS destruction.
+ void Run() {
+ ASSERT_FALSE(internal::ThreadLocalStorageTestInternal::HasBeenDestroyed());
+
+ // Create 10 pthread keys before initializing TLS on the thread.
+ size_t slot_index = 0;
+ for (; slot_index < 10; ++slot_index) {
+ CreateTlsKeyWithDestructor(slot_index);
+ }
+
+ // Initialize the Chrome TLS system. It's possible that base::Thread has
+ // already initialized Chrome TLS, but we don't rely on that.
+ slot_.Set(reinterpret_cast<void*>(kDummyValue));
+
+ // Create 10 pthread keys after initializing TLS on the thread.
+ for (; slot_index < kKeyCount; ++slot_index) {
+ CreateTlsKeyWithDestructor(slot_index);
+ }
+ }
+
+ bool teardown_works_correctly() { return teardown_works_correctly_; }
+
+ private:
+ struct TLSState {
+ pthread_key_t key;
+ bool* teardown_works_correctly;
+ };
+
+ // The POSIX TLS destruction API takes as input a single C-function, which is
+ // called with the current |value| of a (key, value) pair. We need this
+ // function to do two things: set the |value| to nullptr, which requires
+ // knowing the associated |key|, and update the |teardown_works_correctly_|
+ // state.
+ //
+ // To accomplish this, we set the value to an instance of TLSState, which
+ // contains |key| as well as a pointer to |teardown_works_correctly|.
+ static void ThreadLocalDestructor(void* value) {
+ TLSState* state = static_cast<TLSState*>(value);
+ int result = pthread_setspecific(state->key, nullptr);
+ ASSERT_EQ(result, 0);
+
+ // If this path is hit, then the thread local destructor was called after
+ // the Chrome-TLS destructor and the internal state was updated correctly.
+ // No further checks are necessary.
+ if (internal::ThreadLocalStorageTestInternal::HasBeenDestroyed()) {
+ *(state->teardown_works_correctly) = true;
+ return;
+ }
+
+ // If this path is hit, then the thread local destructor was called before
+ // the Chrome-TLS destructor is hit. The ThreadLocalStorage::Slot should
+ // still function correctly.
+ ASSERT_EQ(reinterpret_cast<intptr_t>(slot_.Get()), kDummyValue);
+ }
+
+ void CreateTlsKeyWithDestructor(size_t index) {
+ ASSERT_LT(index, kKeyCount);
+
+ tls_states_[index].teardown_works_correctly = &teardown_works_correctly_;
+ int result = pthread_key_create(
+ &(tls_states_[index].key),
+ UseTLSDuringDestructionRunner::ThreadLocalDestructor);
+ ASSERT_EQ(result, 0);
+
+ result = pthread_setspecific(tls_states_[index].key, &tls_states_[index]);
+ ASSERT_EQ(result, 0);
+ }
+
+ static base::ThreadLocalStorage::Slot slot_;
+ bool teardown_works_correctly_ = false;
+ TLSState tls_states_[kKeyCount];
+
+ DISALLOW_COPY_AND_ASSIGN(UseTLSDuringDestructionRunner);
+};
+
+base::ThreadLocalStorage::Slot UseTLSDuringDestructionRunner::slot_;
+
+void* UseTLSTestThreadRun(void* input) {
+ UseTLSDuringDestructionRunner* runner =
+ static_cast<UseTLSDuringDestructionRunner*>(input);
+ runner->Run();
+ return nullptr;
+}
+
+#endif // defined(OS_POSIX)
+
} // namespace
TEST(ThreadLocalStorageTest, Basics) {
@@ -142,4 +257,22 @@ TEST(ThreadLocalStorageTest, TLSReclaim) {
}
}
+#if defined(OS_POSIX)
+// Unlike POSIX, Windows does not iterate through the OS TLS to cleanup any
+// values there. Instead a per-module thread destruction function is called.
+// However, it is not possible to perform a check after this point (as the code
+// is detached from the thread), so this check remains POSIX only.
+TEST(ThreadLocalStorageTest, UseTLSDuringDestruction) {
+ UseTLSDuringDestructionRunner runner;
+ pthread_t thread;
+ int result = pthread_create(&thread, nullptr, UseTLSTestThreadRun, &runner);
+ ASSERT_EQ(result, 0);
+
+ result = pthread_join(thread, nullptr);
+ ASSERT_EQ(result, 0);
+
+ EXPECT_TRUE(runner.teardown_works_correctly());
+}
+#endif // defined(OS_POSIX)
+
} // namespace base
diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc
index e7e1716e080..633bcb26e6e 100644
--- a/chromium/base/threading/thread_restrictions.cc
+++ b/chromium/base/threading/thread_restrictions.cc
@@ -118,6 +118,13 @@ void ResetThreadRestrictionsForTesting() {
} // namespace internal
+ThreadRestrictions::ScopedAllowIO::ScopedAllowIO()
+ : was_allowed_(SetIOAllowed(true)) {}
+
+ThreadRestrictions::ScopedAllowIO::~ScopedAllowIO() {
+ SetIOAllowed(was_allowed_);
+}
+
// static
bool ThreadRestrictions::SetIOAllowed(bool allowed) {
bool previous_disallowed = g_blocking_disallowed.Get().Get();
@@ -157,6 +164,13 @@ bool ThreadRestrictions::SetWaitAllowed(bool allowed) {
return !previous_disallowed;
}
+ThreadRestrictions::ScopedAllowWait::ScopedAllowWait()
+ : was_allowed_(SetWaitAllowed(true)) {}
+
+ThreadRestrictions::ScopedAllowWait::~ScopedAllowWait() {
+ SetWaitAllowed(was_allowed_);
+}
+
} // namespace base
#endif // DCHECK_IS_ON()
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index e63c406810b..e3d70e31083 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -38,6 +38,7 @@ namespace content {
class BrowserGpuChannelHostFactory;
class BrowserGpuMemoryBufferManager;
class BrowserMainLoop;
+class BrowserProcessSubThread;
class BrowserShutdownProfileDumper;
class BrowserSurfaceViewManager;
class BrowserTestBase;
@@ -79,6 +80,7 @@ namespace midi {
class TaskService; // https://crbug.com/796830
}
namespace mojo {
+class CoreLibraryInitializer;
class SyncCallRestrictions;
namespace edk {
class ScopedIPCSupport;
@@ -91,6 +93,7 @@ namespace ui {
class CommandBufferClientImpl;
class CommandBufferLocal;
class GpuState;
+class MaterialDesignController;
}
namespace net {
class MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
@@ -211,9 +214,12 @@ class BASE_EXPORT ScopedAllowBlocking {
// in unit tests to avoid the friend requirement.
FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest, ScopedAllowBlocking);
friend class android_webview::ScopedAllowInitGLBindings;
+ friend class content::BrowserProcessSubThread;
friend class cronet::CronetPrefsManager;
friend class cronet::CronetURLRequestContext;
+ friend class mojo::CoreLibraryInitializer;
friend class resource_coordinator::TabManagerDelegate; // crbug.com/778703
+ friend class ui::MaterialDesignController;
friend class ScopedAllowBlockingForTesting;
friend class StackSamplingProfiler;
@@ -359,11 +365,13 @@ class BASE_EXPORT ThreadRestrictions {
// DEPRECATED. Use ScopedAllowBlocking(ForTesting).
class BASE_EXPORT ScopedAllowIO {
public:
- ScopedAllowIO() { previous_value_ = SetIOAllowed(true); }
- ~ScopedAllowIO() { SetIOAllowed(previous_value_); }
+ ScopedAllowIO() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowIO() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
private:
- // Whether IO is allowed when the ScopedAllowIO was constructed.
- bool previous_value_;
+#if DCHECK_IS_ON()
+ const bool was_allowed_;
+#endif
DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
};
@@ -470,12 +478,13 @@ class BASE_EXPORT ThreadRestrictions {
// DEPRECATED. Use ScopedAllowBaseSyncPrimitives.
class BASE_EXPORT ScopedAllowWait {
public:
- ScopedAllowWait() { previous_value_ = SetWaitAllowed(true); }
- ~ScopedAllowWait() { SetWaitAllowed(previous_value_); }
+ ScopedAllowWait() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowWait() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
private:
- // Whether singleton use is allowed when the ScopedAllowWait was
- // constructed.
- bool previous_value_;
+#if DCHECK_IS_ON()
+ const bool was_allowed_;
+#endif
DISALLOW_COPY_AND_ASSIGN(ScopedAllowWait);
};
diff --git a/chromium/base/time/default_tick_clock.cc b/chromium/base/time/default_tick_clock.cc
index 96d4d869eb6..188c3cf921b 100644
--- a/chromium/base/time/default_tick_clock.cc
+++ b/chromium/base/time/default_tick_clock.cc
@@ -4,21 +4,20 @@
#include "base/time/default_tick_clock.h"
-#include "base/lazy_instance.h"
+#include "base/no_destructor.h"
namespace base {
DefaultTickClock::~DefaultTickClock() = default;
-TimeTicks DefaultTickClock::NowTicks() {
+TimeTicks DefaultTickClock::NowTicks() const {
return TimeTicks::Now();
}
// static
-DefaultTickClock* DefaultTickClock::GetInstance() {
- static LazyInstance<DefaultTickClock>::Leaky instance =
- LAZY_INSTANCE_INITIALIZER;
- return instance.Pointer();
+const DefaultTickClock* DefaultTickClock::GetInstance() {
+ static const base::NoDestructor<DefaultTickClock> default_tick_clock;
+ return default_tick_clock.get();
}
} // namespace base
diff --git a/chromium/base/time/default_tick_clock.h b/chromium/base/time/default_tick_clock.h
index dce2538b1d9..78f8a997335 100644
--- a/chromium/base/time/default_tick_clock.h
+++ b/chromium/base/time/default_tick_clock.h
@@ -6,7 +6,6 @@
#define BASE_TIME_DEFAULT_TICK_CLOCK_H_
#include "base/base_export.h"
-#include "base/compiler_specific.h"
#include "base/time/tick_clock.h"
namespace base {
@@ -17,10 +16,10 @@ class BASE_EXPORT DefaultTickClock : public TickClock {
~DefaultTickClock() override;
// Simply returns TimeTicks::Now().
- TimeTicks NowTicks() override;
+ TimeTicks NowTicks() const override;
// Returns a shared instance of DefaultTickClock. This is thread-safe.
- static DefaultTickClock* GetInstance();
+ static const DefaultTickClock* GetInstance();
};
} // namespace base
diff --git a/chromium/base/time/tick_clock.h b/chromium/base/time/tick_clock.h
index f7aba537430..dc57354a256 100644
--- a/chromium/base/time/tick_clock.h
+++ b/chromium/base/time/tick_clock.h
@@ -32,7 +32,7 @@ class BASE_EXPORT TickClock {
// assume that NowTicks() is monotonic (but not strictly monotonic).
// In other words, the returned TimeTicks will never decrease with
// time, although they might "stand still".
- virtual TimeTicks NowTicks() = 0;
+ virtual TimeTicks NowTicks() const = 0;
};
} // namespace base
diff --git a/chromium/base/timer/timer.cc b/chromium/base/timer/timer.cc
index 5540994dcb7..99cd83933aa 100644
--- a/chromium/base/timer/timer.cc
+++ b/chromium/base/timer/timer.cc
@@ -62,7 +62,9 @@ class BaseTimerTaskInternal {
Timer::Timer(bool retain_user_task, bool is_repeating)
: Timer(retain_user_task, is_repeating, nullptr) {}
-Timer::Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock)
+Timer::Timer(bool retain_user_task,
+ bool is_repeating,
+ const TickClock* tick_clock)
: scheduled_task_(nullptr),
is_repeating_(is_repeating),
retain_user_task_(retain_user_task),
@@ -85,7 +87,7 @@ Timer::Timer(const Location& posted_from,
TimeDelta delay,
const base::Closure& user_task,
bool is_repeating,
- TickClock* tick_clock)
+ const TickClock* tick_clock)
: scheduled_task_(nullptr),
posted_from_(posted_from),
delay_(delay),
diff --git a/chromium/base/timer/timer.h b/chromium/base/timer/timer.h
index 99c969f997b..b10aca845d8 100644
--- a/chromium/base/timer/timer.h
+++ b/chromium/base/timer/timer.h
@@ -89,7 +89,7 @@ class BASE_EXPORT Timer {
// retained or reset when it runs or stops. If |tick_clock| is provided, it is
// used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
Timer(bool retain_user_task, bool is_repeating);
- Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock);
+ Timer(bool retain_user_task, bool is_repeating, const TickClock* tick_clock);
// Construct a timer with retained task info. If |tick_clock| is provided, it
// is used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
@@ -101,7 +101,7 @@ class BASE_EXPORT Timer {
TimeDelta delay,
const base::Closure& user_task,
bool is_repeating,
- TickClock* tick_clock);
+ const TickClock* tick_clock);
virtual ~Timer();
@@ -229,7 +229,7 @@ class BASE_EXPORT Timer {
const bool retain_user_task_;
// The tick clock used to calculate the run time for scheduled tasks.
- TickClock* const tick_clock_;
+ const TickClock* const tick_clock_;
// If true, |user_task_| is scheduled to run sometime in the future.
bool is_running_;
@@ -242,7 +242,7 @@ class BASE_EXPORT Timer {
class OneShotTimer : public Timer {
public:
OneShotTimer() : OneShotTimer(nullptr) {}
- explicit OneShotTimer(TickClock* tick_clock)
+ explicit OneShotTimer(const TickClock* tick_clock)
: Timer(false, false, tick_clock) {}
};
@@ -251,7 +251,7 @@ class OneShotTimer : public Timer {
class RepeatingTimer : public Timer {
public:
RepeatingTimer() : RepeatingTimer(nullptr) {}
- explicit RepeatingTimer(TickClock* tick_clock)
+ explicit RepeatingTimer(const TickClock* tick_clock)
: Timer(true, true, tick_clock) {}
};
@@ -280,7 +280,7 @@ class DelayTimer : protected Timer {
TimeDelta delay,
Receiver* receiver,
void (Receiver::*method)(),
- TickClock* tick_clock)
+ const TickClock* tick_clock)
: Timer(posted_from,
delay,
base::Bind(method, base::Unretained(receiver)),
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index a868441c076..aaab237d11f 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -435,11 +435,10 @@ TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
TEST(TimerTest, OneShotTimerWithTickClock) {
scoped_refptr<TestMockTimeTaskRunner> task_runner(
new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
- std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
MessageLoop message_loop;
message_loop.SetTaskRunner(task_runner);
Receiver receiver;
- OneShotTimer timer(tick_clock.get());
+ OneShotTimer timer(task_runner->GetMockTickClock());
timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
Bind(&Receiver::OnCalled, Unretained(&receiver)));
task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
@@ -477,12 +476,11 @@ TEST(TimerTest, RepeatingTimerZeroDelay_Cancel) {
TEST(TimerTest, RepeatingTimerWithTickClock) {
scoped_refptr<TestMockTimeTaskRunner> task_runner(
new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
- std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
MessageLoop message_loop;
message_loop.SetTaskRunner(task_runner);
Receiver receiver;
const int expected_times_called = 10;
- RepeatingTimer timer(tick_clock.get());
+ RepeatingTimer timer(task_runner->GetMockTickClock());
timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
Bind(&Receiver::OnCalled, Unretained(&receiver)));
task_runner->FastForwardBy(TimeDelta::FromSeconds(expected_times_called));
@@ -518,12 +516,11 @@ TEST(TimerTest, DelayTimer_Deleted) {
TEST(TimerTest, DelayTimerWithTickClock) {
scoped_refptr<TestMockTimeTaskRunner> task_runner(
new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
- std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
MessageLoop message_loop;
message_loop.SetTaskRunner(task_runner);
Receiver receiver;
DelayTimer timer(FROM_HERE, TimeDelta::FromSeconds(1), &receiver,
- &Receiver::OnCalled, tick_clock.get());
+ &Receiver::OnCalled, task_runner->GetMockTickClock());
task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
EXPECT_FALSE(receiver.WasCalled());
timer.Reset();
diff --git a/chromium/base/tools_sanity_unittest.cc b/chromium/base/tools_sanity_unittest.cc
index fbe1d2f9ffe..0a48a74c313 100644
--- a/chromium/base/tools_sanity_unittest.cc
+++ b/chromium/base/tools_sanity_unittest.cc
@@ -26,20 +26,13 @@ const base::subtle::Atomic32 kMagicValue = 42;
// Helper for memory accesses that can potentially corrupt memory or cause a
// crash during a native run.
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
#if defined(OS_IOS)
// EXPECT_DEATH is not supported on IOS.
#define HARMFUL_ACCESS(action,error_regexp) do { action; } while (0)
-#elif defined(SYZYASAN)
-// We won't get a meaningful error message because we're not running under the
-// SyzyASan logger, but we can at least make sure that the error has been
-// generated in the SyzyASan runtime.
-#define HARMFUL_ACCESS(action,unused) \
-if (debug::IsBinaryInstrumented()) { EXPECT_DEATH(action, \
- "AsanRuntime::OnError"); }
#else
#define HARMFUL_ACCESS(action,error_regexp) EXPECT_DEATH(action,error_regexp)
-#endif // !OS_IOS && !SYZYASAN
+#endif // !OS_IOS
#else
#define HARMFUL_ACCESS(action, error_regexp)
#define HARMFUL_ACCESS_IS_NOOP
@@ -108,16 +101,15 @@ TEST(ToolsSanityTest, MemoryLeak) {
leak[4] = 1; // Make sure the allocated memory is used.
}
-#if (defined(ADDRESS_SANITIZER) && defined(OS_IOS)) || defined(SYZYASAN)
+#if (defined(ADDRESS_SANITIZER) && defined(OS_IOS))
// Because iOS doesn't support death tests, each of the following tests will
-// crash the whole program under Asan. On Windows Asan is based on SyzyAsan; the
-// error report mechanism is different than with Asan so these tests will fail.
+// crash the whole program under Asan.
#define MAYBE_AccessesToNewMemory DISABLED_AccessesToNewMemory
#define MAYBE_AccessesToMallocMemory DISABLED_AccessesToMallocMemory
#else
#define MAYBE_AccessesToNewMemory AccessesToNewMemory
#define MAYBE_AccessesToMallocMemory AccessesToMallocMemory
-#endif // (defined(ADDRESS_SANITIZER) && defined(OS_IOS)) || defined(SYZYASAN)
+#endif // (defined(ADDRESS_SANITIZER) && defined(OS_IOS))
// The following tests pass with Clang r170392, but not r172454, which
// makes AddressSanitizer detect errors in them. We disable these tests under
@@ -125,14 +117,14 @@ TEST(ToolsSanityTest, MemoryLeak) {
// tests should be put back under the (defined(OS_IOS) || defined(OS_WIN))
// clause above.
// See also http://crbug.com/172614.
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
#define MAYBE_SingleElementDeletedWithBraces \
DISABLED_SingleElementDeletedWithBraces
#define MAYBE_ArrayDeletedWithoutBraces DISABLED_ArrayDeletedWithoutBraces
#else
#define MAYBE_ArrayDeletedWithoutBraces ArrayDeletedWithoutBraces
#define MAYBE_SingleElementDeletedWithBraces SingleElementDeletedWithBraces
-#endif // defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#endif // defined(ADDRESS_SANITIZER)
TEST(ToolsSanityTest, MAYBE_AccessesToNewMemory) {
char *foo = new char[10];
@@ -150,7 +142,7 @@ TEST(ToolsSanityTest, MAYBE_AccessesToMallocMemory) {
HARMFUL_ACCESS(foo[5] = 0, "heap-use-after-free");
}
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
static int* allocateArray() {
// Clang warns about the mismatched new[]/delete if they occur in the same
@@ -182,7 +174,7 @@ TEST(ToolsSanityTest, MAYBE_SingleElementDeletedWithBraces) {
}
#endif
-#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+#if defined(ADDRESS_SANITIZER)
TEST(ToolsSanityTest, DISABLED_AddressSanitizerNullDerefCrashTest) {
// Intentionally crash to make sure AddressSanitizer is running.
@@ -228,20 +220,22 @@ TEST(ToolsSanityTest, AsanHeapUseAfterFree) {
HARMFUL_ACCESS(debug::AsanHeapUseAfterFree(), "heap-use-after-free");
}
-#if defined(SYZYASAN) && defined(COMPILER_MSVC)
-TEST(ToolsSanityTest, AsanCorruptHeapBlock) {
+#if defined(OS_WIN)
+// The ASAN runtime doesn't detect heap corruption, this needs fixing before
+// ASAN builds can ship to the wild. See https://crbug.com/818747.
+TEST(ToolsSanityTest, DISABLED_AsanCorruptHeapBlock) {
HARMFUL_ACCESS(debug::AsanCorruptHeapBlock(), "");
}
-TEST(ToolsSanityTest, AsanCorruptHeap) {
+TEST(ToolsSanityTest, DISABLED_AsanCorruptHeap) {
// This test will kill the process by raising an exception, there's no
// particular string to look for in the stack trace.
EXPECT_DEATH(debug::AsanCorruptHeap(), "");
}
-#endif // SYZYASAN && COMPILER_MSVC
+#endif // OS_WIN
#endif // !HARMFUL_ACCESS_IS_NOOP
-#endif // ADDRESS_SANITIZER || SYZYASAN
+#endif // ADDRESS_SANITIZER
namespace {
diff --git a/chromium/base/trace_event/auto_open_close_event.cc b/chromium/base/trace_event/auto_open_close_event.cc
index f2794f497cb..1879700b39f 100644
--- a/chromium/base/trace_event/auto_open_close_event.cc
+++ b/chromium/base/trace_event/auto_open_close_event.cc
@@ -28,7 +28,7 @@ AutoOpenCloseEvent::~AutoOpenCloseEvent() {
void AutoOpenCloseEvent::Begin() {
DCHECK(thread_checker_.CalledOnValidThread());
- start_time_ = base::TimeTicks::Now();
+ start_time_ = TRACE_TIME_TICKS_NOW();
TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
category_, event_name_, static_cast<void*>(this), start_time_);
}
@@ -49,4 +49,4 @@ void AutoOpenCloseEvent::OnTraceLogEnabled() {
void AutoOpenCloseEvent::OnTraceLogDisabled() {}
} // namespace trace_event
-} // namespace base \ No newline at end of file
+} // namespace base
diff --git a/chromium/base/trace_event/cfi_backtrace_android.cc b/chromium/base/trace_event/cfi_backtrace_android.cc
new file mode 100644
index 00000000000..8fd8b955dcd
--- /dev/null
+++ b/chromium/base/trace_event/cfi_backtrace_android.cc
@@ -0,0 +1,314 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/cfi_backtrace_android.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include "base/android/apk_assets.h"
+
+#if !defined(ARCH_CPU_ARMEL)
+#error This file should not be built for this architecture.
+#endif
+
+/*
+Basics of unwinding:
+For each instruction in a function we need to know what is the offset of SP
+(Stack Pointer) to reach the previous function's stack frame. To know which
+function is being invoked, we need the return address of the next function. The
+CFI information for an instruction is made up of 2 offsets, CFA (Call Frame
+Address) offset and RA (Return Address) offset. The CFA offset is the change in
+SP made by the function till the current instruction. This depends on amount of
+memory allocated on stack by the function plus some registers that the function
+stores that needs to be restored at the end of function. So, at each instruction
+the CFA offset tells the offset from original SP before the function call. The
+RA offset tells us the offset from the previous SP into the current function
+where the return address is stored.
+
+The unwind table file has 2 tables UNW_INDEX and UNW_DATA, inspired from ARM
+EHABI format. The first table contains function addresses and an index into the
+UNW_DATA table. The second table contains one or more rows for the function
+unwind information.
+
+UNW_INDEX contains two columns of N rows each, where N is the number of
+functions.
+ 1. First column 4 byte rows of all the function start address as offset from
+ start of the binary, in sorted order.
+ 2. For each function addr, the second column contains 2 byte indices in order.
+ The indices are offsets (in count of 2 bytes) of the CFI data from start of
+ UNW_DATA.
+The last entry in the table always contains CANT_UNWIND index to specify the
+end address of the last function.
+
+UNW_DATA contains data of all the functions. Each function data contains N rows.
+The data found at the address pointed from UNW_INDEX will be:
+ 2 bytes: N - number of rows that belong to current function.
+ N * 4 bytes: N rows of data. 16 bits : Address offset from function start.
+ 14 bits : CFA offset / 4.
+ 2 bits : RA offset / 4.
+If the RA offset of a row is 0, then use the offset of the previous rows in the
+same function.
+TODO(ssid): Make sure RA offset is always present.
+
+See extract_unwind_tables.py for details about how this data is extracted from
+breakpad symbol files.
+*/
+
+extern "C" {
+extern char __executable_start;
+extern char _etext;
+}
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// The value of index when the function does not have unwind information.
+constexpr uint32_t kCantUnwind = 0xFFFF;
+
+// The mask on the CFI row data that is used to get the high 14 bits and
+// multiply it by 4 to get CFA offset. Since the last 2 bits are masked out, a
+// shift is not necessary.
+constexpr uint16_t kCFAMask = 0xfffc;
+
+// The mask on the CFI row data that is used to get the low 2 bits and multiply
+// it by 4 to get the RA offset.
+constexpr uint16_t kRAMask = 0x3;
+constexpr uint16_t kRAShift = 2;
+
+// The code in this file assumes we are running in 32-bit builds since all the
+// addresses in the unwind table are specified in 32 bits.
+static_assert(sizeof(uintptr_t) == 4,
+ "The unwind table format is only valid for 32 bit builds.");
+
+// The CFI data in UNW_DATA table starts with number of rows (N) and then
+// followed by N rows of 4 bytes long. The CFIUnwindDataRow represents a single
+// row of CFI data of a function in the table. Since we cast the memory at the
+// address after the address of number of rows, into an array of
+// CFIUnwindDataRow, the size of the struct should be 4 bytes and the order of
+// the members is fixed according to the given format. The first 2 bytes tell
+// the address of function and last 2 bytes give the CFI data for the offset.
+struct CFIUnwindDataRow {
+ // The address of the instruction in terms of offset from the start of the
+ // function.
+ uint16_t addr_offset;
+ // Represents the CFA and RA offsets to get information about next stack
+ // frame. This is the CFI data at the point before executing the instruction
+ // at |addr_offset| from the start of the function.
+ uint16_t cfi_data;
+
+ // Return the RA offset for the current unwind row.
+ size_t ra_offset() const { return (cfi_data & kRAMask) << kRAShift; }
+
+ // Returns the CFA offset for the current unwind row.
+ size_t cfa_offset() const { return cfi_data & kCFAMask; }
+};
+
+static_assert(
+ sizeof(CFIUnwindDataRow) == 4,
+ "The CFIUnwindDataRow struct must be exactly 4 bytes for searching.");
+
+} // namespace
+
+// static
+CFIBacktraceAndroid* CFIBacktraceAndroid::GetInitializedInstance() {
+ static CFIBacktraceAndroid* instance = new CFIBacktraceAndroid();
+ return instance;
+}
+
+CFIBacktraceAndroid::CFIBacktraceAndroid()
+ : thread_local_cfi_cache_(
+ [](void* ptr) { delete static_cast<CFICache*>(ptr); }) {
+ Initialize();
+}
+
+CFIBacktraceAndroid::~CFIBacktraceAndroid() {}
+
+void CFIBacktraceAndroid::Initialize() {
+ // The address |_etext| gives the end of the .text section in the binary. This
+ // value is more accurate than parsing the memory map since the mapped
+ // regions are usualy larger than the .text section.
+ executable_end_addr_ = reinterpret_cast<uintptr_t>(&_etext);
+ // The address of |__executable_start| gives the start address of the
+ // executable. This value is used to find the offset address of the
+ // instruction in binary from PC.
+ executable_start_addr_ = reinterpret_cast<uintptr_t>(&__executable_start);
+
+ // This file name is defined by extract_unwind_tables.gni.
+ static constexpr char kCfiFileName[] = "assets/unwind_cfi_32";
+ MemoryMappedFile::Region cfi_region;
+ int fd = base::android::OpenApkAsset(kCfiFileName, &cfi_region);
+ if (fd < 0)
+ return;
+ cfi_mmap_ = std::make_unique<MemoryMappedFile>();
+ // The CFI region starts at |cfi_region.offset|.
+ if (!cfi_mmap_->Initialize(base::File(fd), cfi_region))
+ return;
+
+ ParseCFITables();
+ can_unwind_stack_frames_ = true;
+}
+
+void CFIBacktraceAndroid::ParseCFITables() {
+ // The first 4 bytes in the file is the size of UNW_INDEX table.
+ static constexpr size_t kUnwIndexRowSize =
+ sizeof(*unw_index_function_col_) + sizeof(*unw_index_indices_col_);
+ size_t unw_index_size = 0;
+ memcpy(&unw_index_size, cfi_mmap_->data(), sizeof(unw_index_size));
+ DCHECK_EQ(0u, unw_index_size % kUnwIndexRowSize);
+ // UNW_INDEX table starts after 4 bytes.
+ unw_index_function_col_ =
+ reinterpret_cast<const uintptr_t*>(cfi_mmap_->data()) + 1;
+ unw_index_row_count_ = unw_index_size / kUnwIndexRowSize;
+ unw_index_indices_col_ = reinterpret_cast<const uint16_t*>(
+ unw_index_function_col_ + unw_index_row_count_);
+
+ // The UNW_DATA table data is right after the end of UNW_INDEX table.
+ // Interpret the UNW_DATA table as an array of 2 byte numbers since the
+ // indexes we have from the UNW_INDEX table are in terms of 2 bytes.
+ unw_data_start_addr_ = unw_index_indices_col_ + unw_index_row_count_;
+}
+
+size_t CFIBacktraceAndroid::Unwind(const void** out_trace, size_t max_depth) {
+ // This function walks the stack using the call frame information to find the
+ // return addresses of all the functions that belong to current binary in call
+ // stack. For each function the CFI table defines the offset of the previous
+ // call frame and offset where the return address is stored.
+ if (!can_unwind_stack_frames())
+ return 0;
+
+ // Get the current register state. This register state can be taken at any
+ // point in the function and the unwind information would be for this point.
+ // Define local variables before trying to get the current PC and SP to make
+ // sure the register state obtained is consistent with each other.
+ uintptr_t pc = 0, sp = 0;
+ asm volatile("mov %0, pc" : "=r"(pc));
+ asm volatile("mov %0, sp" : "=r"(sp));
+
+ // We can only unwind as long as the pc is within the chrome.so.
+ size_t depth = 0;
+ while (pc > executable_start_addr_ && pc <= executable_end_addr_ &&
+ depth < max_depth) {
+ out_trace[depth++] = reinterpret_cast<void*>(pc);
+ // The offset of function from the start of the chrome.so binary:
+ uintptr_t func_addr = pc - executable_start_addr_;
+ CFIRow cfi{};
+ if (!FindCFIRowForPC(func_addr, &cfi))
+ break;
+
+ // The rules for unwinding using the CFI information are:
+ // SP_prev = SP_cur + cfa_offset and
+ // PC_prev = * (SP_prev - ra_offset).
+ sp = sp + cfi.cfa_offset;
+ memcpy(&pc, reinterpret_cast<uintptr_t*>(sp - cfi.ra_offset),
+ sizeof(uintptr_t));
+ }
+ return depth;
+}
+
+bool CFIBacktraceAndroid::FindCFIRowForPC(uintptr_t func_addr,
+ CFIBacktraceAndroid::CFIRow* cfi) {
+ auto* cache = GetThreadLocalCFICache();
+ *cfi = {0};
+ if (cache->Find(func_addr, cfi))
+ return true;
+
+ // Consider each column of UNW_INDEX table as arrays of uintptr_t (function
+ // addresses) and uint16_t (indices). Define start and end iterator on the
+ // first column array (addresses) and use std::lower_bound() to binary search
+ // on this array to find the required function address.
+ static const uintptr_t* const unw_index_fn_end =
+ unw_index_function_col_ + unw_index_row_count_;
+ const uintptr_t* found =
+ std::lower_bound(unw_index_function_col_, unw_index_fn_end, func_addr);
+
+ // If found is start, then the given function is not in the table. If the
+ // given pc is start of a function then we cannot unwind.
+ if (found == unw_index_function_col_ || *found == func_addr)
+ return false;
+
+ // std::lower_bound() returns the iter that corresponds to the first address
+ // that is greater than the given address. So, the required iter is always one
+ // less than the value returned by std::lower_bound().
+ --found;
+ uintptr_t func_start_addr = *found;
+ size_t row_num = found - unw_index_function_col_;
+ uint16_t index = unw_index_indices_col_[row_num];
+ DCHECK_LE(func_start_addr, func_addr);
+ // If the index is CANT_UNWIND then we do not have unwind infomation for the
+ // function.
+ if (index == kCantUnwind)
+ return false;
+
+ // The unwind data for the current function is at an offsset of the index
+ // found in UNW_INDEX table.
+ const uint16_t* unwind_data = unw_data_start_addr_ + index;
+ // The value of first 2 bytes is the CFI data row count for the function.
+ uint16_t row_count = 0;
+ memcpy(&row_count, unwind_data, sizeof(row_count));
+ // And the actual CFI rows start after 2 bytes from the |unwind_data|. Cast
+ // the data into an array of CFIUnwindDataRow since the struct is designed to
+ // represent each row. We should be careful to read only |row_count| number of
+ // elements in the array.
+ const CFIUnwindDataRow* function_data =
+ reinterpret_cast<const CFIUnwindDataRow*>(unwind_data + 1);
+
+ // Iterate through the CFI rows of the function to find the row that gives
+ // offset for the given instruction address.
+ CFIUnwindDataRow cfi_row = {0, 0};
+ uint16_t ra_offset = 0;
+ for (uint16_t i = 0; i < row_count; ++i) {
+ CFIUnwindDataRow row;
+ memcpy(&row, function_data + i, sizeof(CFIUnwindDataRow));
+ // The return address of the function is the instruction that is not yet
+ // been executed. The cfi row specifies the unwind info before executing the
+ // given instruction. If the given address is equal to the instruction
+ // offset, then use the current row. Or use the row with highest address
+ // less than the given address.
+ if (row.addr_offset + func_start_addr > func_addr)
+ break;
+
+ cfi_row = row;
+ // The ra offset of the last specified row should be used, if unspecified.
+ // So, keep updating the RA offset till we reach the correct CFI row.
+ // TODO(ssid): This should be fixed in the format and we should always
+ // output ra offset.
+ if (cfi_row.ra_offset())
+ ra_offset = cfi_row.ra_offset();
+ }
+ DCHECK_NE(0u, cfi_row.addr_offset);
+ *cfi = {cfi_row.cfa_offset(), ra_offset};
+ DCHECK(cfi->cfa_offset);
+ DCHECK(cfi->ra_offset);
+
+ // safe to update since the cache is thread local.
+ cache->Add(func_addr, *cfi);
+ return true;
+}
+
+CFIBacktraceAndroid::CFICache* CFIBacktraceAndroid::GetThreadLocalCFICache() {
+ auto* cache = static_cast<CFICache*>(thread_local_cfi_cache_.Get());
+ if (!cache) {
+ cache = new CFICache();
+ thread_local_cfi_cache_.Set(cache);
+ }
+ return cache;
+}
+
+void CFIBacktraceAndroid::CFICache::Add(uintptr_t address, CFIRow cfi) {
+ cache_[address % kLimit] = {address, cfi};
+}
+
+bool CFIBacktraceAndroid::CFICache::Find(uintptr_t address, CFIRow* cfi) {
+ if (cache_[address % kLimit].address == address) {
+ *cfi = cache_[address % kLimit].cfi;
+ return true;
+ }
+ return false;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/cfi_backtrace_android.h b/chromium/base/trace_event/cfi_backtrace_android.h
new file mode 100644
index 00000000000..0c513321c89
--- /dev/null
+++ b/chromium/base/trace_event/cfi_backtrace_android.h
@@ -0,0 +1,157 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
+#define BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/gtest_prod_util.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace trace_event {
+
+// This class is used to unwind stack frames in the current thread. The unwind
+// information (dwarf debug info) is stripped from the chrome binary and we do
+// not build with exception tables (ARM EHABI) in release builds. So, we use a
+// custom unwind table which is generated and added to specific android builds,
+// when add_unwind_tables_in_apk build option is specified. This unwind table
+// contains information for unwinding stack frames when the functions calls are
+// from lib[mono]chrome.so. The file is added as an asset to the apk and the
+// table is used to unwind stack frames for profiling. This class implements
+// methods to read and parse the unwind table and unwind stack frames using this
+// data.
+class BASE_EXPORT CFIBacktraceAndroid {
+ public:
+ // Creates and initializes by memory mapping the unwind tables from apk assets
+ // on first call.
+ static CFIBacktraceAndroid* GetInitializedInstance();
+
+ // Returns true if stack unwinding is possible using CFI unwind tables in apk.
+ // There is no need to check this before each unwind call. Will always return
+ // the same value based on CFI tables being present in the binary.
+ bool can_unwind_stack_frames() const { return can_unwind_stack_frames_; }
+
+ // Returns the program counters by unwinding stack in the current thread in
+ // order of latest call frame first. Unwinding works only if
+ // can_unwind_stack_frames() returns true. This function allocates memory from
+ // heap for caches. For each stack frame, this method searches through the
+ // unwind table mapped in memory to find the unwind information for function
+ // and walks the stack to find all the return address. This only works until
+ // the last function call from the chrome.so. We do not have unwind
+ // information to unwind beyond any frame outside of chrome.so. Calls to
+ // Unwind() are thread safe and lock free, once Initialize() returns success.
+ size_t Unwind(const void** out_trace, size_t max_depth);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
+ FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestFindCFIRow);
+ FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestUnwinding);
+
+ // The CFI information that correspond to an instruction.
+ struct CFIRow {
+ bool operator==(const CFIBacktraceAndroid::CFIRow& o) const {
+ return cfa_offset == o.cfa_offset && ra_offset == o.ra_offset;
+ }
+
+ // The offset of the call frame address of previous function from the
+ // current stack pointer. Rule for unwinding SP: SP_prev = SP_cur +
+ // cfa_offset.
+ uint16_t cfa_offset = 0;
+ // The offset of location of return address from the previous call frame
+ // address. Rule for unwinding PC: PC_prev = * (SP_prev - ra_offset).
+ uint16_t ra_offset = 0;
+ };
+
+ // A simple cache that stores entries in table using prime modulo hashing.
+ // This cache with 500 entries already gives us 95% hit rate, and fits in a
+ // single system page (usually 4KiB). Using a thread local cache for each
+ // thread gives us 30% improvements on performance of heap profiling.
+ class CFICache {
+ public:
+ // Add new item to the cache. It replaces an existing item with same hash.
+ // Constant time operation.
+ void Add(uintptr_t address, CFIRow cfi);
+
+ // Finds the given address and fills |cfi| with the info for the address.
+ // returns true if found, otherwise false. Assumes |address| is never 0.
+ bool Find(uintptr_t address, CFIRow* cfi);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
+
+ // Size is the highest prime which fits the cache in a single system page,
+ // usually 4KiB. A prime is chosen to make sure addresses are hashed evenly.
+ static const int kLimit = 509;
+
+ struct AddrAndCFI {
+ uintptr_t address;
+ CFIRow cfi;
+ };
+ AddrAndCFI cache_[kLimit] = {};
+ };
+
+ static_assert(sizeof(CFIBacktraceAndroid::CFICache) < 4096,
+ "The cache does not fit in a single page.");
+
+ CFIBacktraceAndroid();
+ ~CFIBacktraceAndroid();
+
+ // Initializes unwind tables using the CFI asset file in the apk if present.
+ // Also stores the limits of mapped region of the lib[mono]chrome.so binary,
+ // since the unwind is only feasible for addresses within the .so file. Once
+ // initialized, the memory map of the unwind table is never cleared since we
+ // cannot guarantee that all the threads are done using the memory map when
+ // heap profiling is turned off. But since we keep the memory map is clean,
+ // the system can choose to evict the unused pages when needed. This would
+ // still reduce the total amount of address space available in process.
+ void Initialize();
+
+ // Finds the UNW_INDEX and UNW_DATA tables in from the CFI file memory map.
+ void ParseCFITables();
+
+ // Finds the CFI row for the given |func_addr| in terms of offset from
+ // the start of the current binary.
+ bool FindCFIRowForPC(uintptr_t func_addr, CFIRow* out);
+
+ CFICache* GetThreadLocalCFICache();
+
+ // Details about the memory mapped region which contains the libchrome.so
+ // library file.
+ uintptr_t executable_start_addr_ = 0;
+ uintptr_t executable_end_addr_ = 0;
+
+ // The start address of the memory mapped unwind table asset file. Unique ptr
+ // because it is replaced in tests.
+ std::unique_ptr<MemoryMappedFile> cfi_mmap_;
+
+ // The UNW_INDEX table: Start address of the function address column. The
+ // memory segment corresponding to this column is treated as an array of
+ // uintptr_t.
+ const uintptr_t* unw_index_function_col_ = nullptr;
+ // The UNW_INDEX table: Start address of the index column. The memory segment
+ // corresponding to this column is treated as an array of uint16_t.
+ const uint16_t* unw_index_indices_col_ = nullptr;
+ // The number of rows in UNW_INDEX table.
+ size_t unw_index_row_count_ = 0;
+
+ // The start address of UNW_DATA table.
+ const uint16_t* unw_data_start_addr_ = nullptr;
+
+ bool can_unwind_stack_frames_ = false;
+
+ ThreadLocalStorage::Slot thread_local_cfi_cache_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
diff --git a/chromium/base/trace_event/cfi_backtrace_android_unittest.cc b/chromium/base/trace_event/cfi_backtrace_android_unittest.cc
new file mode 100644
index 00000000000..3ad3d330422
--- /dev/null
+++ b/chromium/base/trace_event/cfi_backtrace_android_unittest.cc
@@ -0,0 +1,197 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/cfi_backtrace_android.h"
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+void* GetPC() {
+ return __builtin_return_address(0);
+}
+
+} // namespace
+
+TEST(CFIBacktraceAndroidTest, TestUnwinding) {
+ auto* unwinder = CFIBacktraceAndroid::GetInitializedInstance();
+ EXPECT_TRUE(unwinder->can_unwind_stack_frames());
+ EXPECT_GT(unwinder->executable_start_addr_, 0u);
+ EXPECT_GT(unwinder->executable_end_addr_, unwinder->executable_start_addr_);
+ EXPECT_GT(unwinder->cfi_mmap_->length(), 0u);
+
+ const size_t kMaxFrames = 100;
+ const void* frames[kMaxFrames];
+ size_t unwind_count = unwinder->Unwind(frames, kMaxFrames);
+ // Expect at least 2 frames in the result.
+ ASSERT_GT(unwind_count, 2u);
+ EXPECT_LE(unwind_count, kMaxFrames);
+
+ const size_t kMaxCurrentFuncCodeSize = 50;
+ const uintptr_t current_pc = reinterpret_cast<uintptr_t>(GetPC());
+ const uintptr_t actual_frame = reinterpret_cast<uintptr_t>(frames[2]);
+ EXPECT_NEAR(current_pc, actual_frame, kMaxCurrentFuncCodeSize);
+
+ for (size_t i = 0; i < unwind_count; ++i) {
+ EXPECT_GT(reinterpret_cast<uintptr_t>(frames[i]),
+ unwinder->executable_start_addr_);
+ EXPECT_LT(reinterpret_cast<uintptr_t>(frames[i]),
+ unwinder->executable_end_addr_);
+ }
+}
+
+// Flaky: https://bugs.chromium.org/p/chromium/issues/detail?id=829555
+TEST(CFIBacktraceAndroidTest, DISABLED_TestFindCFIRow) {
+ auto* unwinder = CFIBacktraceAndroid::GetInitializedInstance();
+ /* Input is generated from the CFI file:
+ STACK CFI INIT 1000 500
+ STACK CFI 1002 .cfa: sp 272 + .ra: .cfa -4 + ^ r4: .cfa -16 +
+ STACK CFI 1008 .cfa: sp 544 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+ STACK CFI 1040 .cfa: sp 816 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+ STACK CFI 1050 .cfa: sp 816 + .ra: .cfa -8 + ^ r4: .cfa -16 + ^
+ STACK CFI 1080 .cfa: sp 544 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+
+ STACK CFI INIT 2000 22
+ STACK CFI 2004 .cfa: sp 16 + .ra: .cfa -12 + ^ r4: .cfa -16 + ^
+ STACK CFI 2008 .cfa: sp 16 + .ra: .cfa -12 + ^ r4: .cfa -16 + ^
+
+ STACK CFI INIT 2024 100
+ STACK CFI 2030 .cfa: sp 48 + .ra: .cfa -12 + ^ r4: .cfa -16 + ^
+ STACK CFI 2100 .cfa: sp 64 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+
+ STACK CFI INIT 2200 10
+ STACK CFI 2204 .cfa: sp 44 + .ra: .cfa -8 + ^ r4: .cfa -16 + ^
+ */
+ uint16_t input[] = {// UNW_INDEX size
+ 0x2A,
+
+ // UNW_INDEX address column (4 byte rows).
+ 0x0, 0x1000, 0x0, 0x1502, 0x0, 0x2000, 0x0, 0x2024, 0x0,
+ 0x2126, 0x0, 0x2200, 0x0, 0x2212, 0x0,
+
+ // UNW_INDEX index column (2 byte rows).
+ 0x0, 0xffff, 0xb, 0x10, 0xffff, 0x15, 0xffff,
+
+ // UNW_DATA table.
+ 0x5, 0x2, 0x111, 0x8, 0x220, 0x40, 0x330, 0x50, 0x332,
+ 0x80, 0x220, 0x2, 0x4, 0x13, 0x8, 0x13, 0x2, 0xc, 0x33,
+ 0xdc, 0x40, 0x1, 0x4, 0x2e};
+ FilePath temp_path;
+ CreateTemporaryFile(&temp_path);
+ EXPECT_EQ(
+ static_cast<int>(sizeof(input)),
+ WriteFile(temp_path, reinterpret_cast<char*>(input), sizeof(input)));
+
+ unwinder->cfi_mmap_.reset(new MemoryMappedFile());
+ unwinder->cfi_mmap_->Initialize(temp_path);
+ unwinder->ParseCFITables();
+
+ CFIBacktraceAndroid::CFIRow cfi_row = {0};
+ EXPECT_FALSE(unwinder->FindCFIRowForPC(0x01, &cfi_row));
+ EXPECT_FALSE(unwinder->FindCFIRowForPC(0x100, &cfi_row));
+ EXPECT_FALSE(unwinder->FindCFIRowForPC(0x1502, &cfi_row));
+ EXPECT_FALSE(unwinder->FindCFIRowForPC(0x3000, &cfi_row));
+ EXPECT_FALSE(unwinder->FindCFIRowForPC(0x2024, &cfi_row));
+ EXPECT_FALSE(unwinder->FindCFIRowForPC(0x2212, &cfi_row));
+
+ const CFIBacktraceAndroid::CFIRow kRow1 = {0x110, 0x4};
+ const CFIBacktraceAndroid::CFIRow kRow2 = {0x220, 0x4};
+ const CFIBacktraceAndroid::CFIRow kRow3 = {0x220, 0x8};
+ const CFIBacktraceAndroid::CFIRow kRow4 = {0x30, 0xc};
+ const CFIBacktraceAndroid::CFIRow kRow5 = {0x2c, 0x8};
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1002, &cfi_row));
+ EXPECT_EQ(kRow1, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1003, &cfi_row));
+ EXPECT_EQ(kRow1, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1008, &cfi_row));
+ EXPECT_EQ(kRow2, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1009, &cfi_row));
+ EXPECT_EQ(kRow2, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1039, &cfi_row));
+ EXPECT_EQ(kRow2, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1080, &cfi_row));
+ EXPECT_EQ(kRow3, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1100, &cfi_row));
+ EXPECT_EQ(kRow3, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2050, &cfi_row));
+ EXPECT_EQ(kRow4, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2208, &cfi_row));
+ EXPECT_EQ(kRow5, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2210, &cfi_row));
+ EXPECT_EQ(kRow5, cfi_row);
+
+ // Test if cache is used on the future calls to Find, all addresses should
+ // have different hash. Resetting the memory map to make sure it is never
+ // accessed in Find().
+ unwinder->cfi_mmap_.reset(new MemoryMappedFile());
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1002, &cfi_row));
+ EXPECT_EQ(kRow1, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1003, &cfi_row));
+ EXPECT_EQ(kRow1, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1008, &cfi_row));
+ EXPECT_EQ(kRow2, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1009, &cfi_row));
+ EXPECT_EQ(kRow2, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1039, &cfi_row));
+ EXPECT_EQ(kRow2, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1080, &cfi_row));
+ EXPECT_EQ(kRow3, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1100, &cfi_row));
+ EXPECT_EQ(kRow3, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2050, &cfi_row));
+ EXPECT_EQ(kRow4, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2208, &cfi_row));
+ EXPECT_EQ(kRow5, cfi_row);
+ EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2210, &cfi_row));
+ EXPECT_EQ(kRow5, cfi_row);
+}
+
+TEST(CFIBacktraceAndroidTest, TestCFICache) {
+ // Use ASSERT macros in this function since they are in loop and using EXPECT
+ // prints too many failures.
+ CFIBacktraceAndroid::CFICache cache;
+ CFIBacktraceAndroid::CFIRow cfi;
+
+ // Empty cache should not find anything.
+ EXPECT_FALSE(cache.Find(1, &cfi));
+
+ // Insert 1 - 2*kLimit
+ for (size_t i = 1; i <= 2 * cache.kLimit; ++i) {
+ CFIBacktraceAndroid::CFIRow val = {4 * i, 2 * i};
+ cache.Add(i, val);
+ ASSERT_TRUE(cache.Find(i, &cfi));
+ ASSERT_EQ(cfi, val);
+
+ // Inserting more than kLimit items evicts |i - cache.kLimit| from cache.
+ if (i >= cache.kLimit)
+ ASSERT_FALSE(cache.Find(i - cache.kLimit, &cfi));
+ }
+ // Cache contains kLimit+1 - 2*kLimit.
+
+ // Check that 1 - kLimit cannot be found.
+ for (size_t i = 1; i <= cache.kLimit; ++i) {
+ ASSERT_FALSE(cache.Find(i, &cfi));
+ }
+
+ // Check if kLimit+1 - 2*kLimit still exists in cache.
+ for (size_t i = cache.kLimit + 1; i <= 2 * cache.kLimit; ++i) {
+ CFIBacktraceAndroid::CFIRow val = {4 * i, 2 * i};
+ ASSERT_TRUE(cache.Find(i, &cfi));
+ ASSERT_EQ(cfi, val);
+ }
+
+ // Insert 2*kLimit+1, will evict kLimit.
+ cfi = {1, 1};
+ cache.Add(2 * cache.kLimit + 1, cfi);
+ EXPECT_TRUE(cache.Find(2 * cache.kLimit + 1, &cfi));
+ EXPECT_FALSE(cache.Find(cache.kLimit + 1, &cfi));
+ // Cache contains kLimit+1 - 2*kLimit.
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/common/trace_event_common.h b/chromium/base/trace_event/common/trace_event_common.h
index 51869ee9525..e2a5ca0c8d4 100644
--- a/chromium/base/trace_event/common/trace_event_common.h
+++ b/chromium/base/trace_event/common/trace_event_common.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
+#define BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
+
// This header file defines the set of trace_event macros without specifying
// how the events actually get collected and stored. If you need to expose trace
// events to some other universe, you can copy-and-paste this file as well as
@@ -687,6 +690,11 @@
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -1020,6 +1028,14 @@
} \
} while (0)
+// Macro for getting the real base::TimeTicks::Now() which can be overridden in
+// headless when VirtualTime is enabled.
+#define TRACE_TIME_TICKS_NOW() INTERNAL_TRACE_TIME_TICKS_NOW()
+
+// Macro for getting the real base::Time::Now() which can be overridden in
+// headless when VirtualTime is enabled.
+#define TRACE_TIME_NOW() INTERNAL_TRACE_TIME_NOW()
+
// Notes regarding the following definitions:
// New values can be added and propagated to third party libraries, but existing
// definitions must never be changed, because third party libraries may use old
@@ -1094,3 +1110,5 @@
#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
+#endif // BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index a375d3b9f10..556719e9ae8 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -15,6 +15,11 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+#include "base/trace_event/cfi_backtrace_android.h"
+#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <sys/prctl.h>
@@ -214,20 +219,27 @@ bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
// kMaxFrameCount + 1 frames, so that we know if there are more frames
// than our backtrace capacity.
#if !defined(OS_NACL) // We don't build base/debug/stack_trace.cc for NaCl.
-#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+ const void* frames[Backtrace::kMaxFrameCount + 1];
+ static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
+ "not requesting enough frames to fill Backtrace");
+ size_t frame_count =
+ CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
+ frames, arraysize(frames));
+#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
const void* frames[Backtrace::kMaxFrameCount + 1];
static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
"not requesting enough frames to fill Backtrace");
size_t frame_count = debug::TraceStackFramePointers(
frames, arraysize(frames),
1 /* exclude this function from the trace */);
-#else // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+#else
// Fall-back to capturing the stack with base::debug::StackTrace,
// which is likely slower, but more reliable.
base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount + 1);
size_t frame_count = 0u;
const void* const* frames = stack_trace.Addresses(&frame_count);
-#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+#endif
// If there are too many frames, keep the ones furthest from main().
size_t backtrace_capacity = backtrace_end - backtrace;
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index 78d4b83e1f4..7a9fbfcda74 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -12,6 +12,7 @@
#include "base/allocator/allocator_shim.h"
#include "base/allocator/buildflags.h"
#include "base/debug/profiler.h"
+#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
@@ -154,11 +155,6 @@ struct WinHeapInfo {
// Unfortunately, there is no safe way to collect information from secondary
// heaps due to limitations and racy nature of this piece of WinAPI.
void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
-#if defined(SYZYASAN)
- if (base::debug::IsBinaryInstrumented())
- return;
-#endif
-
// Iterate through whichever heap our CRT is using.
HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
::HeapLock(crt_heap);
@@ -338,6 +334,9 @@ void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
}
void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
+ if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
+ return;
+
// CurrentId() can be a slow operation (crbug.com/497226). This apparently
// redundant condition short circuits the CurrentID() calls when unnecessary.
if (tid_dumping_heap_ != kInvalidThreadId &&
@@ -363,6 +362,9 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
}
void MallocDumpProvider::RemoveAllocation(void* address) {
+ if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
+ return;
+
// No re-entrancy is expected here as none of the calls below should
// cause a free()-s (|allocation_register_| does its own heap management).
if (tid_dumping_heap_ != kInvalidThreadId &&
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 994333cbcac..dfea78a162f 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -17,6 +17,7 @@
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/debug/thread_heap_usage_tracker.h"
+#include "base/memory/ptr_util.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_util.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
@@ -40,8 +41,13 @@
#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
+
+#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+#include "base/trace_event/cfi_backtrace_android.h"
#endif
+#endif // defined(OS_ANDROID)
+
namespace base {
namespace trace_event {
@@ -272,8 +278,15 @@ bool MemoryDumpManager::EnableHeapProfiling(HeapProfilingMode profiling_mode) {
break;
case kHeapProfilingModeNative:
- // If we don't have frame pointers then native tracing falls-back to
- // using base::debug::StackTrace, which may be slow.
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+ {
+ bool can_unwind = CFIBacktraceAndroid::GetInitializedInstance()
+ ->can_unwind_stack_frames();
+ DCHECK(can_unwind);
+ }
+#endif
+ // If we don't have frame pointers and unwind tables then native tracing
+ // falls-back to using base::debug::StackTrace, which may be slow.
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::NATIVE_STACK);
break;
@@ -384,18 +397,15 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
if (dumper_registrations_ignored_for_testing_)
return;
- // A handful of MDPs are required to compute the summary struct these are
- // 'whitelisted for summary mode'. These MDPs are a subset of those which
+ // Only a handful of MDPs are required to compute the memory metrics. These
// have small enough performance overhead that it is resonable to run them
// in the background while the user is doing other things. Those MDPs are
// 'whitelisted for background mode'.
bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
- bool whitelisted_for_summary_mode =
- IsMemoryDumpProviderWhitelistedForSummary(name);
- scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
- mdp, name, std::move(task_runner), options,
- whitelisted_for_background_mode, whitelisted_for_summary_mode);
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
+ new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+ whitelisted_for_background_mode);
if (options.is_fast_polling_supported) {
DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
@@ -596,7 +606,11 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
- if (!IsDumpProviderAllowedToDump(pmd_async_state->req_args, *mdpinfo)) {
+ // If we are in background mode, we should invoke only the whitelisted
+ // providers. Ignore other providers and continue.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND &&
+ !mdpinfo->whitelisted_for_background_mode) {
pmd_async_state->pending_dump_providers.pop_back();
continue;
}
@@ -646,26 +660,6 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
FinishAsyncProcessDump(std::move(pmd_async_state));
}
-bool MemoryDumpManager::IsDumpProviderAllowedToDump(
- const MemoryDumpRequestArgs& req_args,
- const MemoryDumpProviderInfo& mdpinfo) const {
- // If we are in background tracing, we should invoke only the whitelisted
- // providers. Ignore other providers and continue.
- if (req_args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
- !mdpinfo.whitelisted_for_background_mode) {
- return false;
- }
-
- // If we are in summary mode, we only need to invoke the providers
- // whitelisted for summary mode.
- if (req_args.dump_type == MemoryDumpType::SUMMARY_ONLY &&
- !mdpinfo.whitelisted_for_summary_mode) {
- return false;
- }
-
- return true;
-}
-
// This function is called on the right task runner for current MDP. It is
// either the task runner specified by MDP or |dump_thread_task_runner| if the
// MDP did not specify task runner. Invokes the dump provider's OnMemoryDump()
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 593bfe05948..d6237fd8cc4 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -240,7 +240,6 @@ class BASE_EXPORT MemoryDumpManager {
virtual ~MemoryDumpManager();
static void SetInstanceForTesting(MemoryDumpManager* instance);
- static uint32_t GetDumpsSumKb(const std::string&, const ProcessMemoryDump*);
// Lazily initializes dump_thread_ and returns its TaskRunner.
scoped_refptr<base::SequencedTaskRunner> GetOrCreateBgTaskRunnerLocked();
@@ -252,10 +251,6 @@ class BASE_EXPORT MemoryDumpManager {
void ContinueAsyncProcessDump(
ProcessMemoryDumpAsyncState* owned_pmd_async_state);
- // Returns true if the given dump type and mode allows the given MDP to dump.
- bool IsDumpProviderAllowedToDump(const MemoryDumpRequestArgs& req_args,
- const MemoryDumpProviderInfo& mdpinfo) const;
-
// Invokes OnMemoryDump() of the given MDP. Should be called on the MDP task
// runner.
void InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 3a6b58e1b80..de98f1d1e79 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -16,6 +16,7 @@
#include "base/command_line.h"
#include "base/debug/thread_heap_usage_tracker.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
@@ -58,12 +59,7 @@ namespace {
const char* kMDPName = "TestDumpProvider";
const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
-const char* kBackgroundButNotSummaryWhitelistedMDPName =
- "BackgroundButNotSummaryWhitelistedTestDumpProvider";
-const char* const kTestMDPWhitelist[] = {
- kWhitelistedMDPName, kBackgroundButNotSummaryWhitelistedMDPName, nullptr};
-const char* const kTestMDPWhitelistForSummary[] = {kWhitelistedMDPName,
- nullptr};
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
void RegisterDumpProvider(
MemoryDumpProvider* mdp,
@@ -751,23 +747,17 @@ TEST_F(MemoryDumpManagerTest, TriggerDumpWithoutTracing) {
MemoryDumpLevelOfDetail::DETAILED));
}
-TEST_F(MemoryDumpManagerTest, SummaryOnlyWhitelisting) {
- // Summary only MDPs are a subset of background MDPs.
+TEST_F(MemoryDumpManagerTest, BackgroundWhitelisting) {
SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
- SetDumpProviderSummaryWhitelistForTesting(kTestMDPWhitelistForSummary);
// Standard provider with default options (create dump for current process).
- MockMemoryDumpProvider summaryMdp;
- RegisterDumpProvider(&summaryMdp, nullptr, kDefaultOptions,
- kWhitelistedMDPName);
MockMemoryDumpProvider backgroundMdp;
RegisterDumpProvider(&backgroundMdp, nullptr, kDefaultOptions,
- kBackgroundButNotSummaryWhitelistedMDPName);
+ kWhitelistedMDPName);
EnableForTracing();
- EXPECT_CALL(backgroundMdp, OnMemoryDump(_, _)).Times(0);
- EXPECT_CALL(summaryMdp, OnMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(backgroundMdp, OnMemoryDump(_, _)).Times(1);
EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::SUMMARY_ONLY,
MemoryDumpLevelOfDetail::BACKGROUND));
DisableTracing();
@@ -1045,7 +1035,6 @@ class SimpleMockMemoryDumpProvider : public MemoryDumpProvider {
TEST_F(MemoryDumpManagerTest, NoStackOverflowWithTooManyMDPs) {
SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
- SetDumpProviderSummaryWhitelistForTesting(kTestMDPWhitelistForSummary);
int kMDPCount = 1000;
std::vector<std::unique_ptr<SimpleMockMemoryDumpProvider>> mdps;
@@ -1054,11 +1043,6 @@ TEST_F(MemoryDumpManagerTest, NoStackOverflowWithTooManyMDPs) {
RegisterDumpProvider(mdps.back().get(), nullptr);
}
for (int i = 0; i < kMDPCount; ++i) {
- mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(2));
- RegisterDumpProvider(mdps.back().get(), nullptr, kDefaultOptions,
- kBackgroundButNotSummaryWhitelistedMDPName);
- }
- for (int i = 0; i < kMDPCount; ++i) {
mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(3));
RegisterDumpProvider(mdps.back().get(), nullptr, kDefaultOptions,
kWhitelistedMDPName);
diff --git a/chromium/base/trace_event/memory_dump_provider_info.cc b/chromium/base/trace_event/memory_dump_provider_info.cc
index 65eeadf6bef..3220476cf96 100644
--- a/chromium/base/trace_event/memory_dump_provider_info.cc
+++ b/chromium/base/trace_event/memory_dump_provider_info.cc
@@ -16,14 +16,12 @@ MemoryDumpProviderInfo::MemoryDumpProviderInfo(
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode,
- bool whitelisted_for_summary_mode)
+ bool whitelisted_for_background_mode)
: dump_provider(dump_provider),
options(options),
name(name),
task_runner(std::move(task_runner)),
whitelisted_for_background_mode(whitelisted_for_background_mode),
- whitelisted_for_summary_mode(whitelisted_for_summary_mode),
consecutive_failures(0),
disabled(false) {}
diff --git a/chromium/base/trace_event/memory_dump_provider_info.h b/chromium/base/trace_event/memory_dump_provider_info.h
index 9d18b821343..f0ea1e6bbc1 100644
--- a/chromium/base/trace_event/memory_dump_provider_info.h
+++ b/chromium/base/trace_event/memory_dump_provider_info.h
@@ -58,8 +58,7 @@ struct BASE_EXPORT MemoryDumpProviderInfo
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
const MemoryDumpProvider::Options& options,
- bool whitelisted_for_background_mode,
- bool whitelisted_for_summary_mode);
+ bool whitelisted_for_background_mode);
// It is safe to access the const fields below from any thread as they are
// never mutated.
@@ -81,9 +80,6 @@ struct BASE_EXPORT MemoryDumpProviderInfo
// True if the dump provider is whitelisted for background mode.
const bool whitelisted_for_background_mode;
- // True if the dump provider is whitelisted for summary mode.
- const bool whitelisted_for_summary_mode;
-
// These fields below, instead, are not thread safe and can be mutated only:
// - On the |task_runner|, when not null (i.e. for thread-bound MDPS).
// - By the MDM's background thread (or in any other way that guarantees
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index 1862c4e0667..3cb9cabae52 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -45,8 +45,6 @@ const char* MemoryDumpLevelOfDetailToString(
return "background";
case MemoryDumpLevelOfDetail::LIGHT:
return "light";
- case MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER:
- return "vm_regions_only";
case MemoryDumpLevelOfDetail::DETAILED:
return "detailed";
}
@@ -60,8 +58,6 @@ MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
return MemoryDumpLevelOfDetail::BACKGROUND;
if (str == "light")
return MemoryDumpLevelOfDetail::LIGHT;
- if (str == "vm_regions_only")
- return MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER;
if (str == "detailed")
return MemoryDumpLevelOfDetail::DETAILED;
NOTREACHED();
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index a3e9d0c9c7e..41bc99bc724 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -53,9 +53,6 @@ enum class MemoryDumpLevelOfDetail : uint32_t {
// Few entries, typically a fixed number, per dump.
LIGHT,
- // Retrieve only memory maps. Used only for the heap profiler.
- VM_REGIONS_ONLY_FOR_HEAP_PROFILER,
-
// Unrestricted amount of entries per dump.
DETAILED,
diff --git a/chromium/base/trace_event/memory_dump_scheduler.cc b/chromium/base/trace_event/memory_dump_scheduler.cc
index 0332af7569d..8b03f5c90ba 100644
--- a/chromium/base/trace_event/memory_dump_scheduler.cc
+++ b/chromium/base/trace_event/memory_dump_scheduler.cc
@@ -50,11 +50,6 @@ void MemoryDumpScheduler::StartInternal(MemoryDumpScheduler::Config config) {
for (const Config::Trigger& trigger : config.triggers) {
DCHECK_GT(trigger.period_ms, 0u);
switch (trigger.level_of_detail) {
- case MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER:
- // There is no use case to request a periodic dump which contains
- // details that are useful only for the heap-profiler.
- NOTREACHED();
- return;
case MemoryDumpLevelOfDetail::BACKGROUND:
break;
case MemoryDumpLevelOfDetail::LIGHT:
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index 52e196b4687..f89417fcc3c 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -18,10 +18,14 @@ namespace {
// The names of dump providers whitelisted for background tracing. Dump
// providers can be added here only if the background mode dump has very
// little processor and memory overhead.
+// TODO(ssid): Some dump providers do not create ownership edges on background
+// dump. So, the effective size will not be correct.
const char* const kDumpProviderWhitelist[] = {
"android::ResourceManagerImpl",
+ "AutocompleteController",
"BlinkGC",
"BlinkObjectCounters",
+ "BlobStorageContext",
"ClientDiscardableSharedMemoryManager",
"DOMStorage",
"DownloadService",
@@ -29,7 +33,9 @@ const char* const kDumpProviderWhitelist[] = {
"gpu::BufferManager",
"gpu::RenderbufferManager",
"gpu::TextureManager",
+ "FontCaches",
"HistoryReport",
+ "IPCChannel",
"IndexedDBBackingStore",
"InMemoryURLIndex",
"JavaHeap",
@@ -40,35 +46,20 @@ const char* const kDumpProviderWhitelist[] = {
"MemoryCache",
"MojoHandleTable",
"MojoLevelDB",
- "OutOfProcessHeapProfilingDumpProvider",
+ "MojoMessages",
"PartitionAlloc",
"ProcessMemoryMetrics",
- "Skia",
+ "RenderProcessHost",
"SharedMemoryTracker",
+ "Skia",
"Sql",
"URLRequestContext",
"V8Isolate",
- "WinHeap",
"SyncDirectory",
"TabRestoreServiceHelper",
nullptr // End of list marker.
};
-// The names of dump providers whitelisted for summary tracing.
-const char* const kDumpProviderSummaryWhitelist[] = {
- "BlinkGC",
- "BlinkObjectCounters",
- "gpu::BufferManager",
- "gpu::RenderbufferManager",
- "gpu::TextureManager",
- "Malloc",
- "PartitionAlloc",
- "ProcessMemoryMetrics",
- "SharedMemoryTracker",
- "V8Isolate",
- nullptr // End of list marker.
-};
-
// A list of string names that are allowed for the memory allocator dumps in
// background mode.
const char* const kAllocatorDumpNameWhitelist[] = {
@@ -89,6 +80,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"blink_objects/V8PerContextData",
"blink_objects/WorkerGlobalScope",
"blink_objects/UACSSResource",
+ "blink_objects/ResourceFetcher",
"components/download/controller_0x?",
"discardable",
"discardable/child_0x?",
@@ -98,6 +90,8 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"extensions/value_store/Extensions.Database.Open/0x?",
"extensions/value_store/Extensions.Database.Restore/0x?",
"extensions/value_store/Extensions.Database.Value.Restore/0x?",
+ "font_caches/font_platform_data_cache",
+ "font_caches/shape_caches",
"gpu/gl/buffers/share_group_0x?",
"gpu/gl/renderbuffers/share_group_0x?",
"gpu/gl/textures/share_group_0x?",
@@ -118,8 +112,11 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"mojo",
"mojo/data_pipe_consumer",
"mojo/data_pipe_producer",
+ "mojo/messages",
"mojo/message_pipe",
"mojo/platform_handle",
+ "mojo/queued_ipc_channel_message/0x?",
+ "mojo/render_process_host/0x?",
"mojo/shared_buffer",
"mojo/unknown",
"mojo/watcher",
@@ -132,59 +129,106 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"net/url_request_context",
"net/url_request_context/app_request",
"net/url_request_context/app_request/0x?",
+ "net/url_request_context/app_request/0x?/cookie_monster",
+ "net/url_request_context/app_request/0x?/cookie_monster/cookies",
+ "net/url_request_context/app_request/0x?/cookie_monster/"
+ "tasks_pending_global",
+ "net/url_request_context/app_request/0x?/cookie_monster/"
+ "tasks_pending_for_key",
"net/url_request_context/app_request/0x?/http_cache",
"net/url_request_context/app_request/0x?/http_cache/memory_backend",
"net/url_request_context/app_request/0x?/http_cache/simple_backend",
"net/url_request_context/app_request/0x?/http_network_session",
"net/url_request_context/extensions",
"net/url_request_context/extensions/0x?",
+ "net/url_request_context/extensions/0x?/cookie_monster",
+ "net/url_request_context/extensions/0x?/cookie_monster/cookies",
+ "net/url_request_context/extensions/0x?/cookie_monster/"
+ "tasks_pending_global",
+ "net/url_request_context/extensions/0x?/cookie_monster/"
+ "tasks_pending_for_key",
"net/url_request_context/extensions/0x?/http_cache",
"net/url_request_context/extensions/0x?/http_cache/memory_backend",
"net/url_request_context/extensions/0x?/http_cache/simple_backend",
"net/url_request_context/extensions/0x?/http_network_session",
"net/url_request_context/isolated_media",
"net/url_request_context/isolated_media/0x?",
+ "net/url_request_context/isolated_media/0x?/cookie_monster",
+ "net/url_request_context/isolated_media/0x?/cookie_monster/cookies",
+ "net/url_request_context/isolated_media/0x?/cookie_monster/"
+ "tasks_pending_global",
+ "net/url_request_context/isolated_media/0x?/cookie_monster/"
+ "tasks_pending_for_key",
"net/url_request_context/isolated_media/0x?/http_cache",
"net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
"net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
"net/url_request_context/isolated_media/0x?/http_network_session",
"net/url_request_context/main",
"net/url_request_context/main/0x?",
+ "net/url_request_context/main/0x?/cookie_monster",
+ "net/url_request_context/main/0x?/cookie_monster/cookies",
+ "net/url_request_context/main/0x?/cookie_monster/tasks_pending_global",
+ "net/url_request_context/main/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/main/0x?/http_cache",
"net/url_request_context/main/0x?/http_cache/memory_backend",
"net/url_request_context/main/0x?/http_cache/simple_backend",
"net/url_request_context/main/0x?/http_network_session",
"net/url_request_context/main_media",
"net/url_request_context/main_media/0x?",
+ "net/url_request_context/main_media/0x?/cookie_monster",
+ "net/url_request_context/main_media/0x?/cookie_monster/cookies",
+ "net/url_request_context/main_media/0x?/cookie_monster/"
+ "tasks_pending_global",
+ "net/url_request_context/main_media/0x?/cookie_monster/"
+ "tasks_pending_for_key",
"net/url_request_context/main_media/0x?/http_cache",
"net/url_request_context/main_media/0x?/http_cache/memory_backend",
"net/url_request_context/main_media/0x?/http_cache/simple_backend",
"net/url_request_context/main_media/0x?/http_network_session",
"net/url_request_context/proxy",
"net/url_request_context/proxy/0x?",
+ "net/url_request_context/proxy/0x?/cookie_monster",
+ "net/url_request_context/proxy/0x?/cookie_monster/cookies",
+ "net/url_request_context/proxy/0x?/cookie_monster/tasks_pending_global",
+ "net/url_request_context/proxy/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/proxy/0x?/http_cache",
"net/url_request_context/proxy/0x?/http_cache/memory_backend",
"net/url_request_context/proxy/0x?/http_cache/simple_backend",
"net/url_request_context/proxy/0x?/http_network_session",
"net/url_request_context/safe_browsing",
"net/url_request_context/safe_browsing/0x?",
+ "net/url_request_context/safe_browsing/0x?/cookie_monster",
+ "net/url_request_context/safe_browsing/0x?/cookie_monster/cookies",
+ "net/url_request_context/safe_browsing/0x?/cookie_monster/"
+ "tasks_pending_global",
+ "net/url_request_context/safe_browsing/0x?/cookie_monster/"
+ "tasks_pending_for_key",
"net/url_request_context/safe_browsing/0x?/http_cache",
"net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
"net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
"net/url_request_context/safe_browsing/0x?/http_network_session",
"net/url_request_context/system",
"net/url_request_context/system/0x?",
+ "net/url_request_context/system/0x?/cookie_monster",
+ "net/url_request_context/system/0x?/cookie_monster/cookies",
+ "net/url_request_context/system/0x?/cookie_monster/tasks_pending_global",
+ "net/url_request_context/system/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/system/0x?/http_cache",
"net/url_request_context/system/0x?/http_cache/memory_backend",
"net/url_request_context/system/0x?/http_cache/simple_backend",
"net/url_request_context/system/0x?/http_network_session",
"net/url_request_context/unknown",
"net/url_request_context/unknown/0x?",
+ "net/url_request_context/unknown/0x?/cookie_monster",
+ "net/url_request_context/unknown/0x?/cookie_monster/cookies",
+ "net/url_request_context/unknown/0x?/cookie_monster/tasks_pending_global",
+ "net/url_request_context/unknown/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/unknown/0x?/http_cache",
"net/url_request_context/unknown/0x?/http_cache/memory_backend",
"net/url_request_context/unknown/0x?/http_cache/simple_backend",
"net/url_request_context/unknown/0x?/http_network_session",
- "omnibox/in_memory_url_index_0x?",
+ "omnibox/autocomplete_controller/0x?",
+ "omnibox/in_memory_url_index/0x?",
"web_cache/Image_resources",
"web_cache/CSS stylesheet_resources",
"web_cache/Script_resources",
@@ -211,15 +255,15 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"v8/isolate_0x?/heap_spaces/new_space",
"v8/isolate_0x?/heap_spaces/old_space",
"v8/isolate_0x?/heap_spaces/other_spaces",
+ "v8/isolate_0x?/heap_spaces/read_only_space",
"v8/isolate_0x?/malloc",
"v8/isolate_0x?/zapped_for_debug",
- "winheap",
- "winheap/allocated_objects",
+ "site_storage/blob_storage/0x?",
"site_storage/index_db/0x?",
- "site_storage/localstorage_0x?/cache_size",
- "site_storage/localstorage_0x?/leveldb",
- "site_storage/session_storage_0x?",
- "site_storage/session_storage_0x?/cache_size",
+ "site_storage/localstorage/0x?/cache_size",
+ "site_storage/localstorage/0x?/leveldb",
+ "site_storage/session_storage/0x?",
+ "site_storage/session_storage/0x?/cache_size",
"sync/0x?/kernel",
"sync/0x?/store",
"sync/0x?/model_type/APP",
@@ -256,6 +300,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
"sync/0x?/model_type/THEME",
"sync/0x?/model_type/TYPED_URL",
+ "sync/0x?/model_type/USER_EVENT",
"sync/0x?/model_type/WALLET_METADATA",
"sync/0x?/model_type/WIFI_CREDENTIAL",
"tab_restore/service_helper_0x?/entries",
@@ -268,8 +313,6 @@ const char* const kAllocatorDumpNameWhitelist[] = {
};
const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
-const char* const* g_dump_provider_whitelist_for_summary =
- kDumpProviderSummaryWhitelist;
const char* const* g_allocator_dump_name_whitelist =
kAllocatorDumpNameWhitelist;
@@ -287,11 +330,6 @@ bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
return IsMemoryDumpProviderInList(mdp_name, g_dump_provider_whitelist);
}
-bool IsMemoryDumpProviderWhitelistedForSummary(const char* mdp_name) {
- return IsMemoryDumpProviderInList(mdp_name,
- g_dump_provider_whitelist_for_summary);
-}
-
bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
// Global dumps are explicitly whitelisted for background use.
if (base::StartsWith(name, "global/", CompareCase::SENSITIVE)) {
@@ -339,10 +377,6 @@ void SetDumpProviderWhitelistForTesting(const char* const* list) {
g_dump_provider_whitelist = list;
}
-void SetDumpProviderSummaryWhitelistForTesting(const char* const* list) {
- g_dump_provider_whitelist_for_summary = list;
-}
-
void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
g_allocator_dump_name_whitelist = list;
}
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.h b/chromium/base/trace_event/memory_infra_background_whitelist.h
index 11900109af7..b8d704ae241 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.h
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.h
@@ -18,10 +18,6 @@ namespace trace_event {
// Checks if the given |mdp_name| is in the whitelist.
bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
-// Checks if the given |mdp_name| is required for summary dumps.
-bool BASE_EXPORT
-IsMemoryDumpProviderWhitelistedForSummary(const char* mdp_name);
-
// Checks if the given |name| matches any of the whitelisted patterns.
bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
@@ -29,8 +25,6 @@ bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
// the list must be nullptr.
void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
void BASE_EXPORT
-SetDumpProviderSummaryWhitelistForTesting(const char* const* list);
-void BASE_EXPORT
SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
} // namespace trace_event
diff --git a/chromium/base/trace_event/memory_peak_detector_unittest.cc b/chromium/base/trace_event/memory_peak_detector_unittest.cc
index bd70e3e98ac..bc10c80d921 100644
--- a/chromium/base/trace_event/memory_peak_detector_unittest.cc
+++ b/chromium/base/trace_event/memory_peak_detector_unittest.cc
@@ -176,10 +176,9 @@ class MemoryPeakDetectorTest : public testing::Test {
std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
MemoryDumpProvider::Options opt;
opt.is_fast_polling_supported = true;
- scoped_refptr<MemoryDumpProviderInfo> mdp_info(
- new MemoryDumpProviderInfo(mdp.get(), "Mock MDP", nullptr, opt,
- false /* whitelisted_for_background_mode */,
- false /* whitelisted_for_summary_mode */));
+ scoped_refptr<MemoryDumpProviderInfo> mdp_info(new MemoryDumpProviderInfo(
+ mdp.get(), "Mock MDP", nullptr, opt,
+ false /* whitelisted_for_background_mode */));
// The |mdp| instance will be destroyed together with the |mdp_info|.
mdp_info->owned_dump_provider = std::move(mdp);
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 8bd89737040..8313caa7e0a 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -9,7 +9,6 @@
#include <vector>
#include "base/memory/ptr_util.h"
-#include "base/memory/shared_memory.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
@@ -84,7 +83,7 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
DCHECK_EQ(0u, start_pointer % page_size);
size_t offset = 0;
- size_t total_resident_size = 0;
+ size_t total_resident_pages = 0;
bool failure = false;
// An array as large as number of pages in memory segment needs to be passed
@@ -150,27 +149,28 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
if (failure)
break;
- total_resident_size += resident_page_count * page_size;
+ total_resident_pages += resident_page_count * page_size;
offset += kMaxChunkSize;
}
DCHECK(!failure);
if (failure) {
- total_resident_size = 0;
+ total_resident_pages = 0;
LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
}
- return total_resident_size;
+ return total_resident_pages;
}
// static
base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
- const SharedMemory& shared_memory) {
+ void* start_address,
+ size_t mapped_size) {
#if defined(OS_MACOSX) && !defined(OS_IOS)
// On macOS, use mach_vm_region instead of mincore for performance
// (crbug.com/742042).
mach_vm_size_t dummy_size = 0;
mach_vm_address_t address =
- reinterpret_cast<mach_vm_address_t>(shared_memory.memory());
+ reinterpret_cast<mach_vm_address_t>(start_address);
vm_region_top_info_data_t info;
MachVMRegionResult result =
GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
@@ -180,12 +180,50 @@ base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
return base::Optional<size_t>();
}
- size_t resident_size =
+ size_t resident_pages =
info.private_pages_resident + info.shared_pages_resident;
- return resident_size * PAGE_SIZE;
+
+ // On macOS, measurements for private memory footprint overcount by
+ // faulted pages in anonymous shared memory. To discount for this, we touch
+ // all the resident pages in anonymous shared memory here, thus making them
+ // faulted as well. This relies on two assumptions:
+ //
+ // 1) Consumers use shared memory from front to back. Thus, if there are
+ // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes in
+ // the shared memory region.
+ //
+ // 2) This logic is run shortly before the logic that calculates
+ // phys_footprint, thus ensuring that the discrepancy between faulted and
+ // resident pages is minimal.
+ //
+ // The performance penalty is expected to be small.
+ //
+ // * Most of the time, we expect the pages to already be resident and faulted,
+ // thus incurring a cache penalty read hit [since we read from each resident
+ // page].
+ //
+ // * Rarely, we expect the pages to be resident but not faulted, resulting in
+ // soft faults + cache penalty.
+ //
+ // * If assumption (1) is invalid, this will potentially fault some
+ // previously non-resident pages, thus increasing memory usage, without fixing
+ // the accounting.
+ //
+ // Sanity check in case the mapped size is less than the total size of the
+ // region.
+ size_t pages_to_fault =
+ std::min(resident_pages, (mapped_size + PAGE_SIZE - 1) / PAGE_SIZE);
+
+ volatile char* base_address = static_cast<char*>(start_address);
+ for (size_t i = 0; i < pages_to_fault; ++i) {
+ // Reading from a volatile is a visible side-effect for the purposes of
+ // optimization. This guarantees that the optimizer will not kill this line.
+ base_address[i * PAGE_SIZE];
+ }
+
+ return resident_pages * PAGE_SIZE;
#else
- return CountResidentBytes(shared_memory.memory(),
- shared_memory.mapped_size());
+ return CountResidentBytes(start_address, mapped_size);
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
}
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index ec83c3cd6b4..345531d72f5 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -74,10 +74,11 @@ class BASE_EXPORT ProcessMemoryDump {
// process. The |start_address| must be page-aligned.
static size_t CountResidentBytes(void* start_address, size_t mapped_size);
- // Returns the total bytes resident for the given |shared_memory|'s mapped
- // region.
+ // The same as above, but the given mapped range should belong to the
+ // shared_memory's mapped region.
static base::Optional<size_t> CountResidentBytesInSharedMemory(
- const SharedMemory& shared_memory);
+ void* start_address,
+ size_t mapped_size);
#endif
ProcessMemoryDump(scoped_refptr<HeapProfilerSerializationState>
diff --git a/chromium/base/trace_event/process_memory_dump_unittest.cc b/chromium/base/trace_event/process_memory_dump_unittest.cc
index 934f986087f..8aed15a22bc 100644
--- a/chromium/base/trace_event/process_memory_dump_unittest.cc
+++ b/chromium/base/trace_event/process_memory_dump_unittest.cc
@@ -547,7 +547,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
shared_memory1.CreateAndMapAnonymous(size1);
memset(shared_memory1.memory(), 0, size1);
base::Optional<size_t> res1 =
- ProcessMemoryDump::CountResidentBytesInSharedMemory(shared_memory1);
+ ProcessMemoryDump::CountResidentBytesInSharedMemory(
+ shared_memory1.memory(), shared_memory1.mapped_size());
ASSERT_TRUE(res1.has_value());
ASSERT_EQ(res1.value(), size1);
shared_memory1.Unmap();
@@ -559,7 +560,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
shared_memory2.CreateAndMapAnonymous(kVeryLargeMemorySize);
memset(shared_memory2.memory(), 0, kVeryLargeMemorySize);
base::Optional<size_t> res2 =
- ProcessMemoryDump::CountResidentBytesInSharedMemory(shared_memory2);
+ ProcessMemoryDump::CountResidentBytesInSharedMemory(
+ shared_memory2.memory(), shared_memory2.mapped_size());
ASSERT_TRUE(res2.has_value());
ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
shared_memory2.Unmap();
@@ -571,7 +573,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
shared_memory3.CreateAndMapAnonymous(kVeryLargeMemorySize);
memset(shared_memory3.memory(), 0, kTouchedMemorySize);
base::Optional<size_t> res3 =
- ProcessMemoryDump::CountResidentBytesInSharedMemory(shared_memory3);
+ ProcessMemoryDump::CountResidentBytesInSharedMemory(
+ shared_memory3.memory(), shared_memory3.mapped_size());
ASSERT_TRUE(res3.has_value());
ASSERT_EQ(res3.value(), kTouchedMemorySize);
shared_memory3.Unmap();
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 5926c9541bd..624a29c47af 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -182,6 +182,23 @@ bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
+// static
+std::string TraceConfig::TraceRecordModeToStr(TraceRecordMode record_mode) {
+ switch (record_mode) {
+ case RECORD_UNTIL_FULL:
+ return kRecordUntilFull;
+ case RECORD_CONTINUOUSLY:
+ return kRecordContinuously;
+ case RECORD_AS_MUCH_AS_POSSIBLE:
+ return kRecordAsMuchAsPossible;
+ case ECHO_TO_CONSOLE:
+ return kTraceToConsole;
+ default:
+ NOTREACHED();
+ }
+ return kRecordUntilFull;
+}
+
TraceConfig::TraceConfig() {
InitializeDefault();
}
@@ -193,24 +210,8 @@ TraceConfig::TraceConfig(StringPiece category_filter_string,
TraceConfig::TraceConfig(StringPiece category_filter_string,
TraceRecordMode record_mode) {
- std::string trace_options_string;
- switch (record_mode) {
- case RECORD_UNTIL_FULL:
- trace_options_string = kRecordUntilFull;
- break;
- case RECORD_CONTINUOUSLY:
- trace_options_string = kRecordContinuously;
- break;
- case RECORD_AS_MUCH_AS_POSSIBLE:
- trace_options_string = kRecordAsMuchAsPossible;
- break;
- case ECHO_TO_CONSOLE:
- trace_options_string = kTraceToConsole;
- break;
- default:
- NOTREACHED();
- }
- InitializeFromStrings(category_filter_string, trace_options_string);
+ InitializeFromStrings(category_filter_string,
+ TraceConfig::TraceRecordModeToStr(record_mode));
}
TraceConfig::TraceConfig(const DictionaryValue& config) {
@@ -470,23 +471,8 @@ void TraceConfig::SetEventFiltersFromConfigList(
std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
auto dict = std::make_unique<DictionaryValue>();
- switch (record_mode_) {
- case RECORD_UNTIL_FULL:
- dict->SetString(kRecordModeParam, kRecordUntilFull);
- break;
- case RECORD_CONTINUOUSLY:
- dict->SetString(kRecordModeParam, kRecordContinuously);
- break;
- case RECORD_AS_MUCH_AS_POSSIBLE:
- dict->SetString(kRecordModeParam, kRecordAsMuchAsPossible);
- break;
- case ECHO_TO_CONSOLE:
- dict->SetString(kRecordModeParam, kTraceToConsole);
- break;
- default:
- NOTREACHED();
- }
-
+ dict->SetString(kRecordModeParam,
+ TraceConfig::TraceRecordModeToStr(record_mode_));
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index 54604892936..decd54d1888 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -120,6 +120,8 @@ class BASE_EXPORT TraceConfig {
};
typedef std::vector<EventFilterConfig> EventFilters;
+ static std::string TraceRecordModeToStr(TraceRecordMode record_mode);
+
TraceConfig();
// Create TraceConfig object from category filter and trace options strings.
diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h
index d1f3fc653c0..38528aa67b3 100644
--- a/chromium/base/trace_event/trace_event.h
+++ b/chromium/base/trace_event/trace_event.h
@@ -18,6 +18,7 @@
#include "base/debug/debugging_buildflags.h"
#include "base/macros.h"
#include "base/time/time.h"
+#include "base/time/time_override.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_category.h"
@@ -249,6 +250,17 @@
INTERNAL_TRACE_EVENT_UID(atomic), \
INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+// Implementation detail: internal macro to return unoverridden
+// base::TimeTicks::Now(). This is important because in headless VirtualTime can
+// override base:TimeTicks::Now().
+#define INTERNAL_TRACE_TIME_TICKS_NOW() \
+ base::subtle::TimeTicksNowIgnoringOverride()
+
+// Implementation detail: internal macro to return unoverridden
+// base::Time::Now(). This is important because in headless VirtualTime can
+// override base:TimeTicks::Now().
+#define INTERNAL_TRACE_TIME_NOW() base::subtle::TimeNowIgnoringOverride()
+
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
@@ -878,7 +890,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
unsigned int flags,
unsigned long long bind_id) {
const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- const base::TimeTicks now = base::TimeTicks::Now();
+ const base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id);
@@ -919,7 +931,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const char* arg1_name,
const ARG1_TYPE& arg1_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, arg1_val);
@@ -937,7 +949,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const char* arg1_name,
std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, std::move(arg1_val));
@@ -984,7 +996,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const char* arg2_name,
const ARG2_TYPE& arg2_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, std::move(arg1_val), arg2_name, arg2_val);
@@ -1004,7 +1016,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const char* arg2_name,
std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, arg1_val, arg2_name, std::move(arg2_val));
@@ -1024,7 +1036,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const char* arg2_name,
std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, std::move(arg1_val), arg2_name, std::move(arg2_val));
@@ -1044,7 +1056,7 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const char* arg2_name,
const ARG2_TYPE& arg2_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, arg1_val, arg2_name, arg2_val);
diff --git a/chromium/base/trace_event/trace_event_android.cc b/chromium/base/trace_event/trace_event_android.cc
index 0a4e6eab4e0..30d9c74a6d7 100644
--- a/chromium/base/trace_event/trace_event_android.cc
+++ b/chromium/base/trace_event/trace_event_android.cc
@@ -205,7 +205,7 @@ void TraceLog::AddClockSyncMetadataEvent() {
// debugfs that takes the written data and pushes it onto the trace
// buffer. So, to establish clock sync, we write our monotonic clock into that
// trace buffer.
- double now_in_seconds = (TimeTicks::Now() - TimeTicks()).InSecondsF();
+ double now_in_seconds = (TRACE_TIME_TICKS_NOW() - TimeTicks()).InSecondsF();
std::string marker = StringPrintf(
"trace_event_clock_sync: parent_ts=%f\n", now_in_seconds);
WriteToATrace(atrace_fd, marker.c_str(), marker.size());
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index 718383b0d7e..578bbde04d0 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -85,7 +85,9 @@ std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
}
ThreadTicks ThreadNow() {
- return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
+ return ThreadTicks::IsSupported()
+ ? base::subtle::ThreadTicksNowIgnoringOverride()
+ : ThreadTicks();
}
template <typename T>
@@ -369,7 +371,7 @@ TraceLog::TraceLog()
process_creation_time_ = CurrentProcessInfo::CreationTime();
#else
// Use approximate time when creation time is not available.
- process_creation_time_ = Time::Now();
+ process_creation_time_ = TRACE_TIME_NOW();
#endif
logged_events_.reset(CreateTraceBuffer());
@@ -1031,7 +1033,7 @@ TraceEventHandle TraceLog::AddTraceEvent(
std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
unsigned int flags) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase,
category_group_enabled,
@@ -1063,7 +1065,7 @@ TraceEventHandle TraceLog::AddTraceEventWithBindId(
std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
unsigned int flags) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase,
category_group_enabled,
@@ -1094,7 +1096,7 @@ TraceEventHandle TraceLog::AddTraceEventWithProcessId(
const unsigned long long* arg_values,
std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
unsigned int flags) {
- base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase,
category_group_enabled,
@@ -1493,7 +1495,7 @@ void TraceLog::AddMetadataEventsWhileLocked() {
current_thread_id, "process_name", "name", process_name_);
}
- TimeDelta process_uptime = Time::Now() - process_creation_time_;
+ TimeDelta process_uptime = TRACE_TIME_NOW() - process_creation_time_;
InitializeMetadataEvent(
AddEventToThreadSharedChunkWhileLocked(nullptr, false), current_thread_id,
"process_uptime_seconds", "uptime", process_uptime.InSeconds());
@@ -1701,19 +1703,12 @@ ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
if (*category_group_enabled_) {
event_handle_ =
TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- TRACE_EVENT_PHASE_COMPLETE,
- category_group_enabled_,
- name,
- trace_event_internal::kGlobalScope, // scope
- trace_event_internal::kNoId, // id
+ TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
+ trace_event_internal::kGlobalScope, // scope
+ trace_event_internal::kNoId, // id
static_cast<int>(base::PlatformThread::CurrentId()), // thread_id
- base::TimeTicks::Now(),
- trace_event_internal::kZeroNumArgs,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- TRACE_EVENT_FLAG_NONE);
+ TRACE_TIME_TICKS_NOW(), trace_event_internal::kZeroNumArgs, nullptr,
+ nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
}
}
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
index b8041d4d441..06c58481d9c 100644
--- a/chromium/base/trace_event/trace_log.h
+++ b/chromium/base/trace_event/trace_log.h
@@ -17,6 +17,7 @@
#include "base/containers/stack.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
+#include "base/time/time_override.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_impl.h"
@@ -431,7 +432,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
}
void UseNextTraceBuffer();
- TimeTicks OffsetNow() const { return OffsetTimestamp(TimeTicks::Now()); }
+ TimeTicks OffsetNow() const {
+ // This should be TRACE_TIME_TICKS_NOW but include order makes that hard.
+ return OffsetTimestamp(base::subtle::TimeTicksNowIgnoringOverride());
+ }
TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
return timestamp - time_offset_;
}
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 0dc557907d2..c0ac5db80ae 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -554,8 +554,7 @@ bool operator==(const Value& lhs, const Value& rhs) {
return false;
return std::equal(std::begin(lhs.dict_), std::end(lhs.dict_),
std::begin(rhs.dict_),
- [](const Value::DictStorage::value_type& u,
- const Value::DictStorage::value_type& v) {
+ [](const auto& u, const auto& v) {
return std::tie(u.first, *u.second) ==
std::tie(v.first, *v.second);
});
diff --git a/chromium/base/win/com_init_check_hook.cc b/chromium/base/win/com_init_check_hook.cc
index be40598b792..3da76229cc6 100644
--- a/chromium/base/win/com_init_check_hook.cc
+++ b/chromium/base/win/com_init_check_hook.cc
@@ -157,8 +157,8 @@ class HookManager {
return;
}
- uint32_t dchecked_co_create_instance_address = reinterpret_cast<uint32_t>(
- static_cast<void*>(&HookManager::DCheckedCoCreateInstance));
+ uint32_t dchecked_co_create_instance_address =
+ reinterpret_cast<uint32_t>(&HookManager::DCheckedCoCreateInstance);
uint32_t jmp_offset_base_address = co_create_instance_padded_address_ + 5;
StructuredHotpatch structured_hotpatch;
structured_hotpatch.relative_address =
diff --git a/chromium/base/win/core_winrt_util.cc b/chromium/base/win/core_winrt_util.cc
index 9ae2d352164..7a304903a6d 100644
--- a/chromium/base/win/core_winrt_util.cc
+++ b/chromium/base/win/core_winrt_util.cc
@@ -6,7 +6,7 @@
namespace {
-void* LoadComBaseFunction(const char* function_name) {
+FARPROC LoadComBaseFunction(const char* function_name) {
static HMODULE const handle = ::LoadLibrary(L"combase.dll");
return handle ? ::GetProcAddress(handle, function_name) : nullptr;
}
diff --git a/chromium/base/win/scoped_hstring.cc b/chromium/base/win/scoped_hstring.cc
index 335a494b697..89d1f493646 100644
--- a/chromium/base/win/scoped_hstring.cc
+++ b/chromium/base/win/scoped_hstring.cc
@@ -15,7 +15,7 @@ namespace {
static bool g_load_succeeded = false;
-void* LoadComBaseFunction(const char* function_name) {
+FARPROC LoadComBaseFunction(const char* function_name) {
static HMODULE const handle = ::LoadLibrary(L"combase.dll");
return handle ? ::GetProcAddress(handle, function_name) : nullptr;
}
diff --git a/chromium/base/win/win_includes_unittest.cc b/chromium/base/win/win_includes_unittest.cc
index 73b7b556502..20c6cbcd14f 100644
--- a/chromium/base/win/win_includes_unittest.cc
+++ b/chromium/base/win/win_includes_unittest.cc
@@ -5,6 +5,7 @@
// This file ensures that these header files don't include Windows.h and can
// compile without including Windows.h. This helps to improve compile times.
+#include "base/atomicops.h"
#include "base/files/file_util.h"
#include "base/files/platform_file.h"
#include "base/process/process_handle.h"
diff --git a/chromium/base/win/win_util_unittest.cc b/chromium/base/win/win_util_unittest.cc
index a0dbdd38214..6d5cf61cb33 100644
--- a/chromium/base/win/win_util_unittest.cc
+++ b/chromium/base/win/win_util_unittest.cc
@@ -7,6 +7,7 @@
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/scoped_native_library.h"
+#include "base/stl_util.h"
#include "base/win/win_client_metrics.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -67,8 +68,7 @@ TEST(BaseWinUtilTest, TestGetLoadedModulesSnapshot) {
ASSERT_NE(static_cast<HMODULE>(NULL), new_dll.get());
ASSERT_TRUE(GetLoadedModulesSnapshot(::GetCurrentProcess(), &snapshot));
ASSERT_GT(snapshot.size(), original_snapshot_size);
- ASSERT_NE(snapshot.end(),
- std::find(snapshot.begin(), snapshot.end(), new_dll.get()));
+ ASSERT_TRUE(base::ContainsValue(snapshot, new_dll.get()));
}
TEST(BaseWinUtilTest, TestUint32ToInvalidHandle) {
diff --git a/chromium/base/win/windows_types.h b/chromium/base/win/windows_types.h
index 8060f039111..2a86195fa73 100644
--- a/chromium/base/win/windows_types.h
+++ b/chromium/base/win/windows_types.h
@@ -248,5 +248,6 @@ WINBASEAPI VOID WINAPI SetLastError(_In_ DWORD dwErrCode);
#define SendMessageCallback SendMessageCallbackW
#define SetCurrentDirectory SetCurrentDirectoryW
#define StartService StartServiceW
+#define StrCat StrCatW
#endif // BASE_WIN_WINDOWS_TYPES_H