summaryrefslogtreecommitdiffstats
path: root/chromium/base
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2018-01-29 16:35:13 +0100
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2018-02-01 15:33:35 +0000
commitc8c2d1901aec01e934adf561a9fdf0cc776cdef8 (patch)
tree9157c3d9815e5870799e070b113813bec53e0535 /chromium/base
parentabefd5095b41dac94ca451d784ab6e27372e981a (diff)
BASELINE: Update Chromium to 64.0.3282.139
Change-Id: I1cae68fe9c94ff7608b26b8382fc19862cdb293a Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/base')
-rw-r--r--chromium/base/BUILD.gn188
-rw-r--r--chromium/base/allocator/allocator_shim.cc19
-rw-r--r--chromium/base/allocator/partition_allocator/PartitionAlloc.md12
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization.cc71
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization.h13
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc81
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator.cc56
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator.h30
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.cc488
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.h325
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc948
-rw-r--r--chromium/base/allocator/partition_allocator/spin_lock.cc5
-rw-r--r--chromium/base/allocator/tcmalloc_unittest.cc4
-rw-r--r--chromium/base/android/linker/BUILD.gn16
-rw-r--r--chromium/base/at_exit.cc2
-rw-r--r--chromium/base/at_exit_unittest.cc16
-rw-r--r--chromium/base/base64_decode_fuzzer.cc15
-rw-r--r--chromium/base/base64_encode_fuzzer.cc20
-rw-r--r--chromium/base/base_switches.cc12
-rw-r--r--chromium/base/base_switches.h6
-rw-r--r--chromium/base/bind_internal.h6
-rw-r--r--chromium/base/bind_unittest.cc16
-rw-r--r--chromium/base/bits.h120
-rw-r--r--chromium/base/bits_unittest.cc119
-rw-r--r--chromium/base/callback.h6
-rw-r--r--chromium/base/callback_helpers.cc2
-rw-r--r--chromium/base/callback_internal.cc2
-rw-r--r--chromium/base/callback_internal.h16
-rw-r--r--chromium/base/callback_unittest.cc4
-rw-r--r--chromium/base/cancelable_callback.h96
-rw-r--r--chromium/base/cancelable_callback_unittest.cc3
-rw-r--r--chromium/base/command_line.cc39
-rw-r--r--chromium/base/command_line.h14
-rw-r--r--chromium/base/command_line_unittest.cc6
-rw-r--r--chromium/base/containers/linked_list_unittest.cc12
-rw-r--r--chromium/base/containers/small_map.h14
-rw-r--r--chromium/base/containers/small_map_unittest.cc2
-rw-r--r--chromium/base/containers/span.h16
-rw-r--r--chromium/base/containers/stack_container_unittest.cc4
-rw-r--r--chromium/base/cpu.cc63
-rw-r--r--chromium/base/cpu_unittest.cc9
-rw-r--r--chromium/base/debug/activity_analyzer.cc77
-rw-r--r--chromium/base/debug/activity_analyzer.h14
-rw-r--r--chromium/base/debug/activity_analyzer_unittest.cc35
-rw-r--r--chromium/base/debug/activity_tracker.cc140
-rw-r--r--chromium/base/debug/activity_tracker.h26
-rw-r--r--chromium/base/debug/activity_tracker_unittest.cc83
-rw-r--r--chromium/base/debug/crash_logging.cc47
-rw-r--r--chromium/base/debug/crash_logging.h79
-rw-r--r--chromium/base/debug/crash_logging_unittest.cc14
-rw-r--r--chromium/base/debug/dump_without_crashing.cc2
-rw-r--r--chromium/base/debug/proc_maps_linux.h3
-rw-r--r--chromium/base/debug/profiler.cc8
-rw-r--r--chromium/base/debug/stack_trace.cc2
-rw-r--r--chromium/base/debug/stack_trace_posix.cc114
-rw-r--r--chromium/base/debug/task_annotator.cc6
-rw-r--r--chromium/base/deferred_sequenced_task_runner.cc6
-rw-r--r--chromium/base/environment.cc2
-rw-r--r--chromium/base/environment_unittest.cc22
-rw-r--r--chromium/base/export_template.h163
-rw-r--r--chromium/base/feature_list.cc4
-rw-r--r--chromium/base/file_descriptor_store.cc6
-rw-r--r--chromium/base/file_version_info_win.cc2
-rw-r--r--chromium/base/files/file.cc3
-rw-r--r--chromium/base/files/file_enumerator.cc3
-rw-r--r--chromium/base/files/file_enumerator_posix.cc5
-rw-r--r--chromium/base/files/file_enumerator_win.cc2
-rw-r--r--chromium/base/files/file_locking_unittest.cc2
-rw-r--r--chromium/base/files/file_path.cc18
-rw-r--r--chromium/base/files/file_path_watcher_linux.cc2
-rw-r--r--chromium/base/files/file_path_watcher_unittest.cc14
-rw-r--r--chromium/base/files/file_posix.cc30
-rw-r--r--chromium/base/files/file_proxy.cc2
-rw-r--r--chromium/base/files/file_proxy_unittest.cc2
-rw-r--r--chromium/base/files/file_unittest.cc7
-rw-r--r--chromium/base/files/file_util.cc8
-rw-r--r--chromium/base/files/file_util_mac.mm2
-rw-r--r--chromium/base/files/file_util_posix.cc72
-rw-r--r--chromium/base/files/file_util_unittest.cc57
-rw-r--r--chromium/base/files/file_util_win.cc63
-rw-r--r--chromium/base/files/file_win.cc24
-rw-r--r--chromium/base/files/important_file_writer_unittest.cc2
-rw-r--r--chromium/base/files/memory_mapped_file.cc31
-rw-r--r--chromium/base/files/memory_mapped_file.h6
-rw-r--r--chromium/base/files/memory_mapped_file_posix.cc55
-rw-r--r--chromium/base/files/memory_mapped_file_win.cc25
-rw-r--r--chromium/base/files/scoped_temp_dir.cc3
-rw-r--r--chromium/base/fuchsia/default_job.h5
-rw-r--r--chromium/base/fuchsia/fuchsia_logging.cc26
-rw-r--r--chromium/base/fuchsia/fuchsia_logging.h60
-rw-r--r--chromium/base/gmock_unittest.cc4
-rw-r--r--chromium/base/i18n/bidi_line_iterator.cc11
-rw-r--r--chromium/base/i18n/bidi_line_iterator_unittest.cc2
-rw-r--r--chromium/base/i18n/break_iterator.cc23
-rw-r--r--chromium/base/i18n/case_conversion.cc4
-rw-r--r--chromium/base/i18n/char_iterator.cc6
-rw-r--r--chromium/base/i18n/file_util_icu.cc2
-rw-r--r--chromium/base/i18n/icu_string_conversions.cc59
-rw-r--r--chromium/base/i18n/icu_string_conversions_unittest.cc202
-rw-r--r--chromium/base/i18n/message_formatter.cc2
-rw-r--r--chromium/base/i18n/message_formatter.h19
-rw-r--r--chromium/base/i18n/number_formatting_unittest.cc11
-rw-r--r--chromium/base/i18n/rtl.cc6
-rw-r--r--chromium/base/i18n/string_search.cc7
-rw-r--r--chromium/base/i18n/string_search_unittest.cc8
-rw-r--r--chromium/base/i18n/time_formatting_unittest.cc134
-rw-r--r--chromium/base/ios/callback_counter.h46
-rw-r--r--chromium/base/ios/callback_counter.mm35
-rw-r--r--chromium/base/ios/callback_counter_unittest.mm58
-rw-r--r--chromium/base/json/json_file_value_serializer.cc8
-rw-r--r--chromium/base/json/json_parser.cc6
-rw-r--r--chromium/base/json/json_reader.cc3
-rw-r--r--chromium/base/json/json_string_value_serializer.cc4
-rw-r--r--chromium/base/json/json_value_converter_unittest.cc4
-rw-r--r--chromium/base/json/json_writer.cc6
-rw-r--r--chromium/base/lazy_instance.cc15
-rw-r--r--chromium/base/lazy_instance_unittest.cc6
-rw-r--r--chromium/base/linux_util.cc6
-rw-r--r--chromium/base/location.cc12
-rw-r--r--chromium/base/location.h13
-rw-r--r--chromium/base/logging.cc95
-rw-r--r--chromium/base/logging.h48
-rw-r--r--chromium/base/logging_unittest.cc33
-rw-r--r--chromium/base/mac/objc_property_releaser.h134
-rw-r--r--chromium/base/mac/objc_property_releaser.mm131
-rw-r--r--chromium/base/mac/objc_property_releaser_unittest.mm356
-rw-r--r--chromium/base/mac/sdk_forward_declarations.h1
-rw-r--r--chromium/base/macros.h4
-rw-r--r--chromium/base/memory/aligned_memory.cc4
-rw-r--r--chromium/base/memory/discardable_memory.cc6
-rw-r--r--chromium/base/memory/discardable_memory_allocator.cc10
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc3
-rw-r--r--chromium/base/memory/discardable_shared_memory_unittest.cc2
-rw-r--r--chromium/base/memory/linked_ptr_unittest.cc16
-rw-r--r--chromium/base/memory/manual_constructor.h73
-rw-r--r--chromium/base/memory/memory_coordinator_client_registry.cc2
-rw-r--r--chromium/base/memory/memory_coordinator_proxy.cc6
-rw-r--r--chromium/base/memory/memory_pressure_listener.cc35
-rw-r--r--chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc2
-rw-r--r--chromium/base/memory/protected_memory.h252
-rw-r--r--chromium/base/memory/protected_memory_cfi.h87
-rw-r--r--chromium/base/memory/protected_memory_posix.cc82
-rw-r--r--chromium/base/memory/protected_memory_unittest.cc126
-rw-r--r--chromium/base/memory/ref_counted.h304
-rw-r--r--chromium/base/memory/ref_counted_memory.cc20
-rw-r--r--chromium/base/memory/ref_counted_memory_unittest.cc2
-rw-r--r--chromium/base/memory/ref_counted_unittest.cc24
-rw-r--r--chromium/base/memory/scoped_refptr.h358
-rw-r--r--chromium/base/memory/shared_memory_android.cc2
-rw-r--r--chromium/base/memory/shared_memory_handle.h91
-rw-r--r--chromium/base/memory/shared_memory_handle_android.cc198
-rw-r--r--chromium/base/memory/shared_memory_helper.cc21
-rw-r--r--chromium/base/memory/shared_memory_helper.h11
-rw-r--r--chromium/base/memory/shared_memory_mac.cc30
-rw-r--r--chromium/base/memory/shared_memory_posix.cc93
-rw-r--r--chromium/base/memory/shared_memory_tracker.cc4
-rw-r--r--chromium/base/memory/shared_memory_tracker.h3
-rw-r--r--chromium/base/memory/shared_memory_unittest.cc19
-rw-r--r--chromium/base/memory/shared_memory_win.cc29
-rw-r--r--chromium/base/memory/singleton_unittest.cc14
-rw-r--r--chromium/base/memory/weak_ptr.cc16
-rw-r--r--chromium/base/memory/weak_ptr.h9
-rw-r--r--chromium/base/memory/weak_ptr_unittest.cc38
-rw-r--r--chromium/base/message_loop/incoming_task_queue.cc6
-rw-r--r--chromium/base/message_loop/message_loop.cc25
-rw-r--r--chromium/base/message_loop/message_loop.h19
-rw-r--r--chromium/base/message_loop/message_loop_io_posix_unittest.cc2
-rw-r--r--chromium/base/message_loop/message_loop_task_runner.cc3
-rw-r--r--chromium/base/message_loop/message_loop_task_runner_unittest.cc44
-rw-r--r--chromium/base/message_loop/message_loop_unittest.cc54
-rw-r--r--chromium/base/message_loop/message_pump.cc6
-rw-r--r--chromium/base/message_loop/message_pump_android.cc163
-rw-r--r--chromium/base/message_loop/message_pump_android.h22
-rw-r--r--chromium/base/message_loop/message_pump_default.cc2
-rw-r--r--chromium/base/message_loop/message_pump_fuchsia.cc2
-rw-r--r--chromium/base/message_loop/message_pump_fuchsia.h1
-rw-r--r--chromium/base/message_loop/message_pump_glib.cc10
-rw-r--r--chromium/base/message_loop/message_pump_glib_unittest.cc25
-rw-r--r--chromium/base/message_loop/message_pump_libevent.cc20
-rw-r--r--chromium/base/message_loop/message_pump_libevent_unittest.cc17
-rw-r--r--chromium/base/message_loop/message_pump_perftest.cc4
-rw-r--r--chromium/base/metrics/OWNERS2
-rw-r--r--chromium/base/metrics/bucket_ranges.cc2
-rw-r--r--chromium/base/metrics/field_trial.cc36
-rw-r--r--chromium/base/metrics/field_trial_param_associator.cc4
-rw-r--r--chromium/base/metrics/field_trial_unittest.cc89
-rw-r--r--chromium/base/metrics/histogram.cc69
-rw-r--r--chromium/base/metrics/histogram.h40
-rw-r--r--chromium/base/metrics/histogram_base.cc100
-rw-r--r--chromium/base/metrics/histogram_base.h54
-rw-r--r--chromium/base/metrics/histogram_base_unittest.cc76
-rw-r--r--chromium/base/metrics/histogram_delta_serialization.cc9
-rw-r--r--chromium/base/metrics/histogram_functions.cc4
-rw-r--r--chromium/base/metrics/histogram_functions.h2
-rw-r--r--chromium/base/metrics/histogram_samples.cc17
-rw-r--r--chromium/base/metrics/histogram_samples.h17
-rw-r--r--chromium/base/metrics/histogram_snapshot_manager.cc10
-rw-r--r--chromium/base/metrics/histogram_snapshot_manager_unittest.cc4
-rw-r--r--chromium/base/metrics/histogram_unittest.cc8
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator.cc102
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator.h15
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator_unittest.cc28
-rw-r--r--chromium/base/metrics/persistent_memory_allocator.cc24
-rw-r--r--chromium/base/metrics/persistent_memory_allocator.h3
-rw-r--r--chromium/base/metrics/persistent_sample_map.cc36
-rw-r--r--chromium/base/metrics/persistent_sample_map_unittest.cc2
-rw-r--r--chromium/base/metrics/sample_map.cc2
-rw-r--r--chromium/base/metrics/sample_map_unittest.cc2
-rw-r--r--chromium/base/metrics/sample_vector.cc22
-rw-r--r--chromium/base/metrics/single_sample_metrics_unittest.cc2
-rw-r--r--chromium/base/metrics/sparse_histogram.cc20
-rw-r--r--chromium/base/metrics/sparse_histogram.h6
-rw-r--r--chromium/base/metrics/sparse_histogram_unittest.cc6
-rw-r--r--chromium/base/metrics/statistics_recorder.cc33
-rw-r--r--chromium/base/metrics/statistics_recorder.h6
-rw-r--r--chromium/base/metrics/statistics_recorder_unittest.cc49
-rw-r--r--chromium/base/native_library_posix.cc2
-rw-r--r--chromium/base/native_library_win.cc2
-rw-r--r--chromium/base/nix/mime_util_xdg.cc2
-rw-r--r--chromium/base/nix/xdg_util.cc4
-rw-r--r--chromium/base/numerics/README.md2
-rw-r--r--chromium/base/numerics/ranges.h5
-rw-r--r--chromium/base/observer_list.h414
-rw-r--r--chromium/base/observer_list_threadsafe.cc16
-rw-r--r--chromium/base/observer_list_threadsafe.h104
-rw-r--r--chromium/base/observer_list_unittest.cc167
-rw-r--r--chromium/base/path_service.cc12
-rw-r--r--chromium/base/pending_task.cc3
-rw-r--r--chromium/base/pickle.cc28
-rw-r--r--chromium/base/pickle_unittest.cc20
-rw-r--r--chromium/base/posix/global_descriptors.cc4
-rw-r--r--chromium/base/posix/safe_strerror.cc2
-rw-r--r--chromium/base/posix/unix_domain_socket.cc10
-rw-r--r--chromium/base/posix/unix_domain_socket_unittest.cc8
-rw-r--r--chromium/base/power_monitor/power_monitor.cc4
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_android.cc4
-rw-r--r--chromium/base/power_monitor/power_monitor_source.cc4
-rw-r--r--chromium/base/power_monitor/power_monitor_unittest.cc2
-rw-r--r--chromium/base/process/kill.cc8
-rw-r--r--chromium/base/process/kill.h14
-rw-r--r--chromium/base/process/kill_posix.cc9
-rw-r--r--chromium/base/process/kill_win.cc51
-rw-r--r--chromium/base/process/launch.h4
-rw-r--r--chromium/base/process/launch_fuchsia.cc8
-rw-r--r--chromium/base/process/launch_mac.cc2
-rw-r--r--chromium/base/process/launch_posix.cc35
-rw-r--r--chromium/base/process/launch_win.cc12
-rw-r--r--chromium/base/process/memory.cc2
-rw-r--r--chromium/base/process/memory_linux.cc2
-rw-r--r--chromium/base/process/memory_unittest.cc31
-rw-r--r--chromium/base/process/process.h7
-rw-r--r--chromium/base/process/process_fuchsia.cc2
-rw-r--r--chromium/base/process/process_info_win.cc9
-rw-r--r--chromium/base/process/process_iterator.cc7
-rw-r--r--chromium/base/process/process_iterator_linux.cc2
-rw-r--r--chromium/base/process/process_iterator_mac.cc3
-rw-r--r--chromium/base/process/process_metrics.cc6
-rw-r--r--chromium/base/process/process_metrics.h58
-rw-r--r--chromium/base/process/process_metrics_fuchsia.cc5
-rw-r--r--chromium/base/process/process_metrics_linux.cc33
-rw-r--r--chromium/base/process/process_metrics_posix.cc2
-rw-r--r--chromium/base/process/process_metrics_unittest.cc24
-rw-r--r--chromium/base/process/process_posix.cc24
-rw-r--r--chromium/base/process/process_unittest.cc2
-rw-r--r--chromium/base/process/process_util_unittest.cc6
-rw-r--r--chromium/base/process/process_win.cc34
-rw-r--r--chromium/base/profiler/native_stack_sampler.cc10
-rw-r--r--chromium/base/profiler/stack_sampling_profiler.cc14
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_unittest.cc2
-rw-r--r--chromium/base/scoped_native_library.cc9
-rw-r--r--chromium/base/security_unittest.cc8
-rw-r--r--chromium/base/sequenced_task_runner.cc3
-rw-r--r--chromium/base/stl_util.h29
-rw-r--r--chromium/base/stl_util_unittest.cc9
-rw-r--r--chromium/base/strings/pattern.cc4
-rw-r--r--chromium/base/strings/safe_sprintf.cc8
-rw-r--r--chromium/base/strings/safe_sprintf_unittest.cc12
-rw-r--r--chromium/base/strings/string16.cc2
-rw-r--r--chromium/base/strings/string_number_conversions.cc45
-rw-r--r--chromium/base/strings/string_number_conversions.h70
-rw-r--r--chromium/base/strings/string_number_conversions_unittest.cc51
-rw-r--r--chromium/base/strings/string_piece_unittest.cc60
-rw-r--r--chromium/base/strings/string_util.cc2
-rw-r--r--chromium/base/strings/string_util.h8
-rw-r--r--chromium/base/strings/sys_string_conversions_posix.cc2
-rw-r--r--chromium/base/strings/utf_offset_string_conversions_unittest.cc2
-rw-r--r--chromium/base/strings/utf_string_conversions_unittest.cc4
-rw-r--r--chromium/base/supports_user_data_unittest.cc2
-rw-r--r--chromium/base/sync_socket_posix.cc8
-rw-r--r--chromium/base/sync_socket_unittest.cc2
-rw-r--r--chromium/base/sync_socket_win.cc28
-rw-r--r--chromium/base/synchronization/condition_variable_unittest.cc8
-rw-r--r--chromium/base/synchronization/lock_impl_posix.cc11
-rw-r--r--chromium/base/synchronization/lock_impl_win.cc11
-rw-r--r--chromium/base/synchronization/lock_unittest.cc42
-rw-r--r--chromium/base/synchronization/waitable_event_posix.cc6
-rw-r--r--chromium/base/synchronization/waitable_event_watcher_posix.cc10
-rw-r--r--chromium/base/sys_info_android.cc4
-rw-r--r--chromium/base/sys_info_chromeos.cc3
-rw-r--r--chromium/base/sys_info_posix.cc4
-rw-r--r--chromium/base/sys_info_win.cc4
-rw-r--r--chromium/base/system_monitor/system_monitor.cc4
-rw-r--r--chromium/base/task_runner.cc4
-rw-r--r--chromium/base/task_scheduler/OWNERS3
-rw-r--r--chromium/base/task_scheduler/post_task.cc7
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc15
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_impl.cc31
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_impl.h20
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc125
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc3
-rw-r--r--chromium/base/task_scheduler/task_scheduler.cc6
-rw-r--r--chromium/base/task_scheduler/task_scheduler.h18
-rw-r--r--chromium/base/task_scheduler/task_scheduler_impl.cc26
-rw-r--r--chromium/base/task_scheduler/task_tracker.cc22
-rw-r--r--chromium/base/task_scheduler/task_tracker.h24
-rw-r--r--chromium/base/task_scheduler/task_tracker_unittest.cc7
-rw-r--r--chromium/base/test/BUILD.gn9
-rw-r--r--chromium/base/third_party/dmg_fp/README.chromium1
-rw-r--r--chromium/base/third_party/dmg_fp/dtoa_wrapper.cc2
-rw-r--r--chromium/base/third_party/icu/LICENSE50
-rw-r--r--chromium/base/third_party/icu/README.chromium14
-rw-r--r--chromium/base/third_party/icu/icu_utf.cc260
-rw-r--r--chromium/base/third_party/icu/icu_utf.h294
-rw-r--r--chromium/base/third_party/symbolize/README.chromium6
-rw-r--r--chromium/base/third_party/symbolize/symbolize.cc110
-rw-r--r--chromium/base/threading/platform_thread_fuchsia.cc7
-rw-r--r--chromium/base/threading/platform_thread_posix.cc8
-rw-r--r--chromium/base/threading/platform_thread_unittest.cc9
-rw-r--r--chromium/base/threading/platform_thread_win.cc4
-rw-r--r--chromium/base/threading/sequenced_task_runner_handle.cc58
-rw-r--r--chromium/base/threading/sequenced_worker_pool.cc27
-rw-r--r--chromium/base/threading/sequenced_worker_pool_unittest.cc16
-rw-r--r--chromium/base/threading/simple_thread.cc4
-rw-r--r--chromium/base/threading/simple_thread_unittest.cc4
-rw-r--r--chromium/base/threading/thread_checker.h2
-rw-r--r--chromium/base/threading/thread_checker_unittest.cc2
-rw-r--r--chromium/base/threading/thread_collision_warner_unittest.cc2
-rw-r--r--chromium/base/threading/thread_id_name_manager.cc8
-rw-r--r--chromium/base/threading/thread_local_storage_unittest.cc2
-rw-r--r--chromium/base/threading/thread_local_unittest.cc27
-rw-r--r--chromium/base/threading/thread_perftest.cc4
-rw-r--r--chromium/base/threading/thread_restrictions.cc5
-rw-r--r--chromium/base/threading/thread_restrictions.h98
-rw-r--r--chromium/base/threading/thread_task_runner_handle.cc16
-rw-r--r--chromium/base/threading/thread_unittest.cc2
-rw-r--r--chromium/base/threading/watchdog_unittest.cc2
-rw-r--r--chromium/base/time/clock.cc2
-rw-r--r--chromium/base/time/default_clock.cc10
-rw-r--r--chromium/base/time/default_clock.h3
-rw-r--r--chromium/base/time/default_tick_clock.cc11
-rw-r--r--chromium/base/time/default_tick_clock.h3
-rw-r--r--chromium/base/time/tick_clock.cc2
-rw-r--r--chromium/base/time/time.cc2
-rw-r--r--chromium/base/time/time.h30
-rw-r--r--chromium/base/time/time_exploded_posix.cc6
-rw-r--r--chromium/base/time/time_mac.cc8
-rw-r--r--chromium/base/time/time_unittest.cc113
-rw-r--r--chromium/base/time/time_win.cc26
-rw-r--r--chromium/base/timer/hi_res_timer_manager_posix.cc3
-rw-r--r--chromium/base/timer/mock_timer.cc3
-rw-r--r--chromium/base/timer/mock_timer_unittest.cc4
-rw-r--r--chromium/base/tools_sanity_unittest.cc19
-rw-r--r--chromium/base/trace_event/common/trace_event_common.h2
-rw-r--r--chromium/base/trace_event/event_name_filter.cc2
-rw-r--r--chromium/base/trace_event/heap_profiler.h30
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc52
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_context_tracker.h22
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc65
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_register.cc2
-rw-r--r--chromium/base/trace_event/heap_profiler_event_filter.cc9
-rw-r--r--chromium/base/trace_event/heap_profiler_heap_dump_writer.cc2
-rw-r--r--chromium/base/trace_event/heap_profiler_serialization_state.cc2
-rw-r--r--chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc6
-rw-r--r--chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc47
-rw-r--r--chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc13
-rw-r--r--chromium/base/trace_event/malloc_dump_provider.cc18
-rw-r--r--chromium/base/trace_event/malloc_dump_provider.h10
-rw-r--r--chromium/base/trace_event/memory_allocator_dump.cc21
-rw-r--r--chromium/base/trace_event/memory_allocator_dump.h8
-rw-r--r--chromium/base/trace_event/memory_allocator_dump_unittest.cc18
-rw-r--r--chromium/base/trace_event/memory_dump_manager.cc44
-rw-r--r--chromium/base/trace_event/memory_dump_manager.h2
-rw-r--r--chromium/base/trace_event/memory_dump_manager_test_utils.h7
-rw-r--r--chromium/base/trace_event/memory_dump_manager_unittest.cc8
-rw-r--r--chromium/base/trace_event/memory_dump_provider_info.cc2
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.cc8
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.h21
-rw-r--r--chromium/base/trace_event/memory_dump_scheduler.cc9
-rw-r--r--chromium/base/trace_event/memory_infra_background_whitelist.cc27
-rw-r--r--chromium/base/trace_event/process_memory_dump.cc38
-rw-r--r--chromium/base/trace_event/process_memory_dump.h22
-rw-r--r--chromium/base/trace_event/process_memory_dump_unittest.cc40
-rw-r--r--chromium/base/trace_event/trace_buffer.cc20
-rw-r--r--chromium/base/trace_event/trace_config.cc19
-rw-r--r--chromium/base/trace_event/trace_config_category_filter.cc16
-rw-r--r--chromium/base/trace_event/trace_event.h10
-rw-r--r--chromium/base/trace_event/trace_event_argument.cc54
-rw-r--r--chromium/base/trace_event/trace_event_filter.cc4
-rw-r--r--chromium/base/trace_event/trace_event_filter_test_utils.cc4
-rw-r--r--chromium/base/trace_event/trace_event_impl.cc13
-rw-r--r--chromium/base/trace_event/trace_event_memory_overhead.cc2
-rw-r--r--chromium/base/trace_event/trace_event_system_stats_monitor.cc4
-rw-r--r--chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc4
-rw-r--r--chromium/base/trace_event/trace_event_unittest.cc152
-rw-r--r--chromium/base/trace_event/trace_log.cc85
-rw-r--r--chromium/base/trace_event/trace_log.h10
-rw-r--r--chromium/base/trace_event/tracing_agent.cc2
-rw-r--r--chromium/base/tuple_unittest.cc7
-rw-r--r--chromium/base/values.cc90
-rw-r--r--chromium/base/values.h39
-rw-r--r--chromium/base/values_unittest.cc553
-rw-r--r--chromium/base/version.cc6
-rw-r--r--chromium/base/vlog.cc4
-rw-r--r--chromium/base/win/OWNERS2
-rw-r--r--chromium/base/win/com_init_check_hook.cc8
-rw-r--r--chromium/base/win/com_init_check_hook.h3
-rw-r--r--chromium/base/win/com_init_check_hook_unittest.cc5
-rw-r--r--chromium/base/win/com_init_util_unittest.cc8
-rw-r--r--chromium/base/win/core_winrt_util.cc37
-rw-r--r--chromium/base/win/core_winrt_util.h20
-rw-r--r--chromium/base/win/core_winrt_util_unittest.cc14
-rw-r--r--chromium/base/win/scoped_com_initializer.cc37
-rw-r--r--chromium/base/win/scoped_com_initializer.h29
-rw-r--r--chromium/base/win/scoped_comptr.h19
-rw-r--r--chromium/base/win/scoped_handle.cc12
-rw-r--r--chromium/base/win/scoped_hstring.h2
-rw-r--r--chromium/base/win/scoped_variant.h5
-rw-r--r--chromium/base/win/scoped_windows_thread_environment.h28
-rw-r--r--chromium/base/win/scoped_winrt_initializer.cc38
-rw-r--r--chromium/base/win/scoped_winrt_initializer.h48
-rw-r--r--chromium/base/win/scoped_winrt_initializer_unittest.cc47
-rw-r--r--chromium/base/win/shortcut.cc72
-rw-r--r--chromium/base/win/shortcut.h25
-rw-r--r--chromium/base/win/shortcut_unittest.cc19
-rw-r--r--chromium/base/win/win_util.cc20
-rw-r--r--chromium/base/win/win_util.h5
-rw-r--r--chromium/base/win/winrt_storage_util.cc72
-rw-r--r--chromium/base/win/winrt_storage_util.h34
-rw-r--r--chromium/base/win/winrt_storage_util_unittest.cc42
439 files changed, 8672 insertions, 6087 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 6ecbad17b20..28ebd2cd454 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -24,6 +24,7 @@ import("//build/config/chromecast_build.gni")
import("//build/config/clang/clang.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/dcheck_always_on.gni")
+import("//build/config/jumbo.gni")
import("//build/config/nacl/config.gni")
import("//build/config/sysroot.gni")
import("//build/config/ui.gni")
@@ -44,6 +45,14 @@ declare_args() {
# (file, function, line). False means only the program counter (and currently
# file name) is saved.
enable_location_source = true
+
+ # Unsafe developer build. Has developer-friendly features that may weaken or
+ # disable security measures like sandboxing or ASLR.
+ # IMPORTANT: Unsafe developer builds should never be distributed to end users.
+ is_unsafe_developer_build = !is_official_build
+
+ # Set to true to disable COM init check hooks.
+ com_init_check_hook_disabled = false
}
if (is_android) {
@@ -105,7 +114,7 @@ if (is_android) {
# to be linked in where they wouldn't have otherwise. This does not include
# test code (test support and anything in the test directory) which should use
# source_set as is recommended for GN targets).
-component("base") {
+jumbo_component("base") {
if (is_nacl_nonsfi) {
# TODO(phosek) bug 570839: If field_trial.cc is in a static library,
# nacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
@@ -114,6 +123,12 @@ component("base") {
# non-component case.
static_component_type = "source_set"
}
+ if (is_nacl || is_ios) {
+ # Link errors related to malloc functions if libbase for nacl is
+ # compiled with jumbo: https://crbug.com/775959.
+ # Same for ios: https://crbug.com/776313.
+ never_build_jumbo = true
+ }
sources = [
"allocator/allocator_check.cc",
@@ -125,6 +140,9 @@ component("base") {
"allocator/allocator_shim.h",
"allocator/malloc_zone_functions_mac.cc",
"allocator/malloc_zone_functions_mac.h",
+ "android/android_hardware_buffer_abi.h",
+ "android/android_hardware_buffer_compat.cc",
+ "android/android_hardware_buffer_compat.h",
"android/animation_frame_time_histogram.cc",
"android/apk_assets.cc",
"android/apk_assets.h",
@@ -151,7 +169,6 @@ component("base") {
"android/java_exception_reporter.h",
"android/java_handler_thread.cc",
"android/java_handler_thread.h",
- "android/java_message_handler_factory.h",
"android/java_runtime.cc",
"android/java_runtime.h",
"android/jni_android.cc",
@@ -168,6 +185,8 @@ component("base") {
"android/jni_utils.h",
"android/jni_weak_ref.cc",
"android/jni_weak_ref.h",
+ "android/library_loader/anchor_functions.cc",
+ "android/library_loader/anchor_functions.h",
"android/library_loader/library_load_from_apk_status_codes.h",
"android/library_loader/library_loader_hooks.cc",
"android/library_loader/library_loader_hooks.h",
@@ -294,6 +313,7 @@ component("base") {
"environment.cc",
"environment.h",
"event_types.h",
+ "export_template.h",
"feature_list.cc",
"feature_list.h",
"file_descriptor_posix.h",
@@ -360,6 +380,8 @@ component("base") {
"hash.cc",
"hash.h",
"ios/block_types.h",
+ "ios/callback_counter.h",
+ "ios/callback_counter.mm",
"ios/crb_protocol_observers.h",
"ios/crb_protocol_observers.mm",
"ios/device_util.h",
@@ -424,8 +446,6 @@ component("base") {
"mac/mach_port_broker.mm",
"mac/mach_port_util.cc",
"mac/mach_port_util.h",
- "mac/objc_property_releaser.h",
- "mac/objc_property_releaser.mm",
"mac/objc_release_properties.h",
"mac/objc_release_properties.mm",
"mac/os_crash_dumps.cc",
@@ -467,7 +487,6 @@ component("base") {
"memory/discardable_shared_memory.h",
"memory/free_deleter.h",
"memory/linked_ptr.h",
- "memory/manual_constructor.h",
"memory/memory_coordinator_client.cc",
"memory/memory_coordinator_client.h",
"memory/memory_coordinator_client_registry.cc",
@@ -484,6 +503,9 @@ component("base") {
"memory/memory_pressure_monitor_mac.h",
"memory/memory_pressure_monitor_win.cc",
"memory/memory_pressure_monitor_win.h",
+ "memory/protected_memory.h",
+ "memory/protected_memory_cfi.h",
+ "memory/protected_memory_posix.cc",
"memory/ptr_util.h",
"memory/raw_scoped_refptr_mismatch_checker.h",
"memory/ref_counted.cc",
@@ -492,20 +514,14 @@ component("base") {
"memory/ref_counted_memory.cc",
"memory/ref_counted_memory.h",
"memory/scoped_policy.h",
+ "memory/scoped_refptr.h",
"memory/shared_memory.h",
- "memory/shared_memory_android.cc",
"memory/shared_memory_handle.cc",
"memory/shared_memory_handle.h",
- "memory/shared_memory_handle_mac.cc",
- "memory/shared_memory_handle_win.cc",
"memory/shared_memory_helper.cc",
"memory/shared_memory_helper.h",
- "memory/shared_memory_mac.cc",
- "memory/shared_memory_nacl.cc",
- "memory/shared_memory_posix.cc",
"memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h",
- "memory/shared_memory_win.cc",
"memory/singleton.cc",
"memory/singleton.h",
"memory/weak_ptr.cc",
@@ -590,6 +606,7 @@ component("base") {
"nix/xdg_util.cc",
"nix/xdg_util.h",
"observer_list.h",
+ "observer_list_threadsafe.cc",
"observer_list_threadsafe.h",
"optional.h",
"os_compat_android.cc",
@@ -1046,8 +1063,8 @@ component("base") {
"win/scoped_bstr.cc",
"win/scoped_bstr.h",
"win/scoped_co_mem.h",
+ "win/scoped_com_initializer.cc",
"win/scoped_com_initializer.h",
- "win/scoped_comptr.h",
"win/scoped_gdi_object.h",
"win/scoped_handle.cc",
"win/scoped_handle.h",
@@ -1061,6 +1078,9 @@ component("base") {
"win/scoped_select_object.h",
"win/scoped_variant.cc",
"win/scoped_variant.h",
+ "win/scoped_windows_thread_environment.h",
+ "win/scoped_winrt_initializer.cc",
+ "win/scoped_winrt_initializer.h",
"win/shortcut.cc",
"win/shortcut.h",
"win/startup_information.cc",
@@ -1071,10 +1091,19 @@ component("base") {
"win/win_util.h",
"win/windows_version.cc",
"win/windows_version.h",
+ "win/winrt_storage_util.cc",
+ "win/winrt_storage_util.h",
"win/wrapped_window_proc.cc",
"win/wrapped_window_proc.h",
]
+ # winternl.h and NTSecAPI.h have different definitions of UNICODE_STRING.
+ # There's only one client of NTSecAPI.h in base but several of winternl.h,
+ # so exclude the NTSecAPI.h one.
+ if (is_win) {
+ jumbo_excluded_sources = [ "rand_util_win.cc" ]
+ }
+
if (!is_nacl) {
sources += [
"base_paths.cc",
@@ -1094,10 +1123,6 @@ component("base") {
}
}
- if (!is_mac && is_posix) {
- sources += [ "memory/shared_memory_handle_posix.cc" ]
- }
-
all_dependent_configs = []
defines = []
data = []
@@ -1106,6 +1131,7 @@ component("base") {
":base_flags",
":base_implementation",
"//build/config:precompiled_headers",
+ "//build/config/compiler:noshadowing",
]
deps = [
@@ -1118,6 +1144,7 @@ component("base") {
public_deps = [
":base_static",
":build_date",
+ ":cfi_flags",
":debugging_flags",
"//base/numerics:base_numerics",
]
@@ -1177,14 +1204,20 @@ component("base") {
# Android.
if (is_android) {
sources -= [ "debug/stack_trace_posix.cc" ]
+ sources += [
+ "memory/shared_memory_android.cc",
+ "memory/shared_memory_handle_android.cc",
+ ]
# Android uses some Linux sources, put those back.
set_sources_assignment_filter([])
sources += [
"debug/proc_maps_linux.cc",
+ "debug/proc_maps_linux.h",
"files/file_path_watcher_linux.cc",
"power_monitor/power_monitor_device_source_android.cc",
"process/internal_linux.cc",
+ "process/internal_linux.h",
"process/memory_linux.cc",
"process/process_handle_linux.cc",
"process/process_info_linux.cc",
@@ -1215,8 +1248,6 @@ component("base") {
# below, rather than using the generic POSIX or Linux-y ones.
sources -= [
"debug/stack_trace_posix.cc",
- "memory/shared_memory_handle_posix.cc",
- "memory/shared_memory_posix.cc",
"message_loop/message_pump_libevent.cc",
"message_loop/message_pump_libevent.h",
"posix/unix_domain_socket.cc",
@@ -1234,6 +1265,8 @@ component("base") {
"files/file_path_watcher_fuchsia.cc",
"fuchsia/default_job.cc",
"fuchsia/default_job.h",
+ "fuchsia/fuchsia_logging.cc",
+ "fuchsia/fuchsia_logging.h",
"fuchsia/scoped_zx_handle.h",
"memory/shared_memory_fuchsia.cc",
"memory/shared_memory_handle_fuchsia.cc",
@@ -1264,6 +1297,7 @@ component("base") {
set_sources_assignment_filter([])
sources += [
"files/file_path_watcher_stub.cc",
+ "memory/shared_memory_nacl.cc",
"process/process_metrics_nacl.cc",
"sync_socket_nacl.cc",
"threading/platform_thread_linux.cc",
@@ -1290,7 +1324,6 @@ component("base") {
"memory/discardable_shared_memory.h",
"memory/shared_memory_helper.cc",
"memory/shared_memory_helper.h",
- "memory/shared_memory_posix.cc",
"native_library.cc",
"native_library_posix.cc",
"path_service.cc",
@@ -1314,7 +1347,10 @@ component("base") {
if (is_nacl_nonsfi) {
set_sources_assignment_filter([])
- sources += [ "posix/unix_domain_socket.cc" ]
+ sources += [
+ "posix/unix_domain_socket.cc",
+ "posix/unix_domain_socket.h",
+ ]
set_sources_assignment_filter(sources_assignment_filter)
sources -= [ "rand_util_nacl.cc" ]
configs += [ ":nacl_nonsfi_warnings" ]
@@ -1341,7 +1377,6 @@ component("base") {
} else {
# Remove NaCl stuff.
sources -= [
- "memory/shared_memory_nacl.cc",
"os_compat_nacl.cc",
"os_compat_nacl.h",
"rand_util_nacl.cc",
@@ -1367,6 +1402,8 @@ component("base") {
# Windows.
if (is_win) {
sources += [
+ "memory/shared_memory_handle_win.cc",
+ "memory/shared_memory_win.cc",
"power_monitor/power_monitor_device_source_win.cc",
"profiler/win32_stack_frame_unwinder.cc",
"profiler/win32_stack_frame_unwinder.h",
@@ -1387,6 +1424,10 @@ component("base") {
"//base/win:base_win_features",
]
+ if (com_init_check_hook_disabled) {
+ defines += [ "COM_INIT_CHECK_HOOK_DISABLED" ]
+ }
+
if (is_component_build) {
# Copy the VS runtime DLLs into the isolate so that they don't have to be
# preinstalled on the target machine. The debug runtimes have a "d" at
@@ -1462,6 +1503,7 @@ component("base") {
libs = [
"cfgmgr32.lib",
"powrprof.lib",
+ "propsys.lib",
"setupapi.lib",
"userenv.lib",
"winmm.lib",
@@ -1477,10 +1519,14 @@ component("base") {
# Desktop Mac.
if (is_mac) {
+ sources -= [ "profiler/native_stack_sampler_posix.cc" ]
sources += [
"mac/scoped_typeref.h",
+ "memory/shared_memory_handle_mac.cc",
+ "memory/shared_memory_mac.cc",
"power_monitor/power_monitor_device_source_mac.mm",
"time/time_conversion_posix.cc",
+ "time/time_exploded_posix.cc",
"time/time_mac.cc",
]
@@ -1497,17 +1543,12 @@ component("base") {
# Mac or iOS.
if (is_mac || is_ios) {
sources -= [
- "memory/shared_memory_posix.cc",
"native_library_posix.cc",
"strings/sys_string_conversions_posix.cc",
"synchronization/waitable_event_posix.cc",
"synchronization/waitable_event_watcher_posix.cc",
"threading/platform_thread_internal_posix.cc",
]
-
- if (is_mac) {
- sources -= [ "profiler/native_stack_sampler_posix.cc" ]
- }
} else {
# Non-Mac/ios.
sources -= [
@@ -1602,8 +1643,6 @@ component("base") {
"mac/mac_logging.mm",
"mac/mach_logging.cc",
"mac/mach_logging.h",
- "mac/objc_property_releaser.h",
- "mac/objc_property_releaser.mm",
"mac/objc_release_properties.h",
"mac/objc_release_properties.mm",
"mac/scoped_block.h",
@@ -1618,7 +1657,6 @@ component("base") {
"mac/scoped_objc_class_swizzler.h",
"mac/scoped_objc_class_swizzler.mm",
"mac/scoped_typeref.h",
- "memory/shared_memory_posix.cc",
"message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm",
"power_monitor/power_monitor_device_source_ios.mm",
@@ -1633,6 +1671,16 @@ component("base") {
set_sources_assignment_filter(sources_assignment_filter)
}
+ # Android, Fuchsia, and MacOS have their own custom shared memory handle
+  # implementations, e.g. due to supporting both POSIX and native handles.
+ if (is_posix && !is_android && !is_fuchsia && !is_mac) {
+ sources += [ "memory/shared_memory_handle_posix.cc" ]
+ }
+
+ if (is_posix && !is_fuchsia && !is_mac && !is_nacl) {
+ sources += [ "memory/shared_memory_posix.cc" ]
+ }
+
if (is_posix && !is_fuchsia && !is_mac && !is_ios) {
sources += [
"time/time_conversion_posix.cc",
@@ -1669,13 +1717,33 @@ component("base") {
}
}
+# Build flags for Control Flow Integrity
+# https://www.chromium.org/developers/testing/control-flow-integrity
+buildflag_header("cfi_flags") {
+ header = "cfi_flags.h"
+
+  # buildflag entries added to this header must also be manually added to
+ # tools/gn/bootstrap/bootstrap.py
+ flags = [
+ # TODO(pcc): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+ "CFI_CAST_CHECK=$is_cfi && $use_cfi_cast",
+ "CFI_ICALL_CHECK=$is_cfi && $use_cfi_icall",
+ "CFI_ENFORCEMENT_TRAP=$is_cfi && !$use_cfi_diag",
+ "CFI_ENFORCEMENT_DIAGNOSTIC=$is_cfi && $use_cfi_diag && !$use_cfi_recover",
+ ]
+}
+
buildflag_header("debugging_flags") {
header = "debugging_flags.h"
header_dir = "base/debug"
+
+  # buildflag entries added to this header must also be manually added to
+ # tools/gn/bootstrap/bootstrap.py
flags = [
"ENABLE_LOCATION_SOURCE=$enable_location_source",
"ENABLE_PROFILING=$enable_profiling",
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
+ "UNSAFE_DEVELOPER_BUILD=$is_unsafe_developer_build",
]
}
@@ -2021,6 +2089,7 @@ test("base_unittests") {
"i18n/string_search_unittest.cc",
"i18n/time_formatting_unittest.cc",
"i18n/timezone_unittest.cc",
+ "ios/callback_counter_unittest.mm",
"ios/crb_protocol_observers_unittest.mm",
"ios/device_util_unittest.mm",
"ios/weak_nsobject_unittest.mm",
@@ -2038,7 +2107,6 @@ test("base_unittests") {
"mac/foundation_util_unittest.mm",
"mac/mac_util_unittest.mm",
"mac/mach_port_broker_unittest.cc",
- "mac/objc_property_releaser_unittest.mm",
"mac/objc_release_properties_unittest.mm",
"mac/scoped_nsobject_unittest.mm",
"mac/scoped_objc_class_swizzler_unittest.mm",
@@ -2053,6 +2121,7 @@ test("base_unittests") {
"memory/memory_pressure_monitor_mac_unittest.cc",
"memory/memory_pressure_monitor_unittest.cc",
"memory/memory_pressure_monitor_win_unittest.cc",
+ "memory/protected_memory_unittest.cc",
"memory/ptr_util_unittest.cc",
"memory/ref_counted_memory_unittest.cc",
"memory/ref_counted_unittest.cc",
@@ -2247,11 +2316,13 @@ test("base_unittests") {
"win/scoped_hstring_unittest.cc",
"win/scoped_process_information_unittest.cc",
"win/scoped_variant_unittest.cc",
+ "win/scoped_winrt_initializer_unittest.cc",
"win/shortcut_unittest.cc",
"win/startup_information_unittest.cc",
"win/wait_chain_unittest.cc",
"win/win_util_unittest.cc",
"win/windows_version_unittest.cc",
+ "win/winrt_storage_util_unittest.cc",
"win/wrapped_window_proc_unittest.cc",
]
@@ -2323,7 +2394,6 @@ test("base_unittests") {
sources += [
"mac/bind_objc_block_unittest.mm",
"mac/foundation_util_unittest.mm",
- "mac/objc_property_releaser_unittest.mm",
"mac/objc_release_properties_unittest.mm",
"mac/scoped_nsobject_unittest.mm",
"strings/sys_string_conversions_mac_unittest.mm",
@@ -2414,15 +2484,6 @@ test("base_unittests") {
# data += [ "$root_out_dir/base_unittests.dSYM/" ]
}
}
-
- if (use_cfi_cast) {
- # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
- defines += [ "CFI_CAST_CHECK" ]
- }
-
- if (use_cfi_diag && !use_cfi_recover) {
- defines += [ "CFI_ENFORCEMENT_DIAGNOSTIC" ]
- }
}
action("build_date") {
@@ -2594,7 +2655,6 @@ if (is_android) {
"android/java/src/org/chromium/base/annotations/NativeCall.java",
"android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
"android/java/src/org/chromium/base/annotations/RemovableInRelease.java",
- "android/java/src/org/chromium/base/annotations/SuppressFBWarnings.java",
"android/java/src/org/chromium/base/annotations/UsedByReflection.java",
"android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
@@ -2689,18 +2749,22 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunner.java",
"test/android/javatests/src/org/chromium/base/test/params/BlockJUnit4RunnerDelegate.java",
"test/android/javatests/src/org/chromium/base/test/params/BaseJUnit4RunnerDelegate.java",
+ "test/android/javatests/src/org/chromium/base/test/params/MethodParamAnnotationRule.java",
+ "test/android/javatests/src/org/chromium/base/test/params/MethodParamRule.java",
"test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactory.java",
"test/android/javatests/src/org/chromium/base/test/params/ParameterizedFrameworkMethod.java",
"test/android/javatests/src/org/chromium/base/test/params/ParameterSet.java",
"test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegate.java",
"test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommon.java",
"test/android/javatests/src/org/chromium/base/test/params/ParameterAnnotations.java",
+ "test/android/javatests/src/org/chromium/base/test/params/ParameterProvider.java",
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
- "test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessor.java",
+ "test/android/javatests/src/org/chromium/base/test/util/AnnotationRule.java",
"test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
"test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
"test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
"test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessingUtils.java",
"test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java",
"test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java",
"test/android/javatests/src/org/chromium/base/test/util/Feature.java",
@@ -2737,9 +2801,20 @@ if (is_android) {
java_files = [ "test/android/javatests/src/org/chromium/base/test/TestChildProcessConnection.java" ]
}
+ android_library("base_junit_test_support") {
+    # Platform checks are broken for Robolectric.
+ bypass_platform_checks = true
+ testonly = true
+ java_files = [ "android/junit/src/org/chromium/base/metrics/test/ShadowRecordHistogram.java" ]
+ deps = [
+ ":base_java",
+ "//third_party/robolectric:robolectric_all_java",
+ ]
+ }
+
junit_binary("base_junit_tests") {
java_files = [
- "android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
+ "android/junit/src/org/chromium/base/ApplicationStatusTest.java",
"android/junit/src/org/chromium/base/DiscardableReferencePoolTest.java",
"android/junit/src/org/chromium/base/LogTest.java",
"android/junit/src/org/chromium/base/NonThreadSafeTest.java",
@@ -2748,6 +2823,7 @@ if (is_android) {
"android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java",
"test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java",
"test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java",
+ "test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java",
"test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
"test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
"test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
@@ -2762,6 +2838,7 @@ if (is_android) {
":base_java_process_launcher_test_support",
":base_java_test_support",
"//third_party/android_support_test_runner:runner_java",
+ "//third_party/hamcrest:hamcrest_java",
]
srcjar_deps = [ ":base_build_config_gen" ]
}
@@ -2772,6 +2849,7 @@ if (is_android) {
"android/library_loader/library_load_from_apk_status_codes.h",
"android/library_loader/library_loader_hooks.h",
"memory/memory_pressure_listener.h",
+ "metrics/histogram_base.h",
]
}
@@ -2801,12 +2879,30 @@ if (is_android) {
]
java_files = [
"test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
- "test/android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
- "test/android/java/src/org/chromium/base/TestSystemMessageHandler.java",
+ "test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java",
]
}
}
+# Keep the list of fuzzer_tests in alphabetical order.
+fuzzer_test("base64_decode_fuzzer") {
+ sources = [
+ "base64_decode_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+fuzzer_test("base64_encode_fuzzer") {
+ sources = [
+ "base64_encode_fuzzer.cc",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
fuzzer_test("base_json_correctness_fuzzer") {
sources = [
"json/correctness_fuzzer.cc",
diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc
index 41adbf8a9b4..4b7de5e1a32 100644
--- a/chromium/base/allocator/allocator_shim.cc
+++ b/chromium/base/allocator/allocator_shim.cc
@@ -41,10 +41,6 @@ subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
bool g_call_new_handler_on_malloc_failure = false;
-#if !defined(OS_WIN)
-subtle::Atomic32 g_new_handler_lock = 0;
-#endif
-
inline size_t GetCachedPageSize() {
static size_t pagesize = 0;
if (!pagesize)
@@ -58,20 +54,7 @@ bool CallNewHandler(size_t size) {
#if defined(OS_WIN)
return base::allocator::WinCallNewHandler(size);
#else
- // TODO(primiano): C++11 has introduced std::get_new_handler() which is
- // supposed to be thread safe and would avoid the spinlock boilerplate here.
- // However, it is not available in the headers in the current Debian Jessie
- // sysroot, which has libstdc++ 4.8. The function is available in libstdc++
- // 4.9 and newer, but it will be a few more years before a newer sysroot
- // becomes available.
- std::new_handler nh;
- {
- while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1))
- PlatformThread::YieldCurrentThread();
- nh = std::set_new_handler(0);
- ignore_result(std::set_new_handler(nh));
- subtle::Release_Store(&g_new_handler_lock, 0);
- }
+ std::new_handler nh = std::get_new_handler();
if (!nh)
return false;
(*nh)();
diff --git a/chromium/base/allocator/partition_allocator/PartitionAlloc.md b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
index d1a67481aa7..982d91f27b8 100644
--- a/chromium/base/allocator/partition_allocator/PartitionAlloc.md
+++ b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
@@ -46,17 +46,21 @@ possibility of inlining.
For an example of how to use partitions to get good performance and good safety,
see Blink's usage, as described in `wtf/allocator/Allocator.md`.
-Large allocations (> 1 MB) are realized by direct memory mmapping.
+Large allocations (> kGenericMaxBucketed == 960KB) are realized by direct
+memory mmapping. This size makes sense because 960KB = 0xF0000. The next larger
+bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
+a SuperPage meaning it would not be possible to pack even 2 sequential
+allocations in a SuperPage.
-`PartitionAllocGeneric` acquires a lock for thread safety. (The current
+`PartitionRootGeneric::Alloc()` acquires a lock for thread safety. (The current
implementation uses a spin lock on the assumption that thread contention will be
rare in its callers. The original caller was Blink, where this is generally
true. Spin locks also have the benefit of simplicity.)
Callers can get thread-unsafe performance using a
`SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of
-`PartitionAllocGeneric`). Callers can also arrange for low contention, such as
-by using a dedicated partition for single-threaded, latency-critical
+`PartitionRootGeneric::Alloc()`). Callers can also arrange for low contention,
+such as by using a dedicated partition for single-threaded, latency-critical
allocations.
Because PartitionAlloc guarantees that address space regions used for one
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
index fb1b47329c5..114ad9557f0 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -7,18 +7,10 @@
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/lazy_instance.h"
+#include "base/rand_util.h"
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
-#include "base/win/windows_version.h"
-#else
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-// VersionHelpers.h must be included after windows.h.
-#if defined(OS_WIN)
#include <VersionHelpers.h>
#endif
@@ -37,6 +29,8 @@ struct ranctx {
uint32_t d;
};
+static LazyInstance<ranctx>::Leaky s_ranctx = LAZY_INSTANCE_INITIALIZER;
+
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
uint32_t ranvalInternal(ranctx* x) {
@@ -53,40 +47,31 @@ uint32_t ranvalInternal(ranctx* x) {
uint32_t ranval(ranctx* x) {
subtle::SpinLock::Guard guard(x->lock);
if (UNLIKELY(!x->initialized)) {
+ const uint64_t r1 = RandUint64();
+ const uint64_t r2 = RandUint64();
+
+ x->a = static_cast<uint32_t>(r1);
+ x->b = static_cast<uint32_t>(r1 >> 32);
+ x->c = static_cast<uint32_t>(r2);
+ x->d = static_cast<uint32_t>(r2 >> 32);
+
x->initialized = true;
- char c;
- uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
- uint32_t pid;
- uint32_t usec;
-#if defined(OS_WIN)
- pid = GetCurrentProcessId();
- SYSTEMTIME st;
- GetSystemTime(&st);
- usec = static_cast<uint32_t>(st.wMilliseconds * 1000);
-#else
- pid = static_cast<uint32_t>(getpid());
- struct timeval tv;
- gettimeofday(&tv, 0);
- usec = static_cast<uint32_t>(tv.tv_usec);
-#endif
- seed ^= pid;
- seed ^= usec;
- x->a = 0xf1ea5eed;
- x->b = x->c = x->d = seed;
- for (int i = 0; i < 20; ++i) {
- (void)ranvalInternal(x);
- }
}
- uint32_t ret = ranvalInternal(x);
- return ret;
-}
-static LazyInstance<ranctx>::Leaky s_ranctx = LAZY_INSTANCE_INITIALIZER;
+ return ranvalInternal(x);
+}
} // namespace
-// Calculates a random preferred mapping address. In calculating an address, we
-// balance good ASLR against not fragmenting the address space too badly.
+void SetRandomPageBaseSeed(int64_t seed) {
+ ranctx* x = s_ranctx.Pointer();
+ subtle::SpinLock::Guard guard(x->lock);
+ // Set RNG to initial state.
+ x->initialized = true;
+ x->a = x->b = static_cast<uint32_t>(seed);
+ x->c = x->d = static_cast<uint32_t>(seed >> 32);
+}
+
void* GetRandomPageBase() {
uintptr_t random = static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
@@ -94,7 +79,12 @@ void* GetRandomPageBase() {
random <<= 32ULL;
random |= static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
-#if defined(OS_WIN)
+// The kASLRMask and kASLROffset constants will be suitable for the
+// OS and build configuration.
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || defined(OS_POSIX)
+ random &= internal::kASLRMask;
+ random += internal::kASLROffset;
+#else // defined(OS_WIN)
// Windows >= 8.1 has the full 47 bits. Use them where available.
static bool windows_81 = false;
static bool windows_81_initialized = false;
@@ -108,10 +98,7 @@ void* GetRandomPageBase() {
random &= internal::kASLRMask;
}
random += internal::kASLROffset;
-#else // defined(OS_POSIX)
- random &= internal::kASLRMask;
- random += internal::kASLROffset;
-#endif // defined(OS_POSIX)
+#endif // defined(OS_WIN)
#else // defined(ARCH_CPU_32_BITS)
#if defined(OS_WIN)
// On win32 host systems the randomization plus huge alignment causes
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.h b/chromium/base/allocator/partition_allocator/address_space_randomization.h
index 2ff41d3ae37..8bea1f7d76c 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.h
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.h
@@ -11,6 +11,15 @@
namespace base {
+// Sets the seed for the random number generator used by GetRandomPageBase in
+// order to generate a predictable sequence of addresses. May be called multiple
+// times.
+BASE_EXPORT void SetRandomPageBaseSeed(int64_t seed);
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+BASE_EXPORT void* GetRandomPageBase();
+
namespace internal {
constexpr uintptr_t AslrAddress(uintptr_t mask) {
@@ -129,10 +138,6 @@ constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
} // namespace internal
-// Calculates a random preferred mapping address. In calculating an address, we
-// balance good ASLR against not fragmenting the address space too badly.
-BASE_EXPORT void* GetRandomPageBase();
-
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
index 1e5af2e8a82..c8c1da4c3b5 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
@@ -20,10 +20,13 @@
namespace base {
-TEST(AddressSpaceRandomizationTest, GetRandomPageBase) {
+namespace {
+
+uintptr_t GetMask() {
uintptr_t mask = internal::kASLRMask;
#if defined(ARCH_CPU_64_BITS)
-#if defined(OS_WIN)
+// Sanitizers use their own kASLRMask constant.
+#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
if (!IsWindows8Point1OrGreater()) {
mask = internal::kASLRMaskBefore8_10;
}
@@ -34,24 +37,47 @@ TEST(AddressSpaceRandomizationTest, GetRandomPageBase) {
if (!IsWow64Process(GetCurrentProcess(), &is_wow64))
is_wow64 = FALSE;
if (!is_wow64) {
- // ASLR is turned off on 32-bit Windows; check that result is null.
- EXPECT_EQ(nullptr, base::GetRandomPageBase());
- return;
+ mask = 0;
}
#endif // defined(OS_WIN)
#endif // defined(ARCH_CPU_32_BITS)
- // Sample the first 100 addresses.
+ return mask;
+}
+
+const size_t kSamples = 100;
+
+} // namespace
+
+TEST(AddressSpaceRandomizationTest, DisabledASLR) {
+ uintptr_t mask = GetMask();
+ if (!mask) {
+#if defined(OS_WIN) && defined(ARCH_CPU_32_BITS)
+ // ASLR should be turned off on 32-bit Windows.
+ EXPECT_EQ(nullptr, base::GetRandomPageBase());
+#else
+ // Otherwise, nullptr is very unexpected.
+ EXPECT_NE(nullptr, base::GetRandomPageBase());
+#endif
+ }
+}
+
+TEST(AddressSpaceRandomizationTest, Unpredictable) {
+ uintptr_t mask = GetMask();
+ // Configurations without ASLR are tested above, in DisabledASLR.
+ if (!mask)
+ return;
+
std::set<uintptr_t> addresses;
uintptr_t address_logical_sum = 0;
uintptr_t address_logical_product = static_cast<uintptr_t>(-1);
- for (int i = 0; i < 100; i++) {
+ for (size_t i = 0; i < kSamples; ++i) {
uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
// Test that address is in range.
EXPECT_LE(internal::kASLROffset, address);
EXPECT_GE(internal::kASLROffset + mask, address);
// Test that address is page aligned.
EXPECT_EQ(0ULL, (address & kPageAllocationGranularityOffsetMask));
- // Test that address is unique (no collisions in 100 tries)
+ // Test that address is unique (no collisions in kSamples tries)
CHECK_EQ(0ULL, addresses.count(address));
addresses.insert(address);
// Sum and product to test randomness at each bit position, below.
@@ -60,12 +86,47 @@ TEST(AddressSpaceRandomizationTest, GetRandomPageBase) {
address_logical_product &= address;
}
// All randomized bits in address_logical_sum should be set, since the
- // likelihood of never setting any of the bits is 1 / (2 ^ 100) with a good
- // RNG. Likewise, all bits in address_logical_product should be cleared.
+ // likelihood of never setting any of the bits is 1 / (2 ^ kSamples) with a
+ // good RNG. Likewise, all bits in address_logical_product should be cleared.
// Note that we don't test unmasked high bits. These may be set if kASLROffset
// is larger than kASLRMask, or if adding kASLROffset generated a carry.
EXPECT_EQ(mask, address_logical_sum & mask);
EXPECT_EQ(0ULL, address_logical_product & mask);
}
+TEST(AddressSpaceRandomizationTest, Predictable) {
+ uintptr_t mask = GetMask();
+ // Configurations without ASLR are tested above, in DisabledASLR.
+ if (!mask)
+ return;
+
+ const uintptr_t kInitialSeed = 0xfeed5eedULL;
+ base::SetRandomPageBaseSeed(kInitialSeed);
+
+ // Make sure the addresses look random but are predictable.
+ std::set<uintptr_t> addresses;
+ std::vector<uintptr_t> sequence;
+ for (size_t i = 0; i < kSamples; ++i) {
+ uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+ sequence.push_back(address);
+ // Test that address is in range.
+ EXPECT_LE(internal::kASLROffset, address);
+ EXPECT_GE(internal::kASLROffset + mask, address);
+ // Test that address is page aligned.
+ EXPECT_EQ(0ULL, (address & kPageAllocationGranularityOffsetMask));
+ // Test that address is unique (no collisions in kSamples tries)
+ CHECK_EQ(0ULL, addresses.count(address));
+ addresses.insert(address);
+ // Test that (address - offset) == (predicted & mask).
+ address -= internal::kASLROffset;
+ }
+
+ // Make sure sequence is repeatable.
+ base::SetRandomPageBaseSeed(kInitialSeed);
+ for (size_t i = 0; i < kSamples; ++i) {
+ uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+ EXPECT_EQ(address, sequence[i]);
+ }
+}
+
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index 268753208fc..87fead7b87e 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -139,7 +139,7 @@ static void* SystemAllocPages(void* hint,
break;
if (have_retried) {
s_allocPageErrorCode = errno;
- ret = 0;
+ ret = nullptr;
break;
}
ReleaseReservation();
@@ -291,23 +291,17 @@ bool SetSystemPagesAccess(void* address,
}
void DecommitSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
#if defined(OS_POSIX)
-#if defined(OS_MACOSX)
- // On macOS, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
- // marks the pages with the reusable bit, which allows both Activity Monitor
- // and memory-infra to correctly track the pages.
- int ret = madvise(address, length, MADV_FREE_REUSABLE);
-#else
- int ret = madvise(address, length, MADV_FREE);
-#endif
- if (ret != 0 && errno == EINVAL) {
- // MADV_FREE only works on Linux 4.5+ . If request failed,
- // retry with older MADV_DONTNEED . Note that MADV_FREE
- // being defined at compile time doesn't imply runtime support.
- ret = madvise(address, length, MADV_DONTNEED);
- }
- CHECK(!ret);
+ // In POSIX, there is no decommit concept. Discarding is an effective way of
+ // implementing the Windows semantics where the OS is allowed to not swap the
+ // pages in the region.
+ //
+ // TODO(ajwong): Also explore setting PageInaccessible to make the protection
+ // semantics consistent between Windows and POSIX. This might have a perf cost
+ // though as both decommit and recommit would incur an extra syscall.
+ // http://crbug.com/766882
+ DiscardSystemPages(address, length);
#else
CHECK(SetSystemPagesAccess(address, length, PageInaccessible));
#endif
@@ -316,7 +310,7 @@ void DecommitSystemPages(void* address, size_t length) {
bool RecommitSystemPages(void* address,
size_t length,
PageAccessibilityConfiguration page_accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
DCHECK_NE(PageInaccessible, page_accessibility);
#if defined(OS_POSIX)
// On POSIX systems, read the memory to recommit. This has the correct
@@ -329,13 +323,23 @@ bool RecommitSystemPages(void* address,
}
void DiscardSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
#if defined(OS_POSIX)
- // On POSIX, the implementation detail is that discard and decommit are the
- // same, and lead to pages that are returned to the system immediately and
- // get replaced with zeroed pages when touched. So we just call
- // DecommitSystemPages() here to avoid code duplication.
- DecommitSystemPages(address, length);
+#if defined(OS_MACOSX)
+ // On macOS, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
+ // marks the pages with the reusable bit, which allows both Activity Monitor
+ // and memory-infra to correctly track the pages.
+ int ret = madvise(address, length, MADV_FREE_REUSABLE);
+#else
+ int ret = madvise(address, length, MADV_FREE);
+#endif
+ if (ret != 0 && errno == EINVAL) {
+ // MADV_FREE only works on Linux 4.5+ . If request failed,
+ // retry with older MADV_DONTNEED . Note that MADV_FREE
+ // being defined at compile time doesn't imply runtime support.
+ ret = madvise(address, length, MADV_DONTNEED);
+ }
+ CHECK(!ret);
#else
// On Windows discarded pages are not returned to the system immediately and
// not guaranteed to be zeroed when returned to the application.
@@ -356,8 +360,8 @@ void DiscardSystemPages(void* address, size_t length) {
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
if (ret) {
- void* ret = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
- CHECK(ret);
+ void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+ CHECK(ptr);
}
#endif
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.h b/chromium/base/allocator/partition_allocator/page_allocator.h
index 7085163ec74..297d817cd9f 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator.h
@@ -83,15 +83,24 @@ BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccess(
// Decommit one or more system pages starting at |address| and continuing for
// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
//
-// Decommitted means that the physical memory is released to the system, but the
-// virtual address space remains reserved. System pages are re-committed by
-// calling |RecommitSystemPages|. Touching a decommitted page _may_ fault.
-//
-// Clients should not make any assumptions about the contents of decommitted
-// system pages, before or after they write to the page. The only guarantee
-// provided is that the contents of the system page will be deterministic again
-// after recommitting and writing to it. In particlar note that system pages are
-// not guaranteed to be zero-filled upon re-commit.
+// Decommitted means that physical resources (RAM or swap) backing the allocated
+// virtual address range are released back to the system, but the address space
+// is still allocated to the process (possibly using up page table entries or
+// other accounting resources). Any access to a decommitted region of memory
+// is an error and will generate a fault.
+//
+// This operation is not atomic on all platforms.
+//
+// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
+// processes will not fault when touching a committed memory region. There is
+// no analogue in the POSIX memory API where virtual memory pages are
+// best-effort allocated resources on the first touch. To create a
+// platform-agnostic abstraction, this API simulates the Windows "decommit"
+// state by both discarding the region (allowing the OS to avoid swap
+// operations) and changing the page protections so accesses fault.
+//
+// TODO(ajwong): This currently does not change page protections on POSIX
+// systems due to a perf regression. Tracked at http://crbug.com/766882.
BASE_EXPORT void DecommitSystemPages(void* address, size_t length);
// Recommit one or more system pages, starting at |address| and continuing for
@@ -99,8 +108,7 @@ BASE_EXPORT void DecommitSystemPages(void* address, size_t length);
// multiple of |kSystemPageSize|.
//
// Decommitted system pages must be recommitted with their original permissions
-// before they are used again. Note that this operation may be a no-op on some
-// platforms.
+// before they are used again.
//
// Returns true if the recommit change succeeded. In most cases you must |CHECK|
// the result.
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index 17d18bee6df..8afd6c07e50 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -34,6 +34,9 @@ static_assert(sizeof(base::PartitionSuperPageExtentEntry) <=
static_assert(base::kPageMetadataSize * base::kNumPartitionPagesPerSuperPage <=
base::kSystemPageSize,
"page metadata fits in hole");
+// Limit to prevent callers accidentally overflowing an int size.
+static_assert(base::kGenericMaxDirectMapped <= 1UL << 31,
+ "maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
static_assert(base::kGenericSmallestBucket == 8, "generic smallest bucket");
static_assert(base::kGenericMaxBucketed == 983040, "generic max bucketed");
@@ -75,6 +78,18 @@ PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
nullptr;
PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
+// Find the best number of System Pages to allocate for |size| to minimize
+// wasted space. Uses a heuristic that looks at number of bytes wasted after
+// the last slot and attempts to account for the PTE usage of each System Page.
+//
+// TODO(ajwong): This seems to interact badly with
+// PartitionBucketPartitionPages() which rounds the value from this up to a
+// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyways.
+// http://crbug.com/776537
+//
+// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
+// both used and unused pages.
+// http://crbug.com/776537
static uint8_t PartitionBucketNumSystemPages(size_t size) {
// This works out reasonably for the current bucket sizes of the generic
// allocator, and the current values of partition page size and constants.
@@ -89,8 +104,13 @@ static uint8_t PartitionBucketNumSystemPages(size_t size) {
double best_waste_ratio = 1.0f;
uint16_t best_pages = 0;
if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ // TODO(ajwong): Why is there a DCHECK here for this?
+ // http://crbug.com/776537
DCHECK(!(size % kSystemPageSize));
best_pages = static_cast<uint16_t>(size / kSystemPageSize);
+ // TODO(ajwong): Should this be checking against
+ // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
+ // http://crbug.com/776537
CHECK(best_pages < (1 << 8));
return static_cast<uint8_t>(best_pages);
}
@@ -102,6 +122,11 @@ static uint8_t PartitionBucketNumSystemPages(size_t size) {
size_t waste = page_size - (num_slots * size);
// Leaving a page unfaulted is not free; the page will occupy an empty page
// table entry. Make a simple attempt to account for that.
+ //
+ // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
+ // regardless of whether or not they are wasted. Should it just
+ // be waste += i * sizeof(void*)?
+ // http://crbug.com/776537
size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
size_t num_unfaulted_pages =
num_remainder_pages
@@ -140,8 +165,8 @@ static void PartitionAllocBaseInit(PartitionRootBase* root) {
static void PartitionBucketInitBase(PartitionBucket* bucket,
PartitionRootBase* root) {
bucket->active_pages_head = &g_sentinel_page;
- bucket->empty_pages_head = 0;
- bucket->decommitted_pages_head = 0;
+ bucket->empty_pages_head = nullptr;
+ bucket->decommitted_pages_head = nullptr;
bucket->num_full_pages = 0;
bucket->num_system_pages_per_slot_span =
PartitionBucketNumSystemPages(bucket->slot_size);
@@ -152,28 +177,26 @@ void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
PartitionRootBase::gOomHandlingFunction = oom_handling_function;
}
-void PartitionAllocInit(PartitionRoot* root,
- size_t num_buckets,
- size_t max_allocation) {
- PartitionAllocBaseInit(root);
+void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
+ PartitionAllocBaseInit(this);
- root->num_buckets = num_buckets;
- root->max_allocation = max_allocation;
+ this->num_buckets = num_buckets;
+ this->max_allocation = max_allocation;
size_t i;
- for (i = 0; i < root->num_buckets; ++i) {
- PartitionBucket* bucket = &root->buckets()[i];
+ for (i = 0; i < this->num_buckets; ++i) {
+ PartitionBucket* bucket = &this->buckets()[i];
if (!i)
bucket->slot_size = kAllocationGranularity;
else
bucket->slot_size = i << kBucketShift;
- PartitionBucketInitBase(bucket, root);
+ PartitionBucketInitBase(bucket, this);
}
}
-void PartitionAllocGenericInit(PartitionRootGeneric* root) {
- subtle::SpinLock::Guard guard(root->lock);
+void PartitionRootGeneric::Init() {
+ subtle::SpinLock::Guard guard(this->lock);
- PartitionAllocBaseInit(root);
+ PartitionAllocBaseInit(this);
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
@@ -189,7 +212,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
order_index_shift = 0;
else
order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
- root->order_index_shifts[order] = order_index_shift;
+ this->order_index_shifts[order] = order_index_shift;
size_t sub_order_index_mask;
if (order == kBitsPerSizeT) {
// This avoids invoking undefined behavior for an excessive shift.
@@ -199,7 +222,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
(kGenericNumBucketsPerOrderBits + 1);
}
- root->order_sub_index_masks[order] = sub_order_index_mask;
+ this->order_sub_index_masks[order] = sub_order_index_mask;
}
// Set up the actual usable buckets first.
@@ -212,30 +235,30 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
size_t current_size = kGenericSmallestBucket;
size_t currentIncrement =
kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
- PartitionBucket* bucket = &root->buckets[0];
+ PartitionBucket* bucket = &this->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
bucket->slot_size = current_size;
- PartitionBucketInitBase(bucket, root);
+ PartitionBucketInitBase(bucket, this);
// Disable psuedo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket)
- bucket->active_pages_head = 0;
+ bucket->active_pages_head = nullptr;
current_size += currentIncrement;
++bucket;
}
currentIncrement <<= 1;
}
DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
- DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
+ DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
// Then set up the fast size -> bucket lookup table.
- bucket = &root->buckets[0];
- PartitionBucket** bucketPtr = &root->bucket_lookups[0];
+ bucket = &this->buckets[0];
+ PartitionBucket** bucketPtr = &this->bucket_lookups[0];
for (order = 0; order <= kBitsPerSizeT; ++order) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
if (order < kGenericMinBucketedOrder) {
// Use the bucket of the finest granularity for malloc(0) etc.
- *bucketPtr++ = &root->buckets[0];
+ *bucketPtr++ = &this->buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
*bucketPtr++ = &g_sentinel_bucket;
} else {
@@ -248,10 +271,9 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
}
}
}
- DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
- DCHECK(bucketPtr ==
- &root->bucket_lookups[0] +
- ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+ DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
+ DCHECK(bucketPtr == &this->bucket_lookups[0] +
+ ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existant order.
*bucketPtr = &g_sentinel_bucket;
@@ -287,7 +309,7 @@ static NOINLINE void PartitionBucketFull() {
OOM_CRASH();
}
-// partitionPageStateIs*
+// PartitionPageStateIs*
// Note that it's only valid to call these functions on pages found on one of
// the page lists. Specifically, you can't call these functions on full pages
// that were detached from the active list.
@@ -302,7 +324,7 @@ PartitionPageStateIsActive(const PartitionPage* page) {
static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) {
DCHECK(page != &g_sentinel_page);
DCHECK(!page->page_offset);
- bool ret = (page->num_allocated_slots == PartitionBucketSlots(page->bucket));
+ bool ret = (page->num_allocated_slots == page->bucket->get_slots_per_span());
if (ret) {
DCHECK(!page->freelist_head);
DCHECK(!page->num_unprovisioned_slots);
@@ -375,6 +397,11 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
// In this case, we can still hand out pages from the current super page
// allocation.
char* ret = root->next_partition_page;
+
+  // Fresh System Pages in the SuperPages are decommitted. Commit them
+ // before vending them back.
+ CHECK(SetSystemPagesAccess(ret, total_size, PageReadWrite));
+
root->next_partition_page += total_size;
PartitionIncreaseCommittedPages(root, total_size);
return ret;
@@ -388,11 +415,16 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
char* super_page = reinterpret_cast<char*>(AllocPages(
requestedAddress, kSuperPageSize, kSuperPageSize, PageReadWrite));
if (UNLIKELY(!super_page))
- return 0;
+ return nullptr;
root->total_size_of_super_pages += kSuperPageSize;
PartitionIncreaseCommittedPages(root, total_size);
+ // |total_size| MUST be less than kSuperPageSize - (kPartitionPageSize*2).
+ // This is a trustworthy value because num_partition_pages is not user
+ // controlled.
+ //
+ // TODO(ajwong): Introduce a DCHECK.
root->next_super_page = super_page + kSuperPageSize;
char* ret = super_page + kPartitionPageSize;
root->next_partition_page = ret + total_size;
@@ -405,9 +437,18 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
CHECK(SetSystemPagesAccess(super_page + (kSystemPageSize * 2),
kPartitionPageSize - (kSystemPageSize * 2),
PageInaccessible));
- // Also make the last partition page a guard page.
- CHECK(SetSystemPagesAccess(super_page + (kSuperPageSize - kPartitionPageSize),
- kPartitionPageSize, PageInaccessible));
+ // CHECK(SetSystemPagesAccess(super_page + (kSuperPageSize -
+ // kPartitionPageSize),
+ // kPartitionPageSize, PageInaccessible));
+ // All remaining slotspans for the unallocated PartitionPages inside the
+ // SuperPage are conceptually decommitted. Correctly set the state here
+ // so they do not occupy resources.
+ //
+ // TODO(ajwong): Refactor Page Allocator API so the SuperPage comes in
+  // decommitted initially.
+ CHECK(SetSystemPagesAccess(super_page + kPartitionPageSize + total_size,
+ (kSuperPageSize - kPartitionPageSize - total_size),
+ PageInaccessible));
// If we were after a specific address, but didn't get it, assume that
// the system chose a lousy address. Here most OS'es have a default
@@ -416,7 +457,7 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
// successful mapping, which is far from random. So we just get fresh
// randomness for the next mapping attempt.
if (requestedAddress && requestedAddress != super_page)
- root->next_super_page = 0;
+ root->next_super_page = nullptr;
// We allocated a new super page so update super page metadata.
// First check if this is a new extent or not.
@@ -429,9 +470,9 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
// Most new extents will be part of a larger extent, and these three fields
// are unused, but we initialize them to 0 so that we get a clear signal
// in case they are accidentally used.
- latest_extent->super_page_base = 0;
- latest_extent->super_pages_end = 0;
- latest_extent->next = 0;
+ latest_extent->super_page_base = nullptr;
+ latest_extent->super_pages_end = nullptr;
+ latest_extent->next = nullptr;
PartitionSuperPageExtentEntry* current_extent = root->current_extent;
bool isNewExtent = (super_page != requestedAddress);
@@ -457,8 +498,12 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
return ret;
}
+// Returns a natural number of PartitionPages (calculated by
+// PartitionBucketNumSystemPages()) to allocate from the current SuperPage
+// when the bucket runs out of slots.
static ALWAYS_INLINE uint16_t
PartitionBucketPartitionPages(const PartitionBucket* bucket) {
+ // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
return (bucket->num_system_pages_per_slot_span +
(kNumSystemPagesPerPartitionPage - 1)) /
kNumSystemPagesPerPartitionPage;
@@ -467,12 +512,16 @@ PartitionBucketPartitionPages(const PartitionBucket* bucket) {
static ALWAYS_INLINE void PartitionPageReset(PartitionPage* page) {
DCHECK(PartitionPageStateIsDecommitted(page));
- page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket);
+ page->num_unprovisioned_slots = page->bucket->get_slots_per_span();
DCHECK(page->num_unprovisioned_slots);
page->next_page = nullptr;
}
+// Each bucket allocates a slot span when it runs out of slots.
+// A slot span's size is equal to PartitionBucketPartitionPages(bucket)
+// number of PartitionPages. This function initializes all pages within the
+// span.
static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
PartitionBucket* bucket) {
// The bucket never changes. We set it up once.
@@ -506,13 +555,13 @@ static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
// We should only get here when _every_ slot is either used or unprovisioned.
// (The third state is "on the freelist". If we have a non-empty freelist, we
// should not get here.)
- DCHECK(num_slots + page->num_allocated_slots == PartitionBucketSlots(bucket));
+ DCHECK(num_slots + page->num_allocated_slots == bucket->get_slots_per_span());
// Similarly, make explicitly sure that the freelist is empty.
DCHECK(!page->freelist_head);
DCHECK(page->num_allocated_slots >= 0);
size_t size = bucket->slot_size;
- char* base = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
char* return_object = base + (size * page->num_allocated_slots);
char* firstFreelistPointer = return_object + size;
char* firstFreelistPointerExtent =
@@ -559,9 +608,9 @@ static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
entry->next = PartitionFreelistMask(next_entry);
entry = next_entry;
}
- entry->next = PartitionFreelistMask(0);
+ entry->next = PartitionFreelistMask(nullptr);
} else {
- page->freelist_head = 0;
+ page->freelist_head = nullptr;
}
return return_object;
}
@@ -570,7 +619,7 @@ static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
// active page.
// When it finds a suitable new active page (one that has free slots and is not
// empty), it is set as the new active page. If there is no suitable new
-// active page, the current active page is set to the seed page.
+// active page, the current active page is set to &g_sentinel_page.
// As potential pages are scanned, they are tidied up according to their state.
// Empty pages are swept on to the empty page list, decommitted pages on to the
// decommitted page list and full pages are unlinked from any list.
@@ -612,7 +661,7 @@ static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
if (UNLIKELY(!bucket->num_full_pages))
PartitionBucketFull();
// Not necessary but might help stop accidents.
- page->next_page = 0;
+ page->next_page = nullptr;
}
}
@@ -622,14 +671,14 @@ static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
PartitionPage* page) {
- DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+ DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
static ALWAYS_INLINE void PartitionPageSetRawSize(PartitionPage* page,
size_t size) {
- size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
+ size_t* raw_size_ptr = page->get_raw_size_ptr();
if (UNLIKELY(raw_size_ptr != nullptr))
*raw_size_ptr = size;
}
@@ -657,7 +706,7 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
// TODO: these pages will be zero-filled. Consider internalizing an
// allocZeroed() API so we can avoid a memset() entirely in this case.
char* ptr = reinterpret_cast<char*>(
- AllocPages(0, map_size, kSuperPageSize, PageReadWrite));
+ AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
if (UNLIKELY(!ptr))
return nullptr;
@@ -683,7 +732,7 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
DCHECK(!extent->super_page_base);
DCHECK(!extent->super_pages_end);
DCHECK(!extent->next);
- PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(slot);
+ PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot);
PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
DCHECK(!page->next_page);
@@ -695,7 +744,7 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
- next_entry->next = PartitionFreelistMask(0);
+ next_entry->next = PartitionFreelistMask(nullptr);
DCHECK(!bucket->active_pages_head);
DCHECK(!bucket->empty_pages_head);
@@ -746,7 +795,7 @@ static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
// Account for the mapping starting a partition page before the actual
// allocation address.
ptr -= kPartitionPageSize;
@@ -754,85 +803,92 @@ static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
FreePages(ptr, unmap_size);
}
-void* PartitionAllocSlowPath(PartitionRootBase* root,
- int flags,
- size_t size,
- PartitionBucket* bucket) {
+void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
+ int flags,
+ size_t size) {
// The slow path is called when the freelist is empty.
- DCHECK(!bucket->active_pages_head->freelist_head);
+ DCHECK(!this->active_pages_head->freelist_head);
PartitionPage* new_page = nullptr;
- // For the PartitionAllocGeneric API, we have a bunch of buckets marked
- // as special cases. We bounce them through to the slow path so that we
- // can still have a blazing fast hot path due to lack of corner-case
+ // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
+ // marked as special cases. We bounce them through to the slow path so that
+ // we can still have a blazing fast hot path due to lack of corner-case
// branches.
+ //
+  // Note: The ordering of the conditionals matters! In particular,
+ // PartitionSetNewActivePage() has a side-effect even when returning
+ // false where it sweeps the active page list and may move things into
+ // the empty or decommitted lists which affects the subsequent conditional.
bool returnNull = flags & PartitionAllocReturnNull;
- if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
+ if (UNLIKELY(this->is_direct_mapped())) {
DCHECK(size > kGenericMaxBucketed);
- DCHECK(bucket == &g_sentinel_bucket);
- DCHECK(bucket->active_pages_head == &g_sentinel_page);
+ DCHECK(this == &g_sentinel_bucket);
+ DCHECK(this->active_pages_head == &g_sentinel_page);
if (size > kGenericMaxDirectMapped) {
if (returnNull)
return nullptr;
PartitionExcessiveAllocationSize();
}
new_page = PartitionDirectMap(root, flags, size);
- } else if (LIKELY(PartitionSetNewActivePage(bucket))) {
+ } else if (LIKELY(PartitionSetNewActivePage(this))) {
// First, did we find an active page in the active pages list?
- new_page = bucket->active_pages_head;
+ new_page = this->active_pages_head;
DCHECK(PartitionPageStateIsActive(new_page));
- } else if (LIKELY(bucket->empty_pages_head != nullptr) ||
- LIKELY(bucket->decommitted_pages_head != nullptr)) {
+ } else if (LIKELY(this->empty_pages_head != nullptr) ||
+ LIKELY(this->decommitted_pages_head != nullptr)) {
// Second, look in our lists of empty and decommitted pages.
// Check empty pages first, which are preferred, but beware that an
// empty page might have been decommitted.
- while (LIKELY((new_page = bucket->empty_pages_head) != nullptr)) {
- DCHECK(new_page->bucket == bucket);
+ while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
+ DCHECK(new_page->bucket == this);
DCHECK(PartitionPageStateIsEmpty(new_page) ||
PartitionPageStateIsDecommitted(new_page));
- bucket->empty_pages_head = new_page->next_page;
+ this->empty_pages_head = new_page->next_page;
// Accept the empty page unless it got decommitted.
if (new_page->freelist_head) {
new_page->next_page = nullptr;
break;
}
DCHECK(PartitionPageStateIsDecommitted(new_page));
- new_page->next_page = bucket->decommitted_pages_head;
- bucket->decommitted_pages_head = new_page;
+ new_page->next_page = this->decommitted_pages_head;
+ this->decommitted_pages_head = new_page;
}
if (UNLIKELY(!new_page) &&
- LIKELY(bucket->decommitted_pages_head != nullptr)) {
- new_page = bucket->decommitted_pages_head;
- DCHECK(new_page->bucket == bucket);
+ LIKELY(this->decommitted_pages_head != nullptr)) {
+ new_page = this->decommitted_pages_head;
+ DCHECK(new_page->bucket == this);
DCHECK(PartitionPageStateIsDecommitted(new_page));
- bucket->decommitted_pages_head = new_page->next_page;
- void* addr = PartitionPageToPointer(new_page);
+ this->decommitted_pages_head = new_page->next_page;
+ void* addr = PartitionPage::ToPointer(new_page);
PartitionRecommitSystemPages(root, addr,
- PartitionBucketBytes(new_page->bucket));
+ new_page->bucket->get_bytes_per_span());
PartitionPageReset(new_page);
}
DCHECK(new_page);
} else {
// Third. If we get here, we need a brand new page.
- uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
+ uint16_t num_partition_pages = PartitionBucketPartitionPages(this);
void* rawPages =
PartitionAllocPartitionPages(root, flags, num_partition_pages);
if (LIKELY(rawPages != nullptr)) {
- new_page = PartitionPointerToPageNoAlignmentCheck(rawPages);
- PartitionPageSetup(new_page, bucket);
+ new_page = PartitionPage::FromPointerNoAlignmentCheck(rawPages);
+ PartitionPageSetup(new_page, this);
}
}
// Bail if we had a memory allocation failure.
if (UNLIKELY(!new_page)) {
- DCHECK(bucket->active_pages_head == &g_sentinel_page);
+ DCHECK(this->active_pages_head == &g_sentinel_page);
if (returnNull)
return nullptr;
PartitionOutOfMemory(root);
}
- bucket = new_page->bucket;
+ // TODO(ajwong): Is there a way to avoid the reading of bucket here?
+ // It seems like in many of the conditional branches above, |this| ==
+ // |new_page->bucket|. Maybe pull this into another function?
+ PartitionBucket* bucket = new_page->bucket;
DCHECK(bucket != &g_sentinel_bucket);
bucket->active_pages_head = new_page;
PartitionPageSetRawSize(new_page, size);
@@ -854,9 +910,9 @@ void* PartitionAllocSlowPath(PartitionRootBase* root,
static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
PartitionPage* page) {
DCHECK(PartitionPageStateIsEmpty(page));
- DCHECK(!PartitionBucketIsDirectMapped(page->bucket));
- void* addr = PartitionPageToPointer(page);
- PartitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket));
+ DCHECK(!page->bucket->is_direct_mapped());
+ void* addr = PartitionPage::ToPointer(page);
+ PartitionDecommitSystemPages(root, addr, page->bucket->get_bytes_per_span());
// We actually leave the decommitted page in the active list. We'll sweep
// it on to the decommitted page list when we next walk the active page
@@ -864,7 +920,7 @@ static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
// Pulling this trick enables us to use a singly-linked page list for all
// cases, which is critical in keeping the page metadata structure down to
// 32 bytes in size.
- page->freelist_head = 0;
+ page->freelist_head = nullptr;
page->num_unprovisioned_slots = 0;
DCHECK(PartitionPageStateIsDecommitted(page));
}
@@ -888,7 +944,7 @@ static ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
DCHECK(page->empty_cache_index >= 0);
DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
- root->global_empty_page_ring[page->empty_cache_index] = 0;
+ root->global_empty_page_ring[page->empty_cache_index] = nullptr;
}
int16_t current_index = root->global_empty_page_ring_index;
@@ -919,55 +975,54 @@ static void PartitionDecommitEmptyPages(PartitionRootBase* root) {
}
}
-void PartitionFreeSlowPath(PartitionPage* page) {
- PartitionBucket* bucket = page->bucket;
- DCHECK(page != &g_sentinel_page);
- if (LIKELY(page->num_allocated_slots == 0)) {
+void PartitionPage::FreeSlowPath() {
+ DCHECK(this != &g_sentinel_page);
+ if (LIKELY(this->num_allocated_slots == 0)) {
// Page became fully unused.
- if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
- PartitionDirectUnmap(page);
+ if (UNLIKELY(bucket->is_direct_mapped())) {
+ PartitionDirectUnmap(this);
return;
}
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
- if (LIKELY(page == bucket->active_pages_head))
- (void)PartitionSetNewActivePage(bucket);
- DCHECK(bucket->active_pages_head != page);
+ if (LIKELY(this == bucket->active_pages_head))
+ PartitionSetNewActivePage(bucket);
+ DCHECK(bucket->active_pages_head != this);
- PartitionPageSetRawSize(page, 0);
- DCHECK(!PartitionPageGetRawSize(page));
+ PartitionPageSetRawSize(this, 0);
+ DCHECK(!get_raw_size());
- PartitionRegisterEmptyPage(page);
+ PartitionRegisterEmptyPage(this);
} else {
- DCHECK(!PartitionBucketIsDirectMapped(bucket));
+ DCHECK(!bucket->is_direct_mapped());
// Ensure that the page is full. That's the only valid case if we
// arrive here.
- DCHECK(page->num_allocated_slots < 0);
+ DCHECK(this->num_allocated_slots < 0);
// A transition of num_allocated_slots from 0 to -1 is not legal, and
// likely indicates a double-free.
- CHECK(page->num_allocated_slots != -1);
- page->num_allocated_slots = -page->num_allocated_slots - 2;
- DCHECK(page->num_allocated_slots == PartitionBucketSlots(bucket) - 1);
+ CHECK(this->num_allocated_slots != -1);
+ this->num_allocated_slots = -this->num_allocated_slots - 2;
+ DCHECK(this->num_allocated_slots == bucket->get_slots_per_span() - 1);
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
- DCHECK(!page->next_page);
+ DCHECK(!this->next_page);
if (LIKELY(bucket->active_pages_head != &g_sentinel_page))
- page->next_page = bucket->active_pages_head;
- bucket->active_pages_head = page;
+ this->next_page = bucket->active_pages_head;
+ bucket->active_pages_head = this;
--bucket->num_full_pages;
// Special case: for a partition page with just a single slot, it may
// now be empty and we want to run it through the empty logic.
- if (UNLIKELY(page->num_allocated_slots == 0))
- PartitionFreeSlowPath(page);
+ if (UNLIKELY(this->num_allocated_slots == 0))
+ FreeSlowPath();
}
}
bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
PartitionPage* page,
size_t raw_size) {
- DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+ DCHECK(page->bucket->is_direct_mapped());
raw_size = PartitionCookieSizeAdjustAdd(raw_size);
@@ -982,7 +1037,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
if (new_size == current_size)
return true;
- char* char_ptr = static_cast<char*>(PartitionPageToPointer(page));
+ char* char_ptr = static_cast<char*>(PartitionPage::ToPointer(page));
if (new_size < current_size) {
size_t map_size = partitionPageToDirectMapExtent(page)->map_size;
@@ -1020,45 +1075,44 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
#endif
PartitionPageSetRawSize(page, raw_size);
- DCHECK(PartitionPageGetRawSize(page) == raw_size);
+ DCHECK(page->get_raw_size() == raw_size);
page->bucket->slot_size = new_size;
return true;
}
-void* PartitionReallocGeneric(PartitionRootGeneric* root,
- void* ptr,
- size_t new_size,
- const char* type_name) {
+void* PartitionRootGeneric::Realloc(void* ptr,
+ size_t new_size,
+ const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return realloc(ptr, new_size);
#else
if (UNLIKELY(!ptr))
- return PartitionAllocGeneric(root, new_size, type_name);
+ return this->Alloc(new_size, type_name);
if (UNLIKELY(!new_size)) {
- PartitionFreeGeneric(root, ptr);
- return 0;
+ this->Free(ptr);
+ return nullptr;
}
if (new_size > kGenericMaxDirectMapped)
PartitionExcessiveAllocationSize();
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(PartitionPagePointerIsValid(page));
+ DCHECK(PartitionPage::IsPointerValid(page));
- if (UNLIKELY(PartitionBucketIsDirectMapped(page->bucket))) {
+ if (UNLIKELY(page->bucket->is_direct_mapped())) {
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
// them.
- if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
+ if (PartitionReallocDirectMappedInPlace(this, page, new_size)) {
PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
return ptr;
}
}
- size_t actual_new_size = PartitionAllocActualSize(root, new_size);
+ size_t actual_new_size = this->ActualSize(new_size);
size_t actual_old_size = PartitionAllocGetSize(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
@@ -1070,20 +1124,22 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
// after updating statistics (and cookies, if present).
PartitionPageSetRawSize(page, PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
- // Write a new trailing cookie.
- PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
+ // Write a new trailing cookie when it is possible to keep track of
+ // |new_size| via the raw size pointer.
+ if (page->get_raw_size_ptr())
+ PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
#endif
return ptr;
}
// This realloc cannot be resized in-place. Sadness.
- void* ret = PartitionAllocGeneric(root, new_size, type_name);
+ void* ret = this->Alloc(new_size, type_name);
size_t copy_size = actual_old_size;
if (new_size < copy_size)
copy_size = new_size;
memcpy(ret, ptr, copy_size);
- PartitionFreeGeneric(root, ptr);
+ this->Free(ptr);
return ret;
#endif
}
@@ -1094,38 +1150,37 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
if (slot_size < kSystemPageSize || !page->num_allocated_slots)
return 0;
- size_t bucket_num_slots = PartitionBucketSlots(bucket);
+ size_t bucket_num_slots = bucket->get_slots_per_span();
size_t discardable_bytes = 0;
- size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
+ size_t raw_size = page->get_raw_size();
if (raw_size) {
uint32_t usedBytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
discardable_bytes = bucket->slot_size - usedBytes;
if (discardable_bytes && discard) {
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
ptr += usedBytes;
DiscardSystemPages(ptr, discardable_bytes);
}
return discardable_bytes;
}
- const size_t max_slot_count =
+ constexpr size_t kMaxSlotCount =
(kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
- DCHECK(bucket_num_slots <= max_slot_count);
+ DCHECK(bucket_num_slots <= kMaxSlotCount);
DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
- char slot_usage[max_slot_count];
+ char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
// The last freelist entry should not be discarded when using OS_WIN.
// DiscardVirtualMemory makes the contents of discarded memory undefined.
size_t last_slot = static_cast<size_t>(-1);
#endif
memset(slot_usage, 1, num_slots);
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
- PartitionFreelistEntry* entry = page->freelist_head;
+ char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
// First, walk the freelist for this page and make a bitmap of which slots
// are not in use.
- while (entry) {
+ for (PartitionFreelistEntry* entry = page->freelist_head; entry; /**/) {
size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
DCHECK(slotIndex < num_slots);
slot_usage[slotIndex] = 0;
@@ -1151,49 +1206,47 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
}
// First, do the work of calculating the discardable bytes. Don't actually
// discard anything unless the discard flag was passed in.
- char* begin_ptr = nullptr;
- char* end_ptr = nullptr;
- size_t unprovisioned_bytes = 0;
if (truncated_slots) {
- begin_ptr = ptr + (num_slots * slot_size);
- end_ptr = begin_ptr + (slot_size * truncated_slots);
+ size_t unprovisioned_bytes = 0;
+ char* begin_ptr = ptr + (num_slots * slot_size);
+ char* end_ptr = begin_ptr + (slot_size * truncated_slots);
begin_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
// We round the end pointer here up and not down because we're at the
// end of a slot span, so we "own" all the way up the page boundary.
end_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
- DCHECK(end_ptr <= ptr + PartitionBucketBytes(bucket));
+ DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
if (begin_ptr < end_ptr) {
unprovisioned_bytes = end_ptr - begin_ptr;
discardable_bytes += unprovisioned_bytes;
}
- }
- if (unprovisioned_bytes && discard) {
- DCHECK(truncated_slots > 0);
- size_t num_new_entries = 0;
- page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
- // Rewrite the freelist.
- PartitionFreelistEntry** entry_ptr = &page->freelist_head;
- for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) {
- if (slot_usage[slotIndex])
- continue;
- PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(
- ptr + (slot_size * slotIndex));
- *entry_ptr = PartitionFreelistMask(entry);
- entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
- num_new_entries++;
+ if (unprovisioned_bytes && discard) {
+ DCHECK(truncated_slots > 0);
+ size_t num_new_entries = 0;
+ page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+ // Rewrite the freelist.
+ PartitionFreelistEntry** entry_ptr = &page->freelist_head;
+ for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) {
+ if (slot_usage[slotIndex])
+ continue;
+ auto* entry = reinterpret_cast<PartitionFreelistEntry*>(
+ ptr + (slot_size * slotIndex));
+ *entry_ptr = PartitionFreelistMask(entry);
+ entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
+ num_new_entries++;
#if !defined(OS_WIN)
- last_slot = slotIndex;
+ last_slot = slotIndex;
#endif
+ }
+ // Terminate the freelist chain.
+ *entry_ptr = nullptr;
+ // The freelist head is stored unmasked.
+ page->freelist_head = PartitionFreelistMask(page->freelist_head);
+ DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+ // Discard the memory.
+ DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
- // Terminate the freelist chain.
- *entry_ptr = nullptr;
- // The freelist head is stored unmasked.
- page->freelist_head = PartitionFreelistMask(page->freelist_head);
- DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
- // Discard the memory.
- DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
// Next, walk the slots and for any not in use, consider where the system
@@ -1233,27 +1286,27 @@ static void PartitionPurgeBucket(PartitionBucket* bucket) {
for (PartitionPage* page = bucket->active_pages_head; page;
page = page->next_page) {
DCHECK(page != &g_sentinel_page);
- (void)PartitionPurgePage(page, true);
+ PartitionPurgePage(page, true);
}
}
}
-void PartitionPurgeMemory(PartitionRoot* root, int flags) {
+void PartitionRoot::PurgeMemory(int flags) {
if (flags & PartitionPurgeDecommitEmptyPages)
- PartitionDecommitEmptyPages(root);
+ PartitionDecommitEmptyPages(this);
// We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
// here because that flag is only useful for allocations >= system page
// size. We only have allocations that large inside generic partitions
// at the moment.
}
-void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
- subtle::SpinLock::Guard guard(root->lock);
+void PartitionRootGeneric::PurgeMemory(int flags) {
+ subtle::SpinLock::Guard guard(this->lock);
if (flags & PartitionPurgeDecommitEmptyPages)
- PartitionDecommitEmptyPages(root);
+ PartitionDecommitEmptyPages(this);
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
- PartitionBucket* bucket = &root->buckets[i];
+ PartitionBucket* bucket = &this->buckets[i];
if (bucket->slot_size >= kSystemPageSize)
PartitionPurgeBucket(bucket);
}
@@ -1261,23 +1314,23 @@ void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
}
static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
- const PartitionPage* page) {
- uint16_t bucket_num_slots = PartitionBucketSlots(page->bucket);
+ PartitionPage* page) {
+ uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
if (PartitionPageStateIsDecommitted(page)) {
++stats_out->num_decommitted_pages;
return;
}
- stats_out->discardable_bytes +=
- PartitionPurgePage(const_cast<PartitionPage*>(page), false);
+ stats_out->discardable_bytes += PartitionPurgePage(page, false);
- size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
- if (raw_size)
+ size_t raw_size = page->get_raw_size();
+ if (raw_size) {
stats_out->active_bytes += static_cast<uint32_t>(raw_size);
- else
+ } else {
stats_out->active_bytes +=
(page->num_allocated_slots * stats_out->bucket_slot_size);
+ }
size_t page_bytes_resident =
RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
@@ -1296,7 +1349,7 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
const PartitionBucket* bucket) {
- DCHECK(!PartitionBucketIsDirectMapped(bucket));
+ DCHECK(!bucket->is_direct_mapped());
stats_out->is_valid = false;
// If the active page list is empty (== &g_sentinel_page),
// the bucket might still need to be reported if it has a list of empty,
@@ -1311,27 +1364,27 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
stats_out->is_direct_map = false;
stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
stats_out->bucket_slot_size = bucket->slot_size;
- uint16_t bucket_num_slots = PartitionBucketSlots(bucket);
+ uint16_t bucket_num_slots = bucket->get_slots_per_span();
size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
- stats_out->allocated_page_size = PartitionBucketBytes(bucket);
+ stats_out->allocated_page_size = bucket->get_bytes_per_span();
stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
stats_out->resident_bytes =
bucket->num_full_pages * stats_out->allocated_page_size;
- for (const PartitionPage* page = bucket->empty_pages_head; page;
+ for (PartitionPage* page = bucket->empty_pages_head; page;
page = page->next_page) {
DCHECK(PartitionPageStateIsEmpty(page) ||
PartitionPageStateIsDecommitted(page));
PartitionDumpPageStats(stats_out, page);
}
- for (const PartitionPage* page = bucket->decommitted_pages_head; page;
+ for (PartitionPage* page = bucket->decommitted_pages_head; page;
page = page->next_page) {
DCHECK(PartitionPageStateIsDecommitted(page));
PartitionDumpPageStats(stats_out, page);
}
if (bucket->active_pages_head != &g_sentinel_page) {
- for (const PartitionPage* page = bucket->active_pages_head; page;
+ for (PartitionPage* page = bucket->active_pages_head; page;
page = page->next_page) {
DCHECK(page != &g_sentinel_page);
PartitionDumpPageStats(stats_out, page);
@@ -1339,14 +1392,13 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
}
}
-void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
+void PartitionRootGeneric::DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = partition->total_size_of_super_pages +
- partition->total_size_of_direct_mapped_pages;
- stats.total_committed_bytes = partition->total_size_of_committed_pages;
+ stats.total_mmapped_bytes =
+ this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
+ stats.total_committed_bytes = this->total_size_of_committed_pages;
size_t direct_mapped_allocations_total_size = 0;
@@ -1363,13 +1415,13 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
size_t num_direct_mapped_allocations = 0;
{
- subtle::SpinLock::Guard guard(partition->lock);
+ subtle::SpinLock::Guard guard(this->lock);
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
- const PartitionBucket* bucket = &partition->buckets[i];
+ const PartitionBucket* bucket = &this->buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see
- // PartitionAllocGenericInit for details).
+ // PartitionRootGeneric::Init() for details).
if (!bucket->active_pages_head)
bucket_stats[i].is_valid = false;
else
@@ -1382,7 +1434,7 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
}
}
- for (PartitionDirectMapExtent *extent = partition->direct_map_list;
+ for (PartitionDirectMapExtent *extent = this->direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) {
DCHECK(!extent->next_extent ||
@@ -1397,8 +1449,8 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
if (!is_light_dump) {
// Call |PartitionsDumpBucketStats| after collecting stats because it can
- // try to allocate using |PartitionAllocGeneric| and it can't obtain the
- // lock.
+ // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
+ // obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
if (bucket_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
@@ -1407,16 +1459,15 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
uint32_t size = direct_map_lengths[i];
- PartitionBucketMemoryStats stats;
- memset(&stats, '\0', sizeof(stats));
- stats.is_valid = true;
- stats.is_direct_map = true;
- stats.num_full_pages = 1;
- stats.allocated_page_size = size;
- stats.bucket_slot_size = size;
- stats.active_bytes = size;
- stats.resident_bytes = size;
- dumper->PartitionsDumpBucketStats(partition_name, &stats);
+ PartitionBucketMemoryStats mapped_stats = {};
+ mapped_stats.is_valid = true;
+ mapped_stats.is_direct_map = true;
+ mapped_stats.num_full_pages = 1;
+ mapped_stats.allocated_page_size = size;
+ mapped_stats.bucket_slot_size = size;
+ mapped_stats.active_bytes = size;
+ mapped_stats.resident_bytes = size;
+ dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
}
}
@@ -1425,15 +1476,13 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
dumper->PartitionDumpTotals(partition_name, &stats);
}
-void PartitionDumpStats(PartitionRoot* partition,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
-
+void PartitionRoot::DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = partition->total_size_of_super_pages;
- stats.total_committed_bytes = partition->total_size_of_committed_pages;
- DCHECK(!partition->total_size_of_direct_mapped_pages);
+ stats.total_mmapped_bytes = this->total_size_of_super_pages;
+ stats.total_committed_bytes = this->total_size_of_committed_pages;
+ DCHECK(!this->total_size_of_direct_mapped_pages);
static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
@@ -1441,12 +1490,12 @@ void PartitionDumpStats(PartitionRoot* partition,
memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
new PartitionBucketMemoryStats[kMaxReportableBuckets]);
- const size_t partitionNumBuckets = partition->num_buckets;
+ const size_t partitionNumBuckets = this->num_buckets;
DCHECK(partitionNumBuckets <= kMaxReportableBuckets);
for (size_t i = 0; i < partitionNumBuckets; ++i) {
PartitionBucketMemoryStats bucket_stats = {0};
- PartitionDumpBucketStats(&bucket_stats, &partition->buckets()[i]);
+ PartitionDumpBucketStats(&bucket_stats, &this->buckets()[i]);
if (bucket_stats.is_valid) {
stats.total_resident_bytes += bucket_stats.resident_bytes;
stats.total_active_bytes += bucket_stats.active_bytes;
@@ -1462,7 +1511,8 @@ void PartitionDumpStats(PartitionRoot* partition,
}
if (!is_light_dump) {
// PartitionsDumpBucketStats is called after collecting stats because it
- // can use PartitionAlloc to allocate and this can affect the statistics.
+ // can use PartitionRoot::Alloc() to allocate and this can affect the
+ // statistics.
for (size_t i = 0; i < partitionNumBuckets; ++i) {
if (memory_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index 68b9671a9f4..68201dffb17 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// DESCRIPTION
-// partitionAlloc() / PartitionAllocGeneric() and PartitionFree() /
-// PartitionFreeGeneric() are approximately analagous to malloc() and free().
+// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
+// PartitionRootGeneric::Free() are approximately analogous to malloc() and
+// free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition"
@@ -23,14 +24,14 @@
// PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class.
//
-// The partitionAlloc() variant of the API has the following caveats:
+// The PartitionRoot::Alloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a
// templated parameter to PartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size.
//
-// And for PartitionAllocGeneric():
+// And for PartitionRootGeneric::Alloc():
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
// INT_MAX bytes for security reasons).
@@ -94,8 +95,8 @@ static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64k and other
-// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up
-// against the end of a system page.
+// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
+// up against the end of a system page.
static const size_t kPartitionPageShift = 14; // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
@@ -201,7 +202,7 @@ static const size_t kGenericMaxBucketed =
static const size_t kGenericMinDirectMappedDownsize =
kGenericMaxBucketed +
1; // Limit when downsizing a direct mapping using realloc().
-static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
+static const size_t kGenericMaxDirectMapped = 1UL << 31; // 2 GiB
static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
// Constants for the memory reclaim logic.
@@ -225,6 +226,7 @@ static const unsigned char kCookieValue[kCookieSize] = {
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
#endif
+class PartitionStatsDumper;
struct PartitionBucket;
struct PartitionRootBase;
@@ -257,6 +259,11 @@ struct PartitionFreelistEntry {
// booted out of the active list. If there are no suitable active pages found,
// an empty or decommitted page (if one exists) will be pulled from the empty
// list on to the active list.
+//
+// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
+// similar. If so, all uses of the term "page" in comments, member variables,
+// local variables, and documentation that refer to this concept should be
+// updated.
struct PartitionPage {
PartitionFreelistEntry* freelist_head;
PartitionPage* next_page;
@@ -266,7 +273,31 @@ struct PartitionPage {
uint16_t num_unprovisioned_slots;
uint16_t page_offset;
int16_t empty_cache_index; // -1 if not in the empty cache.
+
+ // Public API
+
+ // Note the matching Alloc() functions are in PartitionPage.
+ BASE_EXPORT NOINLINE void FreeSlowPath();
+ ALWAYS_INLINE void Free(void* ptr);
+
+ // Pointer manipulation functions. These must be static as the input |page|
+ // pointer may be the result of an offset calculation and therefore cannot
+ // be trusted. The objective of these functions is to sanitize this input.
+ ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
+ ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
+ ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
+ ALWAYS_INLINE static bool IsPointerValid(PartitionPage* page);
+
+ ALWAYS_INLINE const size_t* get_raw_size_ptr() const;
+ ALWAYS_INLINE size_t* get_raw_size_ptr() {
+ return const_cast<size_t*>(
+ const_cast<const PartitionPage*>(this)->get_raw_size_ptr());
+ }
+
+ ALWAYS_INLINE size_t get_raw_size() const;
};
+static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
+ "PartitionPage must be able to fit in a metadata slot");
struct PartitionBucket {
// Accessed most in hot path => goes first.
@@ -277,10 +308,30 @@ struct PartitionBucket {
uint32_t slot_size;
unsigned num_system_pages_per_slot_span : 8;
unsigned num_full_pages : 24;
+
+ // Public API.
+
+ // Note the matching Free() functions are in PartitionPage.
+ BASE_EXPORT void* Alloc(PartitionRootBase* root, int flags, size_t size);
+ BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root,
+ int flags,
+ size_t size);
+
+ ALWAYS_INLINE bool is_direct_mapped() const {
+ return !num_system_pages_per_slot_span;
+ }
+ ALWAYS_INLINE size_t get_bytes_per_span() const {
+ // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
+ return num_system_pages_per_slot_span * kSystemPageSize;
+ }
+ ALWAYS_INLINE uint16_t get_slots_per_span() const {
+ // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
+ return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
+ }
};
// An "extent" is a span of consecutive superpages. We link to the partition's
-// next extent (if there is one) at the very start of a superpage's metadata
+// next extent (if there is one) to the very start of a superpage's metadata
// area.
struct PartitionSuperPageExtentEntry {
PartitionRootBase* root;
@@ -288,6 +339,9 @@ struct PartitionSuperPageExtentEntry {
char* super_pages_end;
PartitionSuperPageExtentEntry* next;
};
+static_assert(
+ sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
+ "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
struct PartitionDirectMapExtent {
PartitionDirectMapExtent* next_extent;
@@ -318,14 +372,29 @@ struct BASE_EXPORT PartitionRootBase {
int16_t global_empty_page_ring_index = 0;
uintptr_t inverted_self = 0;
+ // Public API
+
// gOomHandlingFunction is invoked when ParitionAlloc hits OutOfMemory.
static void (*gOomHandlingFunction)();
};
+enum PartitionPurgeFlags {
+ // Decommitting the ring list of empty pages is reasonably fast.
+ PartitionPurgeDecommitEmptyPages = 1 << 0,
+ // Discarding unused system pages is slower, because it involves walking all
+ // freelists in all active partition pages of all buckets >= system page
+ // size. It often frees a similar amount of memory to decommitting the empty
+ // pages, though.
+ PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+};
+
// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
struct BASE_EXPORT PartitionRoot : public PartitionRootBase {
PartitionRoot();
~PartitionRoot() override;
+ // This references the buckets OFF the edge of this struct. All uses of
+ // PartitionRoot must have the bucket array come right after.
+ //
// The PartitionAlloc templated class ensures the following is correct.
ALWAYS_INLINE PartitionBucket* buckets() {
return reinterpret_cast<PartitionBucket*>(this + 1);
@@ -333,6 +402,16 @@ struct BASE_EXPORT PartitionRoot : public PartitionRootBase {
ALWAYS_INLINE const PartitionBucket* buckets() const {
return reinterpret_cast<const PartitionBucket*>(this + 1);
}
+
+ void Init(size_t num_buckets, size_t max_allocation);
+
+ ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+
+ void PurgeMemory(int flags);
+
+ void DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper);
};
// Never instantiate a PartitionRootGeneric directly, instead use
@@ -353,6 +432,22 @@ struct BASE_EXPORT PartitionRootGeneric : public PartitionRootBase {
bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
{};
PartitionBucket buckets[kGenericNumBuckets] = {};
+
+ // Public API.
+ void Init();
+
+ ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+ ALWAYS_INLINE void Free(void* ptr);
+
+ NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
+
+ ALWAYS_INLINE size_t ActualSize(size_t size);
+
+ void PurgeMemory(int flags);
+
+ void DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* partition_stats_dumper);
};
// Flags for PartitionAllocGenericFlags.
@@ -407,42 +502,6 @@ class BASE_EXPORT PartitionStatsDumper {
};
BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
-BASE_EXPORT void PartitionAllocInit(PartitionRoot*,
- size_t num_buckets,
- size_t max_allocation);
-BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*);
-
-enum PartitionPurgeFlags {
- // Decommitting the ring list of empty pages is reasonably fast.
- PartitionPurgeDecommitEmptyPages = 1 << 0,
- // Discarding unused system pages is slower, because it involves walking all
- // freelists in all active partition pages of all buckets >= system page
- // size. It often frees a similar amount of memory to decommitting the empty
- // pages, though.
- PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
-};
-
-BASE_EXPORT void PartitionPurgeMemory(PartitionRoot*, int);
-BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
-
-BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*,
- int,
- size_t,
- PartitionBucket*);
-BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*);
-BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*,
- void*,
- size_t,
- const char* type_name);
-
-BASE_EXPORT void PartitionDumpStats(PartitionRoot*,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper*);
-BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper*);
class BASE_EXPORT PartitionAllocHooks {
public:
@@ -566,7 +625,8 @@ ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
-ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) {
+ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
+ void* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
char* super_page_ptr =
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
@@ -587,16 +647,23 @@ ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) {
return page;
}
-ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) {
+// Returns start of the slot span for the PartitionPage.
+ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
+
uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
+
+ // A valid |page| must be past the first guard System page and within
+ // the following metadata region.
DCHECK(super_page_offset > kSystemPageSize);
+ // Must be less than total metadata region.
DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
kPageMetadataSize));
uintptr_t partition_page_index =
(super_page_offset - kSystemPageSize) >> kPageMetadataShift;
- // Index 0 is invalid because it is the metadata area and the last index is
- // invalid because it is a guard page.
+ // Index 0 is invalid because it is the superpage extent metadata and the
+ // last index is invalid because the whole PartitionPage is set as guard
+ // pages for the metadata region.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
@@ -605,48 +672,33 @@ ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) {
return ret;
}
-ALWAYS_INLINE PartitionPage* PartitionPointerToPage(void* ptr) {
- PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(ptr);
+ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
+ PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
- reinterpret_cast<uintptr_t>(PartitionPageToPointer(page))) %
+ reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
page->bucket->slot_size));
return page;
}
-ALWAYS_INLINE bool PartitionBucketIsDirectMapped(
- const PartitionBucket* bucket) {
- return !bucket->num_system_pages_per_slot_span;
-}
-
-ALWAYS_INLINE size_t PartitionBucketBytes(const PartitionBucket* bucket) {
- return bucket->num_system_pages_per_slot_span * kSystemPageSize;
-}
-
-ALWAYS_INLINE uint16_t PartitionBucketSlots(const PartitionBucket* bucket) {
- return static_cast<uint16_t>(PartitionBucketBytes(bucket) /
- bucket->slot_size);
-}
-
-ALWAYS_INLINE size_t* PartitionPageGetRawSizePtr(PartitionPage* page) {
+ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
// For single-slot buckets which span more than one partition page, we
// have some spare metadata space to store the raw allocation size. We
// can use this to report better statistics.
- PartitionBucket* bucket = page->bucket;
if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
DCHECK((bucket->slot_size % kSystemPageSize) == 0);
- DCHECK(PartitionBucketIsDirectMapped(bucket) ||
- PartitionBucketSlots(bucket) == 1);
- page++;
- return reinterpret_cast<size_t*>(&page->freelist_head);
+ DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
+
+ const PartitionPage* the_next_page = this + 1;
+ return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
}
-ALWAYS_INLINE size_t PartitionPageGetRawSize(PartitionPage* page) {
- size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
- if (UNLIKELY(raw_size_ptr != nullptr))
- return *raw_size_ptr;
+ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
+ const size_t* ptr = get_raw_size_ptr();
+ if (UNLIKELY(ptr != nullptr))
+ return *ptr;
return 0;
}
@@ -657,60 +709,62 @@ ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
return extent_entry->root;
}
-ALWAYS_INLINE bool PartitionPagePointerIsValid(PartitionPage* page) {
+ALWAYS_INLINE bool PartitionPage::IsPointerValid(PartitionPage* page) {
PartitionRootBase* root = PartitionPageToRoot(page);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
-ALWAYS_INLINE void* PartitionBucketAlloc(PartitionRootBase* root,
- int flags,
- size_t size,
- PartitionBucket* bucket) {
- PartitionPage* page = bucket->active_pages_head;
+ALWAYS_INLINE void* PartitionBucket::Alloc(PartitionRootBase* root,
+ int flags,
+ size_t size) {
+ PartitionPage* page = this->active_pages_head;
// Check that this page is neither full nor freed.
DCHECK(page->num_allocated_slots >= 0);
void* ret = page->freelist_head;
if (LIKELY(ret != 0)) {
// If these DCHECKs fire, you probably corrupted memory.
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(PartitionPagePointerIsValid(page));
+ DCHECK(PartitionPage::IsPointerValid(page));
// All large allocations must go through the slow path to correctly
// update the size metadata.
- DCHECK(PartitionPageGetRawSize(page) == 0);
+ DCHECK(page->get_raw_size() == 0);
PartitionFreelistEntry* new_head =
PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
- ret = PartitionAllocSlowPath(root, flags, size, bucket);
+ ret = this->SlowPathAlloc(root, flags, size);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(!ret || PartitionPagePointerIsValid(PartitionPointerToPage(ret)));
+ DCHECK(!ret ||
+ PartitionPage::IsPointerValid(PartitionPage::FromPointer(ret)));
}
#if DCHECK_IS_ON()
if (!ret)
return 0;
// Fill the uninitialized pattern, and write the cookies.
- page = PartitionPointerToPage(ret);
- size_t slot_size = page->bucket->slot_size;
- size_t raw_size = PartitionPageGetRawSize(page);
+ page = PartitionPage::FromPointer(ret);
+ // TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
+ // be this->slot_size?
+ size_t new_slot_size = page->bucket->slot_size;
+ size_t raw_size = page->get_raw_size();
if (raw_size) {
DCHECK(raw_size == size);
- slot_size = raw_size;
+ new_slot_size = raw_size;
}
- size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(slot_size);
+ size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
char* char_ret = static_cast<char*>(ret);
// The value given to the application is actually just after the cookie.
ret = char_ret + kCookieSize;
- memset(ret, kUninitializedByte, no_cookie_size);
+
+ // Debug fill region kUninitializedByte and surround it with 2 cookies.
PartitionCookieWriteValue(char_ret);
+ memset(ret, kUninitializedByte, no_cookie_size);
PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
#endif
return ret;
}
-ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root,
- size_t size,
- const char* type_name) {
+ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* result = malloc(size);
CHECK(result);
@@ -718,23 +772,23 @@ ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root,
#else
size_t requested_size = size;
size = PartitionCookieSizeAdjustAdd(size);
- DCHECK(root->initialized);
+ DCHECK(this->initialized);
size_t index = size >> kBucketShift;
- DCHECK(index < root->num_buckets);
+ DCHECK(index < this->num_buckets);
DCHECK(size == index << kBucketShift);
- PartitionBucket* bucket = &root->buckets()[index];
- void* result = PartitionBucketAlloc(root, 0, size, bucket);
+ PartitionBucket* bucket = &this->buckets()[index];
+ void* result = bucket->Alloc(this, 0, size);
PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size,
type_name);
return result;
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}
-ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) {
+ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
// If these asserts fire, you probably corrupted memory.
#if DCHECK_IS_ON()
- size_t slot_size = page->bucket->slot_size;
- size_t raw_size = PartitionPageGetRawSize(page);
+ size_t slot_size = this->bucket->slot_size;
+ size_t raw_size = get_raw_size();
if (raw_size)
slot_size = raw_size;
PartitionCookieCheckValue(ptr);
@@ -742,24 +796,23 @@ ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) {
kCookieSize);
memset(ptr, kFreedByte, slot_size);
#endif
- DCHECK(page->num_allocated_slots);
- PartitionFreelistEntry* freelist_head = page->freelist_head;
+ DCHECK(this->num_allocated_slots);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(!freelist_head ||
- PartitionPagePointerIsValid(PartitionPointerToPage(freelist_head)));
+ DCHECK(!freelist_head || PartitionPage::IsPointerValid(
+ PartitionPage::FromPointer(freelist_head)));
CHECK(ptr != freelist_head); // Catches an immediate double free.
// Look for double free one level deeper in debug.
DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next));
PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
entry->next = PartitionFreelistMask(freelist_head);
- page->freelist_head = entry;
- --page->num_allocated_slots;
- if (UNLIKELY(page->num_allocated_slots <= 0)) {
- PartitionFreeSlowPath(page);
+ freelist_head = entry;
+ --this->num_allocated_slots;
+ if (UNLIKELY(this->num_allocated_slots <= 0)) {
+ FreeSlowPath();
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
- DCHECK(PartitionPageGetRawSize(page) == 0);
+ DCHECK(get_raw_size() == 0);
}
}
@@ -771,10 +824,10 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
// inside PartitionCookieFreePointerAdjust?
PartitionAllocHooks::FreeHookIfEnabled(ptr);
ptr = PartitionCookieFreePointerAdjust(ptr);
- PartitionPage* page = PartitionPointerToPage(ptr);
+ PartitionPage* page = PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(PartitionPagePointerIsValid(page));
- PartitionFreeWithPage(ptr, page);
+ DCHECK(PartitionPage::IsPointerValid(page));
+ page->Free(ptr);
#endif
}
@@ -811,36 +864,35 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
void* ret = nullptr;
{
subtle::SpinLock::Guard guard(root->lock);
- ret = PartitionBucketAlloc(root, flags, size, bucket);
+ ret = bucket->Alloc(root, flags, size);
}
PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
return ret;
#endif
}
-ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root,
- size_t size,
- const char* type_name) {
- return PartitionAllocGenericFlags(root, 0, size, type_name);
+ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
+ const char* type_name) {
+ return PartitionAllocGenericFlags(this, 0, size, type_name);
}
-ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
+ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr);
#else
- DCHECK(root->initialized);
+ DCHECK(this->initialized);
if (UNLIKELY(!ptr))
return;
PartitionAllocHooks::FreeHookIfEnabled(ptr);
ptr = PartitionCookieFreePointerAdjust(ptr);
- PartitionPage* page = PartitionPointerToPage(ptr);
+ PartitionPage* page = PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(PartitionPagePointerIsValid(page));
+ DCHECK(PartitionPage::IsPointerValid(page));
{
- subtle::SpinLock::Guard guard(root->lock);
- PartitionFreeWithPage(ptr, page);
+ subtle::SpinLock::Guard guard(this->lock);
+ page->Free(ptr);
}
#endif
}
@@ -853,15 +905,14 @@ ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) {
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
-ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root,
- size_t size) {
+ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size;
#else
- DCHECK(root->initialized);
+ DCHECK(this->initialized);
size = PartitionCookieSizeAdjustAdd(size);
- PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
- if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) {
+ PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
+ if (LIKELY(!bucket->is_direct_mapped())) {
size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) {
// Too large to allocate => return the size unchanged.
@@ -885,9 +936,9 @@ ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
// cause trouble, and the caller is responsible for that not happening.
DCHECK(PartitionAllocSupportsGetSize());
ptr = PartitionCookieFreePointerAdjust(ptr);
- PartitionPage* page = PartitionPointerToPage(ptr);
+ PartitionPage* page = PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(PartitionPagePointerIsValid(page));
+ DCHECK(PartitionPage::IsPointerValid(page));
size_t size = page->bucket->slot_size;
return PartitionCookieSizeAdjustSubtract(size);
}
@@ -902,9 +953,7 @@ class SizeSpecificPartitionAllocator {
~SizeSpecificPartitionAllocator() = default;
static const size_t kMaxAllocation = N - kAllocationGranularity;
static const size_t kNumBuckets = N / kAllocationGranularity;
- void init() {
- PartitionAllocInit(&partition_root_, kNumBuckets, kMaxAllocation);
- }
+ void init() { partition_root_.Init(kNumBuckets, kMaxAllocation); }
ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
private:
@@ -917,7 +966,7 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionAllocatorGeneric();
~PartitionAllocatorGeneric();
- void init() { PartitionAllocGenericInit(&partition_root_); }
+ void init() { partition_root_.Init(); }
ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
private:
@@ -928,4 +977,4 @@ BASE_EXPORT PartitionPage* GetSentinelPageForTesting();
} // namespace base
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index d2334cd8d60..6ea89b7c875 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -36,7 +36,10 @@ std::unique_ptr<T[]> WrapArrayUnique(T* ptr) {
const size_t kTestMaxAllocation = 4096;
bool IsLargeMemoryDevice() {
- return base::SysInfo::AmountOfPhysicalMemory() >= 2LL * 1024 * 1024 * 1024;
+ // Treat any device with 2GiB or more of physical memory as a "large memory
+ // device". We check for slightly less than 2GiB so that devices with a small
+ // amount of memory not accessible to the OS still count as "large".
+ return base::SysInfo::AmountOfPhysicalMemory() >= 2040LL * 1024 * 1024;
}
bool SetAddressSpaceLimit() {
@@ -100,9 +103,9 @@ const char* type_name = nullptr;
class PartitionAllocTest : public testing::Test {
protected:
- PartitionAllocTest() {}
+ PartitionAllocTest() = default;
- ~PartitionAllocTest() override {}
+ ~PartitionAllocTest() override = default;
void SetUp() override {
allocator.init();
@@ -115,25 +118,26 @@ class PartitionAllocTest : public testing::Test {
PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
size_t num_slots =
(bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
- void* first = 0;
- void* last = 0;
+ void* first = nullptr;
+ void* last = nullptr;
size_t i;
for (i = 0; i < num_slots; ++i) {
- void* ptr = PartitionAlloc(allocator.root(), size, type_name);
+ void* ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
if (!i)
first = PartitionCookieFreePointerAdjust(ptr);
else if (i == num_slots - 1)
last = PartitionCookieFreePointerAdjust(ptr);
}
- EXPECT_EQ(PartitionPointerToPage(first), PartitionPointerToPage(last));
+ EXPECT_EQ(PartitionPage::FromPointer(first),
+ PartitionPage::FromPointer(last));
if (bucket->num_system_pages_per_slot_span ==
kNumSystemPagesPerPartitionPage)
EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask,
reinterpret_cast<size_t>(last) & kPartitionPageBaseMask);
EXPECT_EQ(num_slots, static_cast<size_t>(
bucket->active_pages_head->num_allocated_slots));
- EXPECT_EQ(0, bucket->active_pages_head->freelist_head);
+ EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head);
EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->active_pages_head != GetSentinelPageForTesting());
return bucket->active_pages_head;
@@ -146,7 +150,7 @@ class PartitionAllocTest : public testing::Test {
DCHECK(!bucket->active_pages_head->num_allocated_slots);
for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- void* ptr = PartitionAlloc(allocator.root(), size, type_name);
+ void* ptr = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
PartitionFree(ptr);
EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
@@ -156,13 +160,12 @@ class PartitionAllocTest : public testing::Test {
void CycleGenericFreeCache(size_t size) {
for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- void* ptr =
- PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr = generic_allocator.root()->Alloc(size, type_name);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
PartitionBucket* bucket = page->bucket;
EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
}
@@ -178,13 +181,13 @@ class PartitionAllocTest : public testing::Test {
return;
}
- EXPECT_TRUE(SetAddressSpaceLimit());
+ ASSERT_TRUE(SetAddressSpaceLimit());
// Work out the number of allocations for 6 GB of memory.
const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);
- void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric(
- generic_allocator.root(), numAllocations * sizeof(void*), type_name));
+ void** ptrs = reinterpret_cast<void**>(generic_allocator.root()->Alloc(
+ numAllocations * sizeof(void*), type_name));
int i;
for (i = 0; i < numAllocations; ++i) {
@@ -209,15 +212,15 @@ class PartitionAllocTest : public testing::Test {
// Free, reallocate and free again each block we allocated. We do this to
// check that freeing memory also works correctly after a failed allocation.
for (--i; i >= 0; --i) {
- PartitionFreeGeneric(generic_allocator.root(), ptrs[i]);
+ generic_allocator.root()->Free(ptrs[i]);
ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
PartitionAllocReturnNull, allocSize,
type_name);
EXPECT_TRUE(ptrs[i]);
- PartitionFreeGeneric(generic_allocator.root(), ptrs[i]);
+ generic_allocator.root()->Free(ptrs[i]);
}
- PartitionFreeGeneric(generic_allocator.root(), ptrs);
+ generic_allocator.root()->Free(ptrs);
EXPECT_TRUE(ClearAddressSpaceLimit());
}
@@ -235,7 +238,7 @@ void FreeFullPage(PartitionPage* page) {
size_t num_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size;
EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots)));
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
size_t i;
for (i = 0; i < num_slots; ++i) {
PartitionFree(ptr + kPointerOffset);
@@ -243,14 +246,18 @@ void FreeFullPage(PartitionPage* page) {
}
}
-void CheckPageInCore(void* ptr, bool inCore) {
#if defined(OS_LINUX)
- unsigned char ret;
+bool IsPageInCore(void* ptr) {
+ unsigned char ret = 0;
EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
- EXPECT_EQ(inCore, ret);
-#endif
+ return (ret & 1) != 0;
}
+#define CHECK_PAGE_IN_CORE(ptr, in_core) EXPECT_EQ(IsPageInCore(ptr), in_core);
+#else
+#define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
+#endif // defined(OS_LINUX)
+
class MockPartitionStatsDumper : public PartitionStatsDumper {
public:
MockPartitionStatsDumper()
@@ -290,7 +297,7 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
if (bucket_stats[i].bucket_slot_size == bucket_size)
return &bucket_stats[i];
}
- return 0;
+ return nullptr;
}
private:
@@ -303,23 +310,13 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
};
// Any number of bytes that can be allocated with no trouble.
-const size_t kEasyAllocSize = (1024 * 1024) & ~(kPageAllocationGranularity - 1);
-
-// Generate many random addresses to get a very large fraction of the platform
-// address space. This gives us an allocation size that is very likely to fail
-// on most platforms without triggering bugs in allocation code for very large
-// requests.
-size_t GetHugeMemoryAmount() {
- static size_t huge_memory = 0;
- if (!huge_memory) {
- for (int i = 0; i < 100; i++) {
- huge_memory |= bit_cast<size_t>(base::GetRandomPageBase());
- }
- // Make it larger than the available address space.
- huge_memory *= 2;
- }
- return huge_memory;
-}
+constexpr size_t kEasyAllocSize =
+ (1024 * 1024) & ~(kPageAllocationGranularity - 1);
+
+// A huge amount of memory, greater than or equal to the ASLR space.
+constexpr size_t kHugeMemoryAmount =
+ std::max(base::internal::kASLRMask,
+ std::size_t{2} * base::internal::kASLRMask);
} // anonymous namespace
@@ -327,13 +324,16 @@ size_t GetHugeMemoryAmount() {
// We detect this by making a reservation and ensuring that after failure, we
// can make a new reservation.
TEST(PageAllocatorTest, AllocFailure) {
+ // Release any reservation made by another test.
+ base::ReleaseReservation();
+
// We can make a reservation.
EXPECT_TRUE(base::ReserveAddressSpace(kEasyAllocSize));
// We can't make another reservation until we trigger an allocation failure.
EXPECT_FALSE(base::ReserveAddressSpace(kEasyAllocSize));
- size_t size = GetHugeMemoryAmount();
+ size_t size = kHugeMemoryAmount;
// Skip the test for sanitizers and platforms with ASLR turned off.
if (size == 0)
return;
@@ -352,9 +352,18 @@ TEST(PageAllocatorTest, AllocFailure) {
}
// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
+#else
+#define MAYBE_ReserveAddressSpace ReserveAddressSpace
+#endif // defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+
// Test that reserving address space can fail.
-TEST(PageAllocatorTest, DISABLED_ReserveAddressSpace) {
- size_t size = GetHugeMemoryAmount();
+TEST(PageAllocatorTest, MAYBE_ReserveAddressSpace) {
+ // Release any reservation made by another test.
+ base::ReleaseReservation();
+
+ size_t size = kHugeMemoryAmount;
// Skip the test for sanitizers and platforms with ASLR turned off.
if (size == 0)
return;
@@ -376,9 +385,9 @@ TEST_F(PartitionAllocTest, Basic) {
EXPECT_FALSE(bucket->empty_pages_head);
EXPECT_FALSE(bucket->decommitted_pages_head);
EXPECT_EQ(seedPage, bucket->active_pages_head);
- EXPECT_EQ(0, bucket->active_pages_head->next_page);
+ EXPECT_EQ(nullptr, bucket->active_pages_head->next_page);
- void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
EXPECT_EQ(kPointerOffset,
reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask);
@@ -396,9 +405,9 @@ TEST_F(PartitionAllocTest, Basic) {
// Test multiple allocations, and freelist handling.
TEST_F(PartitionAllocTest, MultiAlloc) {
char* ptr1 = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
char* ptr2 = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
EXPECT_TRUE(ptr1);
EXPECT_TRUE(ptr2);
ptrdiff_t diff = ptr2 - ptr1;
@@ -407,19 +416,19 @@ TEST_F(PartitionAllocTest, MultiAlloc) {
// Check that we re-use the just-freed slot.
PartitionFree(ptr2);
ptr2 = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
EXPECT_TRUE(ptr2);
diff = ptr2 - ptr1;
EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
PartitionFree(ptr1);
ptr1 = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
EXPECT_TRUE(ptr1);
diff = ptr2 - ptr1;
EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
char* ptr3 = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
EXPECT_TRUE(ptr3);
diff = ptr3 - ptr1;
EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
@@ -437,17 +446,17 @@ TEST_F(PartitionAllocTest, MultiPages) {
FreeFullPage(page);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
- EXPECT_EQ(0, page->next_page);
+ EXPECT_EQ(nullptr, page->next_page);
EXPECT_EQ(0, page->num_allocated_slots);
page = GetFullPage(kTestAllocSize);
PartitionPage* page2 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page2, bucket->active_pages_head);
- EXPECT_EQ(0, page2->next_page);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPageToPointer(page)) &
+ EXPECT_EQ(nullptr, page2->next_page);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page)) &
kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(PartitionPageToPointer(page2)) &
+ reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page2)) &
kSuperPageBaseMask);
// Fully free the non-current page. This will leave us with no current
@@ -476,17 +485,17 @@ TEST_F(PartitionAllocTest, PageTransitions) {
PartitionPage* page1 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page1, bucket->active_pages_head);
- EXPECT_EQ(0, page1->next_page);
+ EXPECT_EQ(nullptr, page1->next_page);
PartitionPage* page2 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page2, bucket->active_pages_head);
- EXPECT_EQ(0, page2->next_page);
+ EXPECT_EQ(nullptr, page2->next_page);
// Bounce page1 back into the non-full list then fill it up again.
char* ptr =
- reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset;
+ reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset;
PartitionFree(ptr);
EXPECT_EQ(page1, bucket->active_pages_head);
- (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ (void)allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_EQ(page1, bucket->active_pages_head);
EXPECT_EQ(page2, bucket->active_pages_head->next_page);
@@ -495,26 +504,28 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// freelist. Older code had a O(n^2) condition due to failure to do this.
PartitionPage* page3 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page3, bucket->active_pages_head);
- EXPECT_EQ(0, page3->next_page);
+ EXPECT_EQ(nullptr, page3->next_page);
// Work out a pointer into page2 and free it.
- ptr = reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset;
+ ptr =
+ reinterpret_cast<char*>(PartitionPage::ToPointer(page2)) + kPointerOffset;
PartitionFree(ptr);
// Trying to allocate at this time should cause us to cycle around to page2
// and find the recently freed slot.
char* newPtr = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
EXPECT_EQ(ptr, newPtr);
EXPECT_EQ(page2, bucket->active_pages_head);
EXPECT_EQ(page3, page2->next_page);
// Work out a pointer into page1 and free it. This should pull the page
// back into the list of available pages.
- ptr = reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset;
+ ptr =
+ reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset;
PartitionFree(ptr);
// This allocation should be satisfied by page1.
newPtr = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
EXPECT_EQ(ptr, newPtr);
EXPECT_EQ(page1, bucket->active_pages_head);
EXPECT_EQ(page2, page1->next_page);
@@ -525,7 +536,7 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Allocating whilst in this state exposed a bug, so keep the test.
ptr = reinterpret_cast<char*>(
- PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ allocator.root()->Alloc(kTestAllocSize, type_name));
PartitionFree(ptr);
}
@@ -588,7 +599,7 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
size_t i;
for (i = 0; i < numPagesNeeded; ++i) {
pages[i] = GetFullPage(kTestAllocSize);
- void* storagePtr = PartitionPageToPointer(pages[i]);
+ void* storagePtr = PartitionPage::ToPointer(pages[i]);
if (!i)
firstSuperPageBase =
reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
@@ -609,33 +620,31 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
// Test the generic allocation functions that can handle arbitrary sizes and
// reallocing etc.
TEST_F(PartitionAllocTest, GenericAlloc) {
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
+ void* ptr = generic_allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
- ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1,
- type_name);
+ generic_allocator.root()->Free(ptr);
+ ptr = generic_allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
EXPECT_TRUE(ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
- ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
+ ptr = generic_allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr);
void* origPtr = ptr;
char* charPtr = static_cast<char*>(ptr);
*charPtr = 'A';
// Change the size of the realloc, remaining inside the same bucket.
- void* newPtr =
- PartitionReallocGeneric(generic_allocator.root(), ptr, 2, type_name);
+ void* newPtr = generic_allocator.root()->Realloc(ptr, 2, type_name);
EXPECT_EQ(ptr, newPtr);
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(ptr, newPtr);
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
- kGenericSmallestBucket, type_name);
+ newPtr =
+ generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
EXPECT_EQ(ptr, newPtr);
// Change the size of the realloc, switching buckets.
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
- kGenericSmallestBucket + 1, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1,
+ type_name);
EXPECT_NE(newPtr, ptr);
// Check that the realloc copied correctly.
char* newCharPtr = static_cast<char*>(newPtr);
@@ -651,14 +660,13 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// The realloc moved. To check that the old allocation was freed, we can
// do an alloc of the old allocation size and check that the old allocation
// address is at the head of the freelist and reused.
- void* reusedPtr =
- PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
+ void* reusedPtr = generic_allocator.root()->Alloc(1, type_name);
EXPECT_EQ(reusedPtr, origPtr);
- PartitionFreeGeneric(generic_allocator.root(), reusedPtr);
+ generic_allocator.root()->Free(reusedPtr);
// Downsize the realloc.
ptr = newPtr;
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(newPtr, origPtr);
newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'B');
@@ -666,8 +674,8 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize the realloc to outside the partition.
ptr = newPtr;
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
- kGenericMaxBucketed + 1, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1,
+ type_name);
EXPECT_NE(newPtr, ptr);
newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'C');
@@ -675,80 +683,79 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize and downsize the realloc, remaining outside the partition.
ptr = newPtr;
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
- kGenericMaxBucketed * 10, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10,
+ type_name);
newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'D');
*newCharPtr = 'E';
ptr = newPtr;
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
- kGenericMaxBucketed * 2, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2,
+ type_name);
newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'E');
*newCharPtr = 'F';
// Downsize the realloc to inside the partition.
ptr = newPtr;
- newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name);
+ newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_NE(newPtr, ptr);
EXPECT_EQ(newPtr, origPtr);
newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'F');
- PartitionFreeGeneric(generic_allocator.root(), newPtr);
+ generic_allocator.root()->Free(newPtr);
}
// Test the generic allocation functions can handle some specific sizes of
// interest.
TEST_F(PartitionAllocTest, GenericAllocSizes) {
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name);
+ void* ptr = generic_allocator.root()->Alloc(0, type_name);
EXPECT_TRUE(ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// kPartitionPageSize is interesting because it results in just one
// allocation per page, which tripped up some corner cases.
size_t size = kPartitionPageSize - kExtraAllocSize;
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// Should be freeable at this point.
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_NE(-1, page->empty_cache_index);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr2);
size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) -
kSystemPageSize) /
2) -
kExtraAllocSize;
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 'A', size);
- ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr2 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
- void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr3);
- void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr4);
- page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
PartitionPage* page2 =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr3));
EXPECT_NE(page, page2);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr3);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr);
+ generic_allocator.root()->Free(ptr3);
+ generic_allocator.root()->Free(ptr2);
// Should be freeable at this point.
EXPECT_NE(-1, page->empty_cache_index);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_EQ(0, page->num_unprovisioned_slots);
- void* newPtr =
- PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* newPtr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr3, newPtr);
- newPtr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ newPtr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr2, newPtr);
#if defined(OS_LINUX) && !DCHECK_IS_ON()
// On Linux, we have a guarantee that freelisting a page should cause its
@@ -760,18 +767,17 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
// byte pattern.
EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
#endif
- PartitionFreeGeneric(generic_allocator.root(), newPtr);
- PartitionFreeGeneric(generic_allocator.root(), ptr3);
- PartitionFreeGeneric(generic_allocator.root(), ptr4);
+ generic_allocator.root()->Free(newPtr);
+ generic_allocator.root()->Free(ptr3);
+ generic_allocator.root()->Free(ptr4);
// Can we allocate a massive (512MB) size?
// Allocate 512MB, but +1, to test for cookie writing alignment issues.
// Test this only if the device has enough memory or it might fail due
// to OOM.
if (IsLargeMemoryDevice()) {
- ptr = PartitionAllocGeneric(generic_allocator.root(), 512 * 1024 * 1024 + 1,
- type_name);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ ptr = generic_allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
+ generic_allocator.root()->Free(ptr);
}
// Check a more reasonable, but still direct mapped, size.
@@ -779,18 +785,18 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
size = 20 * 1024 * 1024;
size -= kSystemPageSize;
size -= 1;
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr = generic_allocator.root()->Alloc(size, type_name);
char* charPtr = reinterpret_cast<char*>(ptr);
*(charPtr + (size - 1)) = 'A';
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// Can we free null?
- PartitionFreeGeneric(generic_allocator.root(), 0);
+ generic_allocator.root()->Free(nullptr);
// Do we correctly get a null for a failed allocation?
- EXPECT_EQ(0, PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- 3u * 1024 * 1024 * 1024, type_name));
+ EXPECT_EQ(nullptr, PartitionAllocGenericFlags(
+ generic_allocator.root(), PartitionAllocReturnNull,
+ 3u * 1024 * 1024 * 1024, type_name));
}
// Test that we can fetch the real allocated size after an allocation.
@@ -802,36 +808,30 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
// Allocate something small.
requestedSize = 511 - kExtraAllocSize;
- predictedSize =
- PartitionAllocActualSize(generic_allocator.root(), requestedSize);
- ptr =
- PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
+ predictedSize = generic_allocator.root()->ActualSize(requestedSize);
+ ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize);
EXPECT_LT(requestedSize, actualSize);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// Allocate a size that should be a perfect match for a bucket, because it
// is an exact power of 2.
requestedSize = (256 * 1024) - kExtraAllocSize;
- predictedSize =
- PartitionAllocActualSize(generic_allocator.root(), requestedSize);
- ptr =
- PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
+ predictedSize = generic_allocator.root()->ActualSize(requestedSize);
+ ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize);
EXPECT_EQ(requestedSize, actualSize);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// Allocate a size that is a system page smaller than a bucket. GetSize()
// should return a larger size than we asked for now.
requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
- predictedSize =
- PartitionAllocActualSize(generic_allocator.root(), requestedSize);
- ptr =
- PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
+ predictedSize = generic_allocator.root()->ActualSize(requestedSize);
+ ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize);
@@ -839,51 +839,46 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
// Check that we can write at the end of the reported size too.
char* charPtr = reinterpret_cast<char*>(ptr);
*(charPtr + (actualSize - 1)) = 'A';
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// Allocate something very large, and uneven.
if (IsLargeMemoryDevice()) {
requestedSize = 512 * 1024 * 1024 - 1;
- predictedSize =
- PartitionAllocActualSize(generic_allocator.root(), requestedSize);
- ptr = PartitionAllocGeneric(generic_allocator.root(), requestedSize,
- type_name);
+ predictedSize = generic_allocator.root()->ActualSize(requestedSize);
+ ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize);
EXPECT_LT(requestedSize, actualSize);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
}
// Too large allocation.
- requestedSize = INT_MAX;
- predictedSize =
- PartitionAllocActualSize(generic_allocator.root(), requestedSize);
+ requestedSize = kGenericMaxDirectMapped + 1;
+ predictedSize = generic_allocator.root()->ActualSize(requestedSize);
EXPECT_EQ(requestedSize, predictedSize);
}
// Test the realloc() contract.
TEST_F(PartitionAllocTest, Realloc) {
// realloc(0, size) should be equivalent to malloc().
- void* ptr = PartitionReallocGeneric(generic_allocator.root(), 0,
- kTestAllocSize, type_name);
+ void* ptr =
+ generic_allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
memset(ptr, 'A', kTestAllocSize);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
// realloc(ptr, 0) should be equivalent to free().
- void* ptr2 =
- PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name);
- EXPECT_EQ(0, ptr2);
+ void* ptr2 = generic_allocator.root()->Realloc(ptr, 0, type_name);
+ EXPECT_EQ(nullptr, ptr2);
EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head);
// Test that growing an allocation with realloc() copies everything from the
// old allocation.
size_t size = kSystemPageSize - kExtraAllocSize;
- EXPECT_EQ(size, PartitionAllocActualSize(generic_allocator.root(), size));
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_EQ(size, generic_allocator.root()->ActualSize(size));
+ ptr = generic_allocator.root()->Alloc(size, type_name);
memset(ptr, 'A', size);
- ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, size + 1,
- type_name);
+ ptr2 = generic_allocator.root()->Realloc(ptr, size + 1, type_name);
EXPECT_NE(ptr, ptr2);
char* charPtr2 = static_cast<char*>(ptr2);
EXPECT_EQ('A', charPtr2[0]);
@@ -894,8 +889,7 @@ TEST_F(PartitionAllocTest, Realloc) {
// Test that shrinking an allocation with realloc() also copies everything
// from the old allocation.
- ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, size - 1,
- type_name);
+ ptr = generic_allocator.root()->Realloc(ptr2, size - 1, type_name);
EXPECT_NE(ptr2, ptr);
char* charPtr = static_cast<char*>(ptr);
EXPECT_EQ('A', charPtr[0]);
@@ -904,32 +898,30 @@ TEST_F(PartitionAllocTest, Realloc) {
EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
#endif
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// Test that shrinking a direct mapped allocation happens in-place.
size = kGenericMaxBucketed + 16 * kSystemPageSize;
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr = generic_allocator.root()->Alloc(size, type_name);
size_t actualSize = PartitionAllocGetSize(ptr);
- ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr,
- kGenericMaxBucketed + 8 * kSystemPageSize,
- type_name);
+ ptr2 = generic_allocator.root()->Realloc(
+ ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
EXPECT_EQ(ptr, ptr2);
EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
// Test that a previously in-place shrunk direct mapped allocation can be
// expanded up again within its original size.
- ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2,
- size - kSystemPageSize, type_name);
+ ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
+ type_name);
EXPECT_EQ(ptr2, ptr);
EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr));
// Test that a direct mapped allocation is performed not in-place when the
// new size is small enough.
- ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize,
- type_name);
+ ptr2 = generic_allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
EXPECT_NE(ptr, ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr2);
}
// Tests the handing out of freelists for partial pages.
@@ -939,13 +931,13 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
big_size + kExtraAllocSize);
size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
- EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(nullptr, bucket->empty_pages_head);
- void* ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
size_t totalSlots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(big_size + kExtraAllocSize);
@@ -957,35 +949,35 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
EXPECT_EQ(1, page->num_allocated_slots);
EXPECT_EQ(2, page->num_unprovisioned_slots);
- void* ptr2 = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr2 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr2);
EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(2, page->num_allocated_slots);
EXPECT_EQ(2, page->num_unprovisioned_slots);
- void* ptr3 = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr3 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr3);
EXPECT_TRUE(page->freelist_head);
EXPECT_EQ(3, page->num_allocated_slots);
EXPECT_EQ(0, page->num_unprovisioned_slots);
- void* ptr4 = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr4 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr4);
EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(4, page->num_allocated_slots);
EXPECT_EQ(0, page->num_unprovisioned_slots);
- void* ptr5 = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr5 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr5);
PartitionPage* page2 =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr5));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr5));
EXPECT_EQ(1, page2->num_allocated_slots);
// Churn things a little whilst there's a partial page freelist.
PartitionFree(ptr);
- ptr = PartitionAlloc(allocator.root(), big_size, type_name);
- void* ptr6 = PartitionAlloc(allocator.root(), big_size, type_name);
+ ptr = allocator.root()->Alloc(big_size, type_name);
+ void* ptr6 = allocator.root()->Alloc(big_size, type_name);
PartitionFree(ptr);
PartitionFree(ptr2);
@@ -1003,11 +995,11 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift;
bucket = &allocator.root()->buckets()[bucket_index];
- EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = PartitionAlloc(allocator.root(), mediumSize, type_name);
+ ptr = allocator.root()->Alloc(mediumSize, type_name);
EXPECT_TRUE(ptr);
- page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
totalSlots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
@@ -1021,11 +1013,11 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift;
bucket = &allocator.root()->buckets()[bucket_index];
- EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = PartitionAlloc(allocator.root(), smallSize, type_name);
+ ptr = allocator.root()->Alloc(smallSize, type_name);
EXPECT_TRUE(ptr);
- page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
totalSlots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
@@ -1040,11 +1032,11 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
size_t verySmallSize = 32 - kExtraAllocSize;
bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift;
bucket = &allocator.root()->buckets()[bucket_index];
- EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = PartitionAlloc(allocator.root(), verySmallSize, type_name);
+ ptr = allocator.root()->Alloc(verySmallSize, type_name);
EXPECT_TRUE(ptr);
- page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
totalSlots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
@@ -1060,30 +1052,29 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// larger than a system page.
size_t pageAndAHalfSize =
(kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
- ptr = PartitionAllocGeneric(generic_allocator.root(), pageAndAHalfSize,
- type_name);
+ ptr = generic_allocator.root()->Alloc(pageAndAHalfSize, type_name);
EXPECT_TRUE(ptr);
- page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
EXPECT_TRUE(page->freelist_head);
totalSlots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(pageAndAHalfSize + kExtraAllocSize);
EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
// And then make sure than exactly the page size only faults one page.
size_t pageSize = kSystemPageSize - kExtraAllocSize;
- ptr = PartitionAllocGeneric(generic_allocator.root(), pageSize, type_name);
+ ptr = generic_allocator.root()->Alloc(pageSize, type_name);
EXPECT_TRUE(ptr);
- page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
EXPECT_FALSE(page->freelist_head);
totalSlots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(pageSize + kExtraAllocSize);
EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
}
// Test some of the fragmentation-resistant properties of the allocator.
@@ -1093,26 +1084,26 @@ TEST_F(PartitionAllocTest, PageRefilling) {
// Grab two full pages and a non-full page.
PartitionPage* page1 = GetFullPage(kTestAllocSize);
PartitionPage* page2 = GetFullPage(kTestAllocSize);
- void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
EXPECT_NE(page1, bucket->active_pages_head);
EXPECT_NE(page2, bucket->active_pages_head);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
// Work out a pointer into page2 and free it; and then page1 and free it.
char* ptr2 =
- reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset;
+ reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset;
PartitionFree(ptr2);
ptr2 =
- reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset;
+ reinterpret_cast<char*>(PartitionPage::ToPointer(page2)) + kPointerOffset;
PartitionFree(ptr2);
// If we perform two allocations from the same bucket now, we expect to
// refill both the nearly full pages.
- (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
- (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ (void)allocator.root()->Alloc(kTestAllocSize, type_name);
+ (void)allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_EQ(1, page->num_allocated_slots);
FreeFullPage(page2);
@@ -1124,7 +1115,7 @@ TEST_F(PartitionAllocTest, PageRefilling) {
TEST_F(PartitionAllocTest, PartialPages) {
// Find a size that is backed by a partial partition page.
size_t size = sizeof(void*);
- PartitionBucket* bucket = 0;
+ PartitionBucket* bucket = nullptr;
while (size < kTestMaxAllocation) {
bucket = &allocator.root()->buckets()[size >> kBucketShift];
if (bucket->num_system_pages_per_slot_span %
@@ -1155,7 +1146,7 @@ TEST_F(PartitionAllocTest, MappingCollision) {
firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
char* pageBase =
- reinterpret_cast<char*>(PartitionPageToPointer(firstSuperPagePages[0]));
+ reinterpret_cast<char*>(PartitionPage::ToPointer(firstSuperPagePages[0]));
EXPECT_EQ(kPartitionPageSize,
reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
pageBase -= kPartitionPageSize;
@@ -1175,8 +1166,8 @@ TEST_F(PartitionAllocTest, MappingCollision) {
FreePages(map1, kPageAllocationGranularity);
FreePages(map2, kPageAllocationGranularity);
- pageBase =
- reinterpret_cast<char*>(PartitionPageToPointer(secondSuperPagePages[0]));
+ pageBase = reinterpret_cast<char*>(
+ PartitionPage::ToPointer(secondSuperPagePages[0]));
EXPECT_EQ(kPartitionPageSize,
reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
pageBase -= kPartitionPageSize;
@@ -1199,21 +1190,21 @@ TEST_F(PartitionAllocTest, MappingCollision) {
FreePages(map2, kPageAllocationGranularity);
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
- PartitionPageToPointer(pageInThirdSuperPage)) &
+ PartitionPage::ToPointer(pageInThirdSuperPage)) &
kPartitionPageOffsetMask);
// And make sure we really did get a page in a new superpage.
EXPECT_NE(reinterpret_cast<uintptr_t>(
- PartitionPageToPointer(firstSuperPagePages[0])) &
+ PartitionPage::ToPointer(firstSuperPagePages[0])) &
kSuperPageBaseMask,
reinterpret_cast<uintptr_t>(
- PartitionPageToPointer(pageInThirdSuperPage)) &
+ PartitionPage::ToPointer(pageInThirdSuperPage)) &
kSuperPageBaseMask);
EXPECT_NE(reinterpret_cast<uintptr_t>(
- PartitionPageToPointer(secondSuperPagePages[0])) &
+ PartitionPage::ToPointer(secondSuperPagePages[0])) &
kSuperPageBaseMask,
reinterpret_cast<uintptr_t>(
- PartitionPageToPointer(pageInThirdSuperPage)) &
+ PartitionPage::ToPointer(pageInThirdSuperPage)) &
kSuperPageBaseMask);
FreeFullPage(pageInThirdSuperPage);
@@ -1231,11 +1222,11 @@ TEST_F(PartitionAllocTest, FreeCache) {
size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
- void* ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
- EXPECT_EQ(0, bucket->empty_pages_head);
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(nullptr, bucket->empty_pages_head);
EXPECT_EQ(1, page->num_allocated_slots);
EXPECT_EQ(kPartitionPageSize,
allocator.root()->total_size_of_committed_pages);
@@ -1258,14 +1249,14 @@ TEST_F(PartitionAllocTest, FreeCache) {
// Check that an allocation works ok whilst in this state (a free'd page
// as the active pages head).
- ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_FALSE(bucket->empty_pages_head);
PartitionFree(ptr);
// Also check that a page that is bouncing immediately between empty and
// used does not get freed.
for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
- ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(page->freelist_head);
PartitionFree(ptr);
EXPECT_TRUE(page->freelist_head);
@@ -1278,23 +1269,23 @@ TEST_F(PartitionAllocTest, FreeCache) {
TEST_F(PartitionAllocTest, LostFreePagesBug) {
size_t size = kPartitionPageSize - kExtraAllocSize;
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
PartitionPage* page2 =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr2));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr2));
PartitionBucket* bucket = page->bucket;
- EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(nullptr, bucket->empty_pages_head);
EXPECT_EQ(-1, page->num_allocated_slots);
EXPECT_EQ(1, page2->num_allocated_slots);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr);
+ generic_allocator.root()->Free(ptr2);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->empty_pages_head->next_page);
@@ -1313,9 +1304,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
// At this moment, we have two decommitted pages, on the empty list.
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
@@ -1326,9 +1317,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
// We're now set up to trigger a historical bug by scanning over the active
// pages list. The current code gets into a different state, but we'll keep
// the test as being an interesting corner case.
- ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
@@ -1337,7 +1328,6 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
#if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
-
// Unit tests that check if an allocation fails in "return null" mode,
// repeating it doesn't crash, and still returns null. The tests need to
// stress memory subsystem limits to do so, hence they try to allocate
@@ -1387,40 +1377,35 @@ TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNull) {
// malloc(0), which is not good.
TEST_F(PartitionAllocDeathTest, LargeAllocs) {
// Largest alloc.
- EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(),
- static_cast<size_t>(-1), type_name),
- "");
+ EXPECT_DEATH(
+ generic_allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
// And the smallest allocation we expect to die.
EXPECT_DEATH(
- PartitionAllocGeneric(generic_allocator.root(),
- static_cast<size_t>(INT_MAX) + 1, type_name),
+ generic_allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
"");
}
// Check that our immediate double-free detection works.
TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize,
- type_name);
+ void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
- EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), "");
+ EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
}
// Check that our refcount-based double-free detection works.
TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize,
- type_name);
+ void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize,
- type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr);
+ generic_allocator.root()->Free(ptr2);
// This is not an immediate double-free so our immediate detection won't
// fire. However, it does take the "refcount" of the partition page to -1,
// which is illegal and should be trapped.
- EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), "");
+ EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
}
// Check that guard pages are present where expected.
@@ -1445,7 +1430,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
static_assert(kSize > kGenericMaxBucketed,
"allocation not large enough for direct allocation");
size_t size = kSize - kExtraAllocSize;
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
@@ -1453,7 +1438,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
EXPECT_DEATH(*(charPtr - 1) = 'A', "");
EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
}
// Check that a bad free() is caught where the free() refers to an unused
@@ -1461,14 +1446,14 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) {
// This large size will result in a direct mapped allocation with guard
// pages at either end.
- void* ptr = PartitionAllocGeneric(generic_allocator.root(),
- kPartitionPageSize * 2, type_name);
+ void* ptr =
+ generic_allocator.root()->Alloc(kPartitionPageSize * 2, type_name);
EXPECT_TRUE(ptr);
char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
- EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), "");
+ EXPECT_DEATH(generic_allocator.root()->Free(badPtr), "");
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
}
#endif // !defined(OS_ANDROID) && !defined(OS_IOS)
@@ -1477,10 +1462,10 @@ TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) {
// crashing and return non-zero values when memory is allocated.
TEST_F(PartitionAllocTest, DumpMemoryStats) {
{
- void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
MockPartitionStatsDumper mockStatsDumper;
- PartitionDumpStats(allocator.root(), "mock_allocator",
- false /* detailed dump */, &mockStatsDumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &mockStatsDumper);
EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded());
PartitionFree(ptr);
}
@@ -1488,12 +1473,11 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This series of tests checks the active -> empty -> decommitted states.
{
{
- void* ptr = PartitionAllocGeneric(generic_allocator.root(),
- 2048 - kExtraAllocSize, type_name);
+ void* ptr =
+ generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1508,14 +1492,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(1u, stats->num_active_pages);
EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(0u, stats->num_decommitted_pages);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
}
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1539,9 +1522,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1562,22 +1544,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This test checks for correct empty page list accounting.
{
size_t size = kPartitionPageSize - kExtraAllocSize;
- void* ptr1 =
- PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- void* ptr2 =
- PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ generic_allocator.root()->Free(ptr1);
+ generic_allocator.root()->Free(ptr2);
CycleGenericFreeCache(kTestAllocSize);
- ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ ptr1 = generic_allocator.root()->Alloc(size, type_name);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1594,7 +1573,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(1u, stats->num_decommitted_pages);
}
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ generic_allocator.root()->Free(ptr1);
}
// This test checks for correct direct mapped accounting.
@@ -1605,16 +1584,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
(size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
size_t real_size_bigger =
(size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
- void* ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller,
- type_name);
- void* ptr2 =
- PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name);
+ void* ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1647,29 +1623,25 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr2);
+ generic_allocator.root()->Free(ptr);
// Whilst we're here, allocate again and free with different ordering to
// give a workout to our linked list code.
- ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller,
- type_name);
- ptr2 =
- PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
+ ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
+ generic_allocator.root()->Free(ptr);
+ generic_allocator.root()->Free(ptr2);
}
// This test checks large-but-not-quite-direct allocations.
{
- void* ptr =
- PartitionAllocGeneric(generic_allocator.root(), 65536 + 1, type_name);
+ void* ptr = generic_allocator.root()->Alloc(65536 + 1, type_name);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
@@ -1689,13 +1661,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
@@ -1714,15 +1685,14 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- void* ptr2 = PartitionAllocGeneric(generic_allocator.root(),
- 65536 + kSystemPageSize + 1, type_name);
+ void* ptr2 =
+ generic_allocator.root()->Alloc(65536 + kSystemPageSize + 1, type_name);
EXPECT_EQ(ptr, ptr2);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
@@ -1743,20 +1713,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr2);
}
}
// Tests the API to purge freeable memory.
TEST_F(PartitionAllocTest, Purge) {
- char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric(
- generic_allocator.root(), 2048 - kExtraAllocSize, type_name));
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ char* ptr = reinterpret_cast<char*>(
+ generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
+ generic_allocator.root()->Free(ptr);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1765,13 +1734,11 @@ TEST_F(PartitionAllocTest, Purge) {
EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
}
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDecommitEmptyPages);
+ generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1782,17 +1749,15 @@ TEST_F(PartitionAllocTest, Purge) {
}
// Calling purge again here is a good way of testing we didn't mess up the
// state of the free cache ring.
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDecommitEmptyPages);
+ generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
char* bigPtr = reinterpret_cast<char*>(
- PartitionAllocGeneric(generic_allocator.root(), 256 * 1024, type_name));
- PartitionFreeGeneric(generic_allocator.root(), bigPtr);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDecommitEmptyPages);
+ generic_allocator.root()->Alloc(256 * 1024, type_name));
+ generic_allocator.root()->Free(bigPtr);
+ generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
- CheckPageInCore(ptr - kPointerOffset, false);
- CheckPageInCore(bigPtr - kPointerOffset, false);
+ CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
+ CHECK_PAGE_IN_CORE(bigPtr - kPointerOffset, false);
}
// Tests that we prefer to allocate into a non-empty partition page over an
@@ -1803,19 +1768,19 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Allocate 3 full slot spans worth of 8192-byte allocations.
// Each slot span for this size is 16384 bytes, or 1 partition page and 2
// slots.
- void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
- void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr5 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr6 = generic_allocator.root()->Alloc(size, type_name);
PartitionPage* page1 =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
PartitionPage* page2 =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr3));
PartitionPage* page3 =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr6));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr6));
EXPECT_NE(page1, page2);
EXPECT_NE(page2, page3);
PartitionBucket* bucket = page1->bucket;
@@ -1824,46 +1789,44 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Free up the 2nd slot in each slot span.
// This leaves the active list containing 3 pages, each with 1 used and 1
// free slot. The active page will be the one containing ptr1.
- PartitionFreeGeneric(generic_allocator.root(), ptr6);
- PartitionFreeGeneric(generic_allocator.root(), ptr4);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr6);
+ generic_allocator.root()->Free(ptr4);
+ generic_allocator.root()->Free(ptr2);
EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the middle page in the active list.
- PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ generic_allocator.root()->Free(ptr3);
EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the the first page in the active list -- also the current page.
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ generic_allocator.root()->Free(ptr1);
// A good choice here is to re-fill the third page since the first two are
// empty. We used to fail that.
- void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr7 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr6, ptr7);
EXPECT_EQ(page3, bucket->active_pages_head);
- PartitionFreeGeneric(generic_allocator.root(), ptr5);
- PartitionFreeGeneric(generic_allocator.root(), ptr7);
+ generic_allocator.root()->Free(ptr5);
+ generic_allocator.root()->Free(ptr7);
}
// Tests the API to purge discardable memory.
TEST_F(PartitionAllocTest, PurgeDiscardable) {
// Free the second of two 4096 byte allocations and then purge.
{
- void* ptr1 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
- char* ptr2 = reinterpret_cast<char*>(
- PartitionAllocGeneric(generic_allocator.root(),
- kSystemPageSize - kExtraAllocSize, type_name));
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ void* ptr1 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr2 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name));
+ generic_allocator.root()->Free(ptr2);
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
EXPECT_EQ(2u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1875,27 +1838,25 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(kSystemPageSize, stats->active_bytes);
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
}
- CheckPageInCore(ptr2 - kPointerOffset, true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
- CheckPageInCore(ptr2 - kPointerOffset, false);
+ CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
+ CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
EXPECT_EQ(3u, page->num_unprovisioned_slots);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ generic_allocator.root()->Free(ptr1);
}
// Free the first of two 4096 byte allocations and then purge.
{
- char* ptr1 = reinterpret_cast<char*>(
- PartitionAllocGeneric(generic_allocator.root(),
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
+ generic_allocator.root()->Free(ptr1);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1911,35 +1872,30 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(kSystemPageSize, stats->active_bytes);
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
}
- CheckPageInCore(ptr1 - kPointerOffset, true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
-#if defined(OS_WIN)
- CheckPageInCore(ptr1 - kPointerOffset, true);
-#else
- CheckPageInCore(ptr1 - kPointerOffset, false);
-#endif
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr2);
}
{
- char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
- generic_allocator.root(), 9216 - kExtraAllocSize, type_name));
- void* ptr2 = PartitionAllocGeneric(generic_allocator.root(),
- 9216 - kExtraAllocSize, type_name);
- void* ptr3 = PartitionAllocGeneric(generic_allocator.root(),
- 9216 - kExtraAllocSize, type_name);
- void* ptr4 = PartitionAllocGeneric(generic_allocator.root(),
- 9216 - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name));
+ void* ptr2 =
+ generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
+ void* ptr3 =
+ generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
+ void* ptr4 =
+ generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
memset(ptr1, 'A', 9216 - kExtraAllocSize);
memset(ptr2, 'A', 9216 - kExtraAllocSize);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ generic_allocator.root()->Free(ptr2);
+ generic_allocator.root()->Free(ptr1);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216);
@@ -1950,36 +1906,33 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(9216u * 2, stats->active_bytes);
EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes);
}
- CheckPageInCore(ptr1 - kPointerOffset, true);
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
- CheckPageInCore(ptr1 - kPointerOffset, true);
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
-
- PartitionFreeGeneric(generic_allocator.root(), ptr3);
- PartitionFreeGeneric(generic_allocator.root(), ptr4);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
+
+ generic_allocator.root()->Free(ptr3);
+ generic_allocator.root()->Free(ptr4);
}
{
- char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
- generic_allocator.root(), (64 * kSystemPageSize) - kExtraAllocSize,
- type_name));
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ (64 * kSystemPageSize) - kExtraAllocSize, type_name));
memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
- ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
- generic_allocator.root(), (61 * kSystemPageSize) - kExtraAllocSize,
- type_name));
+ generic_allocator.root()->Free(ptr1);
+ ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ (61 * kSystemPageSize) - kExtraAllocSize, type_name));
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1991,49 +1944,46 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(61 * kSystemPageSize, stats->active_bytes);
EXPECT_EQ(64 * kSystemPageSize, stats->resident_bytes);
}
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
-
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
+
+ generic_allocator.root()->Free(ptr1);
}
// This sub-test tests truncation of the provisioned slots in a trickier
// case where the freelist is rewritten.
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDecommitEmptyPages);
+ generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
- char* ptr1 = reinterpret_cast<char*>(
- PartitionAllocGeneric(generic_allocator.root(),
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr3 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr4 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A';
ptr1[kSystemPageSize * 3] = 'A';
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr4);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
+ generic_allocator.root()->Free(ptr2);
+ generic_allocator.root()->Free(ptr4);
+ generic_allocator.root()->Free(ptr1);
EXPECT_EQ(0u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2049,63 +1999,56 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(kSystemPageSize, stats->active_bytes);
EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes);
}
- CheckPageInCore(ptr1 - kPointerOffset, true);
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(1u, page->num_unprovisioned_slots);
- CheckPageInCore(ptr1 - kPointerOffset, true);
-#if defined(OS_WIN)
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
-#else
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
-#endif
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
// Let's check we didn't brick the freelist.
- void* ptr1b = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr1b = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr1, ptr1b);
- void* ptr2b = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr2b = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr2, ptr2b);
EXPECT_FALSE(page->freelist_head);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
- PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ generic_allocator.root()->Free(ptr1);
+ generic_allocator.root()->Free(ptr2);
+ generic_allocator.root()->Free(ptr3);
}
// This sub-test is similar, but tests a double-truncation.
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDecommitEmptyPages);
+ generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
- char* ptr1 = reinterpret_cast<char*>(
- PartitionAllocGeneric(generic_allocator.root(),
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr3 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr4 = PartitionAllocGeneric(
- generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 = generic_allocator.root()->Alloc(
+ kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A';
ptr1[kSystemPageSize * 3] = 'A';
PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
- PartitionFreeGeneric(generic_allocator.root(), ptr4);
- PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
+ generic_allocator.root()->Free(ptr4);
+ generic_allocator.root()->Free(ptr3);
EXPECT_EQ(0u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- PartitionDumpStatsGeneric(generic_allocator.root(),
- "mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ generic_allocator.root()->DumpStats("mock_generic_allocator",
+ false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2117,22 +2060,22 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->active_bytes);
EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes);
}
- CheckPageInCore(ptr1 - kPointerOffset, true);
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- PartitionPurgeMemoryGeneric(generic_allocator.root(),
- PartitionPurgeDiscardUnusedSystemPages);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+ generic_allocator.root()->PurgeMemory(
+ PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(2u, page->num_unprovisioned_slots);
- CheckPageInCore(ptr1 - kPointerOffset, true);
- CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false);
- CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), false);
+ CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
EXPECT_FALSE(page->freelist_head);
- PartitionFreeGeneric(generic_allocator.root(), ptr1);
- PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ generic_allocator.root()->Free(ptr1);
+ generic_allocator.root()->Free(ptr2);
}
}
@@ -2141,18 +2084,29 @@ TEST_F(PartitionAllocTest, ReallocMovesCookies) {
// use of the entire result is compatible with the debug mode's cookies, even
// when the bucket size is large enough to span more than one partition page
// and we can track the "raw" size. See https://crbug.com/709271
- const size_t kSize = base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
- void* ptr =
- PartitionAllocGeneric(generic_allocator.root(), kSize + 1, type_name);
+ static constexpr size_t kSize =
+ base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
+ void* ptr = generic_allocator.root()->Alloc(kSize + 1, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 1);
- ptr = PartitionReallocGeneric(generic_allocator.root(), ptr, kSize + 2,
- type_name);
+ ptr = generic_allocator.root()->Realloc(ptr, kSize + 2, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 2);
- PartitionFreeGeneric(generic_allocator.root(), ptr);
+ generic_allocator.root()->Free(ptr);
+}
+
+TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
+ // For crbug.com/781473
+ static constexpr size_t kSize = 264;
+ void* ptr = generic_allocator.root()->Alloc(kSize, type_name);
+ EXPECT_TRUE(ptr);
+
+ ptr = generic_allocator.root()->Realloc(ptr, kSize + 16, type_name);
+ EXPECT_TRUE(ptr);
+
+ generic_allocator.root()->Free(ptr);
}
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/spin_lock.cc b/chromium/base/allocator/partition_allocator/spin_lock.cc
index d16778d5eec..f127610c18f 100644
--- a/chromium/base/allocator/partition_allocator/spin_lock.cc
+++ b/chromium/base/allocator/partition_allocator/spin_lock.cc
@@ -37,6 +37,11 @@
// Don't bother doing using .word here since r2 is the lowest supported mips64
// that Chromium supports.
#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(ARCH_CPU_PPC64_FAMILY)
+#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
+#elif defined(ARCH_CPU_S390_FAMILY)
+// just do nothing
+#define YIELD_PROCESSOR ((void)0)
#endif
#endif
diff --git a/chromium/base/allocator/tcmalloc_unittest.cc b/chromium/base/allocator/tcmalloc_unittest.cc
index 47246585f06..78c4f84ce1f 100644
--- a/chromium/base/allocator/tcmalloc_unittest.cc
+++ b/chromium/base/allocator/tcmalloc_unittest.cc
@@ -76,7 +76,7 @@ static int NextSize(int size) {
static void TestCalloc(size_t n, size_t s, bool ok) {
char* p = reinterpret_cast<char*>(calloc(n, s));
if (!ok) {
- EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
+ EXPECT_EQ(nullptr, p) << "calloc(n, s) should not succeed";
} else {
EXPECT_NE(reinterpret_cast<void*>(NULL), p)
<< "calloc(n, s) should succeed";
@@ -154,7 +154,7 @@ TEST(TCMallocTest, Realloc) {
EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
Fill(dst, dst_size);
EXPECT_TRUE(Valid(dst, dst_size));
- if (dst != NULL)
+ if (dst != nullptr)
free(dst);
}
}
diff --git a/chromium/base/android/linker/BUILD.gn b/chromium/base/android/linker/BUILD.gn
index 5f85cc2d65f..fcebc7e27e3 100644
--- a/chromium/base/android/linker/BUILD.gn
+++ b/chromium/base/android/linker/BUILD.gn
@@ -6,6 +6,17 @@ import("//build/config/android/config.gni")
assert(is_android)
+declare_args() {
+ # Set this variable to true to enable GDB support in release builds.
+ #
+ # The default is to disable it to reduce the likelyhood of runtime crashes
+ # on devices that use machine translation (i.e. that run ARM binaries on
+ # x86 CPUs with a translation layer like Intel's Houdini). For full details
+ # see https://crbug.com/796938.
+ #
+ chromium_android_linker_enable_release_debugging = false
+}
+
shared_library("chromium_android_linker") {
sources = [
"android_dlext.h",
@@ -17,6 +28,11 @@ shared_library("chromium_android_linker") {
"modern_linker_jni.h",
]
+ # Disable GDB support for release builds, unless explicitly wanted.
+ if (!is_debug && !chromium_android_linker_enable_release_debugging) {
+ defines = [ "LEGACY_LINKER_DISABLE_DEBUGGER_SUPPORT" ]
+ }
+
# The NDK contains the crazy_linker here:
# '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
# However, we use our own fork. See bug 384700.
diff --git a/chromium/base/at_exit.cc b/chromium/base/at_exit.cc
index 2433e5c8920..52c2151398e 100644
--- a/chromium/base/at_exit.cc
+++ b/chromium/base/at_exit.cc
@@ -20,7 +20,7 @@ namespace base {
// version of the constructor, and if we are building a dynamic library we may
// end up with multiple AtExitManagers on the same process. We don't protect
// this for thread-safe access, since it will only be modified in testing.
-static AtExitManager* g_top_manager = NULL;
+static AtExitManager* g_top_manager = nullptr;
static bool g_disable_managers = false;
diff --git a/chromium/base/at_exit_unittest.cc b/chromium/base/at_exit_unittest.cc
index cda73403fb0..3de061f6a72 100644
--- a/chromium/base/at_exit_unittest.cc
+++ b/chromium/base/at_exit_unittest.cc
@@ -30,7 +30,7 @@ void ExpectCounter1IsZero(void* unused) {
}
void ExpectParamIsNull(void* param) {
- EXPECT_EQ(static_cast<void*>(NULL), param);
+ EXPECT_EQ(nullptr, param);
}
void ExpectParamIsCounter(void* param) {
@@ -48,9 +48,9 @@ class AtExitTest : public testing::Test {
TEST_F(AtExitTest, Basic) {
ZeroTestCounters();
- base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
- base::AtExitManager::RegisterCallback(&IncrementTestCounter2, NULL);
- base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, nullptr);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter2, nullptr);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, nullptr);
EXPECT_EQ(0, g_test_counter_1);
EXPECT_EQ(0, g_test_counter_2);
@@ -61,9 +61,9 @@ TEST_F(AtExitTest, Basic) {
TEST_F(AtExitTest, LIFOOrder) {
ZeroTestCounters();
- base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
- base::AtExitManager::RegisterCallback(&ExpectCounter1IsZero, NULL);
- base::AtExitManager::RegisterCallback(&IncrementTestCounter2, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, nullptr);
+ base::AtExitManager::RegisterCallback(&ExpectCounter1IsZero, nullptr);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter2, nullptr);
EXPECT_EQ(0, g_test_counter_1);
EXPECT_EQ(0, g_test_counter_2);
@@ -73,7 +73,7 @@ TEST_F(AtExitTest, LIFOOrder) {
}
TEST_F(AtExitTest, Param) {
- base::AtExitManager::RegisterCallback(&ExpectParamIsNull, NULL);
+ base::AtExitManager::RegisterCallback(&ExpectParamIsNull, nullptr);
base::AtExitManager::RegisterCallback(&ExpectParamIsCounter,
&g_test_counter_1);
base::AtExitManager::ProcessCallbacksNow();
diff --git a/chromium/base/base64_decode_fuzzer.cc b/chromium/base/base64_decode_fuzzer.cc
new file mode 100644
index 00000000000..3716f727c67
--- /dev/null
+++ b/chromium/base/base64_decode_fuzzer.cc
@@ -0,0 +1,15 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/strings/string_piece.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ std::string decode_output;
+ base::StringPiece data_piece(reinterpret_cast<const char*>(data), size);
+ base::Base64Decode(data_piece, &decode_output);
+ return 0;
+}
diff --git a/chromium/base/base64_encode_fuzzer.cc b/chromium/base/base64_encode_fuzzer.cc
new file mode 100644
index 00000000000..c324be08b70
--- /dev/null
+++ b/chromium/base/base64_encode_fuzzer.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/logging.h"
+#include "base/strings/string_piece.h"
+
+// Encode some random data, and then decode it.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ std::string encode_output;
+ std::string decode_output;
+ base::StringPiece data_piece(reinterpret_cast<const char*>(data), size);
+ base::Base64Encode(data_piece, &encode_output);
+ CHECK(base::Base64Decode(encode_output, &decode_output));
+ CHECK_EQ(data_piece, decode_output);
+ return 0;
+}
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index 546e7924a92..9554233b41c 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -10,11 +10,17 @@ namespace switches {
// Disables the crash reporting.
const char kDisableBreakpad[] = "disable-breakpad";
+// Comma-separated list of feature names to disable. See also kEnableFeatures.
+const char kDisableFeatures[] = "disable-features";
+
// Indicates that crash reporting should be enabled. On platforms where helper
// processes cannot access to files needed to make this decision, this flag is
// generated internally.
const char kEnableCrashReporter[] = "enable-crash-reporter";
+// Comma-separated list of feature names to enable. See also kDisableFeatures.
+const char kEnableFeatures[] = "enable-features";
+
// Makes memory allocators keep track of their allocations and context, so a
// detailed breakdown of memory usage can be presented in chrome://tracing when
// the memory-infra category is enabled.
@@ -113,4 +119,10 @@ const char kEnableCrashReporterForTesting[] =
"enable-crash-reporter-for-testing";
#endif
+#if defined(OS_ANDROID)
+// Calls madvise(MADV_RANDOM) on executable code right after the library is
+// loaded, from all processes.
+const char kMadviseRandomExecutableCode[] = "madvise-random-executable-code";
+#endif
+
} // namespace switches
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index f782dfc036a..56be3060f68 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -12,8 +12,10 @@
namespace switches {
extern const char kDisableBreakpad[];
+extern const char kDisableFeatures[];
extern const char kDisableLowEndDeviceMode[];
extern const char kEnableCrashReporter[];
+extern const char kEnableFeatures[];
extern const char kEnableHeapProfiling[];
extern const char kEnableHeapProfilingModePseudo[];
extern const char kEnableHeapProfilingModeNative[];
@@ -39,6 +41,10 @@ extern const char kDisableUsbKeyboardDetect[];
extern const char kEnableCrashReporterForTesting[];
#endif
+#if defined(OS_ANDROID)
+extern const char kMadviseRandomExecutableCode[];
+#endif
+
} // namespace switches
#endif // BASE_BASE_SWITCHES_H_
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index c19f75915a6..172ce5315d7 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -308,7 +308,8 @@ struct Invoker;
template <typename StorageType, typename R, typename... UnboundArgs>
struct Invoker<StorageType, R(UnboundArgs...)> {
- static R RunOnce(BindStateBase* base, UnboundArgs&&... unbound_args) {
+ static R RunOnce(BindStateBase* base,
+ PassingTraitsType<UnboundArgs>... unbound_args) {
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
// InvokeHelper<>::MakeItSo() call below.
@@ -321,7 +322,8 @@ struct Invoker<StorageType, R(UnboundArgs...)> {
std::forward<UnboundArgs>(unbound_args)...);
}
- static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
+ static R Run(BindStateBase* base,
+ PassingTraitsType<UnboundArgs>... unbound_args) {
// Local references to make debugger stepping easier. If in a debugger,
// you really want to warp ahead and step through the
// InvokeHelper<>::MakeItSo() call below.
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index ef948c19a46..7deba473e97 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -31,7 +31,7 @@ class IncompleteType;
class NoRef {
public:
- NoRef() {}
+ NoRef() = default;
MOCK_METHOD0(VoidMethod0, void());
MOCK_CONST_METHOD0(VoidConstMethod0, void());
@@ -49,7 +49,7 @@ class NoRef {
class HasRef : public NoRef {
public:
- HasRef() {}
+ HasRef() = default;
MOCK_CONST_METHOD0(AddRef, void());
MOCK_CONST_METHOD0(Release, bool());
@@ -61,7 +61,7 @@ class HasRef : public NoRef {
class HasRefPrivateDtor : public HasRef {
private:
- ~HasRefPrivateDtor() {}
+ ~HasRefPrivateDtor() = default;
};
static const int kParentValue = 1;
@@ -196,11 +196,8 @@ class CopyCounter {
public:
CopyCounter(int* copies, int* assigns)
: counter_(copies, assigns, nullptr, nullptr) {}
- CopyCounter(const CopyCounter& other) : counter_(other.counter_) {}
- CopyCounter& operator=(const CopyCounter& other) {
- counter_ = other.counter_;
- return *this;
- }
+ CopyCounter(const CopyCounter& other) = default;
+ CopyCounter& operator=(const CopyCounter& other) = default;
explicit CopyCounter(const DerivedCopyMoveCounter& other) : counter_(other) {}
@@ -321,8 +318,7 @@ class BindTest : public ::testing::Test {
static_func_mock_ptr = &static_func_mock_;
}
- virtual ~BindTest() {
- }
+ virtual ~BindTest() = default;
static void VoidFunc0() {
static_func_mock_ptr->VoidMethod0();
diff --git a/chromium/base/bits.h b/chromium/base/bits.h
index d101cb731a7..37d34d94cf1 100644
--- a/chromium/base/bits.h
+++ b/chromium/base/bits.h
@@ -12,6 +12,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
+#include "build/build_config.h"
#if defined(COMPILER_MSVC)
#include <intrin.h>
@@ -54,57 +55,130 @@ inline size_t Align(size_t size, size_t alignment) {
return (size + alignment - 1) & ~(alignment - 1);
}
-// These functions count the number of leading zeros in a binary value, starting
-// with the most significant bit. C does not have an operator to do this, but
-// fortunately the various compilers have built-ins that map to fast underlying
-// processor instructions.
+// CountLeadingZeroBits(value) returns the number of zero bits following the
+// most significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns {sizeof(T) * 8}.
+// Example: 00100010 -> 2
+//
+// CountTrailingZeroBits(value) returns the number of zero bits preceding the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns {sizeof(T) * 8}.
+// Example: 00100010 -> 1
+//
+// C does not have an operator to do this, but fortunately the various
+// compilers have built-ins that map to fast underlying processor instructions.
#if defined(COMPILER_MSVC)
-ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
+ unsigned>::type
+ CountLeadingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+ return LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
+ ? (31 - index - (32 - bits))
+ : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
+ unsigned>::type
+ CountLeadingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+ return LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
+ ? (63 - index)
+ : 64;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
+ unsigned>::type
+ CountTrailingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
unsigned long index;
- return LIKELY(_BitScanReverse(&index, x)) ? (31 - index) : 32;
+ return LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
+ : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
+ unsigned>::type
+ CountTrailingZeroBits(T x) {
+ static_assert(bits > 0, "invalid instantiation");
+ unsigned long index;
+ return LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
+ : 64;
+}
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ return CountLeadingZeroBits(x);
}
#if defined(ARCH_CPU_64_BITS)
// MSVC only supplies _BitScanForward64 when building for a 64-bit target.
ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
- unsigned long index;
- return LIKELY(_BitScanReverse64(&index, x)) ? (63 - index) : 64;
+ return CountLeadingZeroBits(x);
}
#endif
#elif defined(COMPILER_GCC)
-// This is very annoying. __builtin_clz has undefined behaviour for an input of
-// 0, even though there's clearly a return value that makes sense, and even
-// though some processor clz instructions have defined behaviour for 0. We could
-// drop to raw __asm__ to do better, but we'll avoid doing that unless we see
-// proof that we need to.
-ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
- return LIKELY(x) ? __builtin_clz(x) : 32;
+// __builtin_clz has undefined behaviour for an input of 0, even though there's
+// clearly a return value that makes sense, and even though some processor clz
+// instructions have defined behaviour for 0. We could drop to raw __asm__ to
+// do better, but we'll avoid doing that unless we see proof that we need to.
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountLeadingZeroBits(T value) {
+ static_assert(bits > 0, "invalid instantiation");
+ return LIKELY(value)
+ ? bits == 64
+ ? __builtin_clzll(static_cast<uint64_t>(value))
+ : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
+ : bits;
}
-ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
- return LIKELY(x) ? __builtin_clzll(x) : 64;
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+ typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+ unsigned>::type
+ CountTrailingZeroBits(T value) {
+ return LIKELY(value) ? bits == 64
+ ? __builtin_ctzll(static_cast<uint64_t>(value))
+ : __builtin_ctz(static_cast<uint32_t>(value))
+ : bits;
}
-#endif
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ return CountLeadingZeroBits(x);
+}
#if defined(ARCH_CPU_64_BITS)
-ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
- return CountLeadingZeroBits64(x);
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ return CountLeadingZeroBits(x);
}
-#else
+#endif
+
+#endif
ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
- return CountLeadingZeroBits32(x);
+ return CountLeadingZeroBits(x);
}
-#endif
+ALWAYS_INLINE size_t CountTrailingZeroBitsSizeT(size_t x) {
+ return CountTrailingZeroBits(x);
+}
} // namespace bits
} // namespace base
diff --git a/chromium/base/bits_unittest.cc b/chromium/base/bits_unittest.cc
index 270b8ef7d3b..39bf6b042b7 100644
--- a/chromium/base/bits_unittest.cc
+++ b/chromium/base/bits_unittest.cc
@@ -5,6 +5,7 @@
// This file contains the unit tests for the bit utilities.
#include "base/bits.h"
+#include "build/build_config.h"
#include <stddef.h>
@@ -61,24 +62,112 @@ TEST(BitsTest, Align) {
EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
}
-TEST(BitsTest, CLZWorks) {
- EXPECT_EQ(32u, CountLeadingZeroBits32(0u));
- EXPECT_EQ(31u, CountLeadingZeroBits32(1u));
- EXPECT_EQ(1u, CountLeadingZeroBits32(1u << 30));
- EXPECT_EQ(0u, CountLeadingZeroBits32(1u << 31));
+TEST(BitsTest, CountLeadingZeroBits8) {
+ EXPECT_EQ(8u, CountLeadingZeroBits(uint8_t{0}));
+ EXPECT_EQ(7u, CountLeadingZeroBits(uint8_t{1}));
+ for (uint8_t shift = 0; shift <= 7; shift++) {
+ EXPECT_EQ(7u - shift,
+ CountLeadingZeroBits(static_cast<uint8_t>(1 << shift)));
+ }
+ EXPECT_EQ(4u, CountLeadingZeroBits(uint8_t{0x0f}));
+}
+
+TEST(BitsTest, CountLeadingZeroBits16) {
+ EXPECT_EQ(16u, CountLeadingZeroBits(uint16_t{0}));
+ EXPECT_EQ(15u, CountLeadingZeroBits(uint16_t{1}));
+ for (uint16_t shift = 0; shift <= 15; shift++) {
+ EXPECT_EQ(15u - shift,
+ CountLeadingZeroBits(static_cast<uint16_t>(1 << shift)));
+ }
+ EXPECT_EQ(4u, CountLeadingZeroBits(uint16_t{0x0f0f}));
+}
+
+TEST(BitsTest, CountLeadingZeroBits32) {
+ EXPECT_EQ(32u, CountLeadingZeroBits(uint32_t{0}));
+ EXPECT_EQ(31u, CountLeadingZeroBits(uint32_t{1}));
+ for (uint32_t shift = 0; shift <= 31; shift++) {
+ EXPECT_EQ(31u - shift, CountLeadingZeroBits(uint32_t{1} << shift));
+ }
+ EXPECT_EQ(4u, CountLeadingZeroBits(uint32_t{0x0f0f0f0f}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits8) {
+ EXPECT_EQ(8u, CountTrailingZeroBits(uint8_t{0}));
+ EXPECT_EQ(7u, CountTrailingZeroBits(uint8_t{128}));
+ for (uint8_t shift = 0; shift <= 7; shift++) {
+ EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint8_t>(1 << shift)));
+ }
+ EXPECT_EQ(4u, CountTrailingZeroBits(uint8_t{0xf0}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits16) {
+ EXPECT_EQ(16u, CountTrailingZeroBits(uint16_t{0}));
+ EXPECT_EQ(15u, CountTrailingZeroBits(uint16_t{32768}));
+ for (uint16_t shift = 0; shift <= 15; shift++) {
+ EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint16_t>(1 << shift)));
+ }
+ EXPECT_EQ(4u, CountTrailingZeroBits(uint16_t{0xf0f0}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits32) {
+ EXPECT_EQ(32u, CountTrailingZeroBits(uint32_t{0}));
+ EXPECT_EQ(31u, CountTrailingZeroBits(uint32_t{1} << 31));
+ for (uint32_t shift = 0; shift <= 31; shift++) {
+ EXPECT_EQ(shift, CountTrailingZeroBits(uint32_t{1} << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeroBits(uint32_t{0xf0f0f0f0}));
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+TEST(BitsTest, CountLeadingZeroBits64) {
+ EXPECT_EQ(64u, CountLeadingZeroBits(uint64_t{0}));
+ EXPECT_EQ(63u, CountLeadingZeroBits(uint64_t{1}));
+ for (uint64_t shift = 0; shift <= 63; shift++) {
+ EXPECT_EQ(63u - shift, CountLeadingZeroBits(uint64_t{1} << shift));
+ }
+ EXPECT_EQ(4u, CountLeadingZeroBits(uint64_t{0x0f0f0f0f0f0f0f0f}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits64) {
+ EXPECT_EQ(64u, CountTrailingZeroBits(uint64_t{0}));
+ EXPECT_EQ(63u, CountTrailingZeroBits(uint64_t{1} << 63));
+ for (uint64_t shift = 0; shift <= 63; shift++) {
+ EXPECT_EQ(shift, CountTrailingZeroBits(uint64_t{1} << shift));
+ }
+ EXPECT_EQ(4u, CountTrailingZeroBits(uint64_t{0xf0f0f0f0f0f0f0f0}));
+}
+
+#endif // ARCH_CPU_64_BITS
+
+TEST(BitsTest, CountLeadingZeroBitsSizeT) {
+#if defined(ARCH_CPU_64_BITS)
+ EXPECT_EQ(64u, CountLeadingZeroBitsSizeT(size_t{0}));
+ EXPECT_EQ(63u, CountLeadingZeroBitsSizeT(size_t{1}));
+ EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(size_t{1} << 31));
+ EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(size_t{1} << 62));
+ EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(size_t{1} << 63));
+#else
+ EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(size_t{0}));
+ EXPECT_EQ(31u, CountLeadingZeroBitsSizeT(size_t{1}));
+ EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(size_t{1} << 30));
+ EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(size_t{1} << 31));
+#endif // ARCH_CPU_64_BITS
+}
+TEST(BitsTest, CountTrailingZeroBitsSizeT) {
#if defined(ARCH_CPU_64_BITS)
- EXPECT_EQ(64u, CountLeadingZeroBitsSizeT(0ull));
- EXPECT_EQ(63u, CountLeadingZeroBitsSizeT(1ull));
- EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(1ull << 31));
- EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1ull << 62));
- EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1ull << 63));
+ EXPECT_EQ(64u, CountTrailingZeroBitsSizeT(size_t{0}));
+ EXPECT_EQ(63u, CountTrailingZeroBitsSizeT(size_t{1} << 63));
+ EXPECT_EQ(31u, CountTrailingZeroBitsSizeT(size_t{1} << 31));
+ EXPECT_EQ(1u, CountTrailingZeroBitsSizeT(size_t{2}));
+ EXPECT_EQ(0u, CountTrailingZeroBitsSizeT(size_t{1}));
#else
- EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(0u));
- EXPECT_EQ(31u, CountLeadingZeroBitsSizeT(1u));
- EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1u << 30));
- EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1u << 31));
-#endif
+ EXPECT_EQ(32u, CountTrailingZeroBitsSizeT(size_t{0}));
+ EXPECT_EQ(31u, CountTrailingZeroBitsSizeT(size_t{1} << 31));
+ EXPECT_EQ(1u, CountTrailingZeroBitsSizeT(size_t{2}));
+ EXPECT_EQ(0u, CountTrailingZeroBitsSizeT(size_t{1}));
+#endif // ARCH_CPU_64_BITS
}
} // namespace bits
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index 043f72d4108..5e230fbb436 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -23,7 +23,8 @@ template <typename R, typename... Args>
class OnceCallback<R(Args...)> : public internal::CallbackBase {
public:
using RunType = R(Args...);
- using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*,
+ internal::PassingTraitsType<Args>...);
OnceCallback() : internal::CallbackBase(nullptr) {}
@@ -69,7 +70,8 @@ template <typename R, typename... Args>
class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
public:
using RunType = R(Args...);
- using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+ using PolymorphicInvoke = R (*)(internal::BindStateBase*,
+ internal::PassingTraitsType<Args>...);
RepeatingCallback() : internal::CallbackBaseCopyable(nullptr) {}
diff --git a/chromium/base/callback_helpers.cc b/chromium/base/callback_helpers.cc
index 1f87a6c35e6..90867310c85 100644
--- a/chromium/base/callback_helpers.cc
+++ b/chromium/base/callback_helpers.cc
@@ -6,7 +6,7 @@
namespace base {
-ScopedClosureRunner::ScopedClosureRunner() {}
+ScopedClosureRunner::ScopedClosureRunner() = default;
ScopedClosureRunner::ScopedClosureRunner(OnceClosure closure)
: closure_(std::move(closure)) {}
diff --git a/chromium/base/callback_internal.cc b/chromium/base/callback_internal.cc
index 864c1a036cd..0179bb763f6 100644
--- a/chromium/base/callback_internal.cc
+++ b/chromium/base/callback_internal.cc
@@ -71,7 +71,7 @@ CallbackBase::CallbackBase(BindStateBase* bind_state)
DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
}
-CallbackBase::~CallbackBase() {}
+CallbackBase::~CallbackBase() = default;
CallbackBaseCopyable::CallbackBaseCopyable(const CallbackBaseCopyable& c)
: CallbackBase(nullptr) {
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index 8ad8449a13d..616abddd3e5 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -31,6 +31,22 @@ struct BindStateBaseRefCountTraits {
static void Destruct(const BindStateBase*);
};
+template <typename T, bool IsScalar = std::is_scalar<T>::value>
+struct PassingTraits;
+
+template <typename T>
+struct PassingTraits<T, false> {
+ using Type = T&&;
+};
+
+template <typename T>
+struct PassingTraits<T, true> {
+ using Type = T;
+};
+
+template <typename T>
+using PassingTraitsType = typename PassingTraits<T>::Type;
+
// BindStateBase is used to provide an opaque handle that the Callback
// class can use to represent a function object with bound arguments. It
// behaves as an existential type that is used by a corresponding
diff --git a/chromium/base/callback_unittest.cc b/chromium/base/callback_unittest.cc
index f76adbcdd2c..c07d3ee20ea 100644
--- a/chromium/base/callback_unittest.cc
+++ b/chromium/base/callback_unittest.cc
@@ -25,7 +25,7 @@ struct FakeBindState : internal::BindStateBase {
FakeBindState() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
private:
- ~FakeBindState() {}
+ ~FakeBindState() = default;
static void Destroy(const internal::BindStateBase* self) {
delete static_cast<const FakeBindState*>(self);
}
@@ -41,7 +41,7 @@ class CallbackTest : public ::testing::Test {
CallbackTest()
: callback_a_(new FakeBindState()), callback_b_(new FakeBindState()) {}
- ~CallbackTest() override {}
+ ~CallbackTest() override = default;
protected:
Callback<void()> callback_a_;
diff --git a/chromium/base/cancelable_callback.h b/chromium/base/cancelable_callback.h
index 13cbd0c2139..a98101a162d 100644
--- a/chromium/base/cancelable_callback.h
+++ b/chromium/base/cancelable_callback.h
@@ -56,28 +56,24 @@
#include "base/memory/weak_ptr.h"
namespace base {
+namespace internal {
-template <typename Sig>
-class CancelableCallback;
-
-template <typename... A>
-class CancelableCallback<void(A...)> {
+template <typename CallbackType>
+class CancelableCallbackImpl {
public:
- CancelableCallback() : weak_factory_(this) {}
+ CancelableCallbackImpl() : weak_ptr_factory_(this) {}
// |callback| must not be null.
- explicit CancelableCallback(const base::Callback<void(A...)>& callback)
- : callback_(callback), weak_factory_(this) {
- DCHECK(!callback.is_null());
- InitializeForwarder();
+ explicit CancelableCallbackImpl(CallbackType callback)
+ : callback_(std::move(callback)), weak_ptr_factory_(this) {
+ DCHECK(callback_);
}
- ~CancelableCallback() {}
+ ~CancelableCallbackImpl() = default;
// Cancels and drops the reference to the wrapped callback.
void Cancel() {
- weak_factory_.InvalidateWeakPtrs();
- forwarder_.Reset();
+ weak_ptr_factory_.InvalidateWeakPtrs();
callback_.Reset();
}
@@ -88,48 +84,72 @@ class CancelableCallback<void(A...)> {
// Sets |callback| as the closure that may be cancelled. |callback| may not
// be null. Outstanding and any previously wrapped callbacks are cancelled.
- void Reset(const base::Callback<void(A...)>& callback) {
- DCHECK(!callback.is_null());
-
+ void Reset(CallbackType callback) {
+ DCHECK(callback);
// Outstanding tasks (e.g., posted to a message loop) must not be called.
Cancel();
-
- // |forwarder_| is no longer valid after Cancel(), so re-bind.
- InitializeForwarder();
-
- callback_ = callback;
+ callback_ = std::move(callback);
}
// Returns a callback that can be disabled by calling Cancel().
- const base::Callback<void(A...)>& callback() const {
- return forwarder_;
+ CallbackType callback() const {
+ if (!callback_)
+ return CallbackType();
+ CallbackType forwarder;
+ MakeForwarder(&forwarder);
+ return forwarder;
}
private:
- void Forward(A... args) const {
- callback_.Run(std::forward<A>(args)...);
+ template <typename... Args>
+ void MakeForwarder(RepeatingCallback<void(Args...)>* out) const {
+ using ForwarderType = void (CancelableCallbackImpl::*)(Args...);
+ ForwarderType forwarder = &CancelableCallbackImpl::ForwardRepeating;
+ *out = BindRepeating(forwarder, weak_ptr_factory_.GetWeakPtr());
}
- // Helper method to bind |forwarder_| using a weak pointer from
- // |weak_factory_|.
- void InitializeForwarder() {
- forwarder_ = base::Bind(&CancelableCallback<void(A...)>::Forward,
- weak_factory_.GetWeakPtr());
+ template <typename... Args>
+ void MakeForwarder(OnceCallback<void(Args...)>* out) const {
+ using ForwarderType = void (CancelableCallbackImpl::*)(Args...);
+ ForwarderType forwarder = &CancelableCallbackImpl::ForwardOnce;
+ *out = BindOnce(forwarder, weak_ptr_factory_.GetWeakPtr());
}
- // The wrapper closure.
- base::Callback<void(A...)> forwarder_;
+ template <typename... Args>
+ void ForwardRepeating(Args... args) {
+ callback_.Run(std::forward<Args>(args)...);
+ }
- // The stored closure that may be cancelled.
- base::Callback<void(A...)> callback_;
+ template <typename... Args>
+ void ForwardOnce(Args... args) {
+ weak_ptr_factory_.InvalidateWeakPtrs();
+ std::move(callback_).Run(std::forward<Args>(args)...);
+ }
- // Used to ensure Forward() is not run when this object is destroyed.
- base::WeakPtrFactory<CancelableCallback<void(A...)>> weak_factory_;
+ // The stored closure that may be cancelled.
+ CallbackType callback_;
+ mutable base::WeakPtrFactory<CancelableCallbackImpl> weak_ptr_factory_;
- DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
+ DISALLOW_COPY_AND_ASSIGN(CancelableCallbackImpl);
};
-typedef CancelableCallback<void(void)> CancelableClosure;
+} // namespace internal
+
+// Consider using base::WeakPtr directly instead of base::CancelableCallback for
+// the task cancellation.
+template <typename Signature>
+using CancelableOnceCallback =
+ internal::CancelableCallbackImpl<OnceCallback<Signature>>;
+using CancelableOnceClosure = CancelableOnceCallback<void()>;
+
+template <typename Signature>
+using CancelableRepeatingCallback =
+ internal::CancelableCallbackImpl<RepeatingCallback<Signature>>;
+using CancelableRepeatingClosure = CancelableRepeatingCallback<void()>;
+
+template <typename Signature>
+using CancelableCallback = CancelableRepeatingCallback<Signature>;
+using CancelableClosure = CancelableCallback<void()>;
} // namespace base
diff --git a/chromium/base/cancelable_callback_unittest.cc b/chromium/base/cancelable_callback_unittest.cc
index e793a836db7..373498cbded 100644
--- a/chromium/base/cancelable_callback_unittest.cc
+++ b/chromium/base/cancelable_callback_unittest.cc
@@ -23,7 +23,8 @@ namespace {
class TestRefCounted : public RefCountedThreadSafe<TestRefCounted> {
private:
friend class RefCountedThreadSafe<TestRefCounted>;
- ~TestRefCounted() {};
+ ~TestRefCounted() = default;
+
};
void Increment(int* count) { (*count)++; }
diff --git a/chromium/base/command_line.cc b/chromium/base/command_line.cc
index 873da813483..3a5d089bb2f 100644
--- a/chromium/base/command_line.cc
+++ b/chromium/base/command_line.cc
@@ -10,6 +10,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
@@ -23,7 +24,7 @@
namespace base {
-CommandLine* CommandLine::current_process_commandline_ = NULL;
+CommandLine* CommandLine::current_process_commandline_ = nullptr;
namespace {
@@ -173,23 +174,11 @@ CommandLine::CommandLine(const StringVector& argv)
InitFromArgv(argv);
}
-CommandLine::CommandLine(const CommandLine& other)
- : argv_(other.argv_),
- switches_(other.switches_),
- begin_args_(other.begin_args_) {
- ResetStringPieces();
-}
+CommandLine::CommandLine(const CommandLine& other) = default;
-CommandLine& CommandLine::operator=(const CommandLine& other) {
- argv_ = other.argv_;
- switches_ = other.switches_;
- begin_args_ = other.begin_args_;
- ResetStringPieces();
- return *this;
-}
+CommandLine& CommandLine::operator=(const CommandLine& other) = default;
-CommandLine::~CommandLine() {
-}
+CommandLine::~CommandLine() = default;
#if defined(OS_WIN)
// static
@@ -234,7 +223,7 @@ bool CommandLine::Init(int argc, const char* const* argv) {
void CommandLine::Reset() {
DCHECK(current_process_commandline_);
delete current_process_commandline_;
- current_process_commandline_ = NULL;
+ current_process_commandline_ = nullptr;
}
// static
@@ -268,7 +257,6 @@ void CommandLine::InitFromArgv(int argc,
void CommandLine::InitFromArgv(const StringVector& argv) {
argv_ = StringVector(1);
switches_.clear();
- switches_by_stringpiece_.clear();
begin_args_ = 1;
SetProgram(argv.empty() ? FilePath() : FilePath(argv[0]));
AppendSwitchesAndArguments(this, argv);
@@ -288,8 +276,7 @@ void CommandLine::SetProgram(const FilePath& program) {
bool CommandLine::HasSwitch(const base::StringPiece& switch_string) const {
DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
- return switches_by_stringpiece_.find(switch_string) !=
- switches_by_stringpiece_.end();
+ return ContainsKey(switches_, switch_string);
}
bool CommandLine::HasSwitch(const char switch_constant[]) const {
@@ -318,9 +305,8 @@ FilePath CommandLine::GetSwitchValuePath(
CommandLine::StringType CommandLine::GetSwitchValueNative(
const base::StringPiece& switch_string) const {
DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
- auto result = switches_by_stringpiece_.find(switch_string);
- return result == switches_by_stringpiece_.end() ? StringType()
- : *(result->second);
+ auto result = switches_.find(switch_string);
+ return result == switches_.end() ? StringType() : result->second;
}
void CommandLine::AppendSwitch(const std::string& switch_string) {
@@ -346,7 +332,6 @@ void CommandLine::AppendSwitchNative(const std::string& switch_string,
switches_.insert(make_pair(switch_key.substr(prefix_length), value));
if (!insertion.second)
insertion.first->second = value;
- switches_by_stringpiece_[insertion.first->first] = &(insertion.first->second);
// Preserve existing switch prefixes in |argv_|; only append one if necessary.
if (prefix_length == 0)
combined_switch_string = kSwitchPrefixes[0] + combined_switch_string;
@@ -489,10 +474,4 @@ CommandLine::StringType CommandLine::GetArgumentsStringInternal(
return params;
}
-void CommandLine::ResetStringPieces() {
- switches_by_stringpiece_.clear();
- for (const auto& entry : switches_)
- switches_by_stringpiece_[entry.first] = &(entry.second);
-}
-
} // namespace base
diff --git a/chromium/base/command_line.h b/chromium/base/command_line.h
index 3d29f8fee7f..31b22d6b9a9 100644
--- a/chromium/base/command_line.h
+++ b/chromium/base/command_line.h
@@ -40,8 +40,7 @@ class BASE_EXPORT CommandLine {
using CharType = StringType::value_type;
using StringVector = std::vector<StringType>;
- using SwitchMap = std::map<std::string, StringType>;
- using StringPieceSwitchMap = std::map<StringPiece, const StringType*>;
+ using SwitchMap = std::map<std::string, StringType, std::less<>>;
// A constructor for CommandLines that only carry switches and arguments.
enum NoProgram { NO_PROGRAM };
@@ -230,11 +229,6 @@ class BASE_EXPORT CommandLine {
// also quotes parts with '%' in them.
StringType GetArgumentsStringInternal(bool quote_placeholders) const;
- // Reconstruct |switches_by_stringpiece| to be a mirror of |switches|.
- // |switches_by_stringpiece| only contains pointers to objects owned by
- // |switches|.
- void ResetStringPieces();
-
// The singleton CommandLine representing the current process's command line.
static CommandLine* current_process_commandline_;
@@ -244,12 +238,6 @@ class BASE_EXPORT CommandLine {
// Parsed-out switch keys and values.
SwitchMap switches_;
- // A mirror of |switches_| with only references to the actual strings.
- // The StringPiece internally holds a pointer to a key in |switches_| while
- // the mapped_type points to a value in |switches_|.
- // Used for allocation-free lookups.
- StringPieceSwitchMap switches_by_stringpiece_;
-
// The index after the program and switches, any arguments start here.
size_t begin_args_;
};
diff --git a/chromium/base/command_line_unittest.cc b/chromium/base/command_line_unittest.cc
index 79c9aecc2a2..6785350ff13 100644
--- a/chromium/base/command_line_unittest.cc
+++ b/chromium/base/command_line_unittest.cc
@@ -176,7 +176,7 @@ TEST(CommandLineTest, EmptyString) {
EXPECT_EQ(1U, cl_from_string.argv().size());
EXPECT_TRUE(cl_from_string.GetArgs().empty());
#endif
- CommandLine cl_from_argv(0, NULL);
+ CommandLine cl_from_argv(0, nullptr);
EXPECT_TRUE(cl_from_argv.GetCommandLineString().empty());
EXPECT_TRUE(cl_from_argv.GetProgram().empty());
EXPECT_EQ(1U, cl_from_argv.argv().size());
@@ -382,9 +382,9 @@ TEST(CommandLineTest, ProgramQuotes) {
TEST(CommandLineTest, Init) {
// Call Init without checking output once so we know it's been called
// whether or not the test runner does so.
- CommandLine::Init(0, NULL);
+ CommandLine::Init(0, nullptr);
CommandLine* initial = CommandLine::ForCurrentProcess();
- EXPECT_FALSE(CommandLine::Init(0, NULL));
+ EXPECT_FALSE(CommandLine::Init(0, nullptr));
CommandLine* current = CommandLine::ForCurrentProcess();
EXPECT_EQ(initial, current);
}
diff --git a/chromium/base/containers/linked_list_unittest.cc b/chromium/base/containers/linked_list_unittest.cc
index f4ecc71066f..3470c86b62c 100644
--- a/chromium/base/containers/linked_list_unittest.cc
+++ b/chromium/base/containers/linked_list_unittest.cc
@@ -28,7 +28,7 @@ class MultipleInheritanceNodeBase {
class MultipleInheritanceNode : public MultipleInheritanceNodeBase,
public LinkNode<MultipleInheritanceNode> {
public:
- MultipleInheritanceNode() {}
+ MultipleInheritanceNode() = default;
};
// Checks that when iterating |list| (either from head to tail, or from
@@ -65,12 +65,12 @@ TEST(LinkedList, Empty) {
LinkedList<Node> list;
EXPECT_EQ(list.end(), list.head());
EXPECT_EQ(list.end(), list.tail());
- ExpectListContents(list, 0, NULL);
+ ExpectListContents(list, 0, nullptr);
}
TEST(LinkedList, Append) {
LinkedList<Node> list;
- ExpectListContents(list, 0, NULL);
+ ExpectListContents(list, 0, nullptr);
Node n1(1);
list.Append(&n1);
@@ -159,7 +159,7 @@ TEST(LinkedList, RemoveFromList) {
n2.RemoveFromList();
n4.RemoveFromList();
- ExpectListContents(list, 0, NULL);
+ ExpectListContents(list, 0, nullptr);
EXPECT_EQ(list.end(), list.head());
EXPECT_EQ(list.end(), list.tail());
@@ -300,8 +300,8 @@ TEST(LinkedList, RemovedNodeHasNullNextPrevious) {
list.Append(&n);
n.RemoveFromList();
- EXPECT_EQ(NULL, n.next());
- EXPECT_EQ(NULL, n.previous());
+ EXPECT_EQ(nullptr, n.next());
+ EXPECT_EQ(nullptr, n.previous());
}
} // namespace
diff --git a/chromium/base/containers/small_map.h b/chromium/base/containers/small_map.h
index 7ffd6d4adc2..495332fc35f 100644
--- a/chromium/base/containers/small_map.h
+++ b/chromium/base/containers/small_map.h
@@ -14,7 +14,6 @@
#include "base/containers/hash_tables.h"
#include "base/logging.h"
-#include "base/memory/manual_constructor.h"
namespace base {
@@ -67,13 +66,12 @@ namespace base {
// be used by default. If the wrapped map type has a strict weak
// ordering "key_compare" (std::map does), that will be used to
// implement equality by default.
-// MapInit: A functor that takes a ManualConstructor<NormalMap>* and uses it to
-// initialize the map. This functor will be called at most once per
-// small_map, when the map exceeds the threshold of kArraySize and we
-// are about to copy values from the array to the map. The functor
-// *must* call one of the Init() methods provided by
-// ManualConstructor, since after it runs we assume that the NormalMap
-// has been initialized.
+// MapInit: A functor that takes a NormalMap* and uses it to initialize the map.
+// This functor will be called at most once per small_map, when the map
+// exceeds the threshold of kArraySize and we are about to copy values
+// from the array to the map. The functor *must* initialize the
+// NormalMap* argument with placement new, since after it runs we
+// assume that the NormalMap has been initialized.
//
// example:
// base::small_map<std::map<string, int>> days;
diff --git a/chromium/base/containers/small_map_unittest.cc b/chromium/base/containers/small_map_unittest.cc
index d33549f1cd5..6561851f9de 100644
--- a/chromium/base/containers/small_map_unittest.cc
+++ b/chromium/base/containers/small_map_unittest.cc
@@ -443,7 +443,7 @@ namespace {
class unordered_map_add_item : public std::unordered_map<int, int> {
public:
- unordered_map_add_item() {}
+ unordered_map_add_item() = default;
explicit unordered_map_add_item(const std::pair<int, int>& item) {
insert(item);
}
diff --git a/chromium/base/containers/span.h b/chromium/base/containers/span.h
index 76aa6c313ba..7dec11805fc 100644
--- a/chromium/base/containers/span.h
+++ b/chromium/base/containers/span.h
@@ -121,13 +121,25 @@ using EnableIfConstSpanCompatibleContainer =
// // std::string HexEncode(base::span<const uint8_t> data);
// std::vector<uint8_t> data_buffer = GenerateData();
// std::string r = HexEncode(data_buffer);
-
+//
// Mutable:
// // ssize_t SafeSNPrintf(base::span<char>, const char* fmt, Args...);
// char str_buffer[100];
// SafeSNPrintf(str_buffer, "Pi ~= %lf", 3.14);
//
-// ======= Differences from the working group proposal =======
+// Spans with "const" and pointers
+// -------------------------------
+//
+// Const and pointers can get confusing. Here are vectors of pointers and their
+// corresponding spans (you can always make the span "more const" too):
+//
+// const std::vector<int*> => base::span<int* const>
+// std::vector<const int*> => base::span<const int*>
+// const std::vector<const int*> => base::span<const int* const>
+//
+// Differences from the working group proposal
+// -------------------------------------------
+//
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0122r5.pdf is the
// latest working group proposal. The biggest difference is span does not
// support a static extent template parameter. Other differences are documented
diff --git a/chromium/base/containers/stack_container_unittest.cc b/chromium/base/containers/stack_container_unittest.cc
index 2bb95379ba1..b6bb9b63527 100644
--- a/chromium/base/containers/stack_container_unittest.cc
+++ b/chromium/base/containers/stack_container_unittest.cc
@@ -90,7 +90,7 @@ TEST(StackContainer, VectorDoubleDelete) {
EXPECT_EQ(alive, 1);
Dummy* dummy_unref = dummy.get();
- dummy = NULL;
+ dummy = nullptr;
EXPECT_EQ(alive, 1);
Container::iterator itr = std::find(vect->begin(), vect->end(), dummy_unref);
@@ -107,7 +107,7 @@ template <size_t alignment>
class AlignedData {
public:
AlignedData() { memset(data_, 0, alignment); }
- ~AlignedData() {}
+ ~AlignedData() = default;
alignas(alignment) char data_[alignment];
};
diff --git a/chromium/base/cpu.cc b/chromium/base/cpu.cc
index 136501c128d..cd9066f53ae 100644
--- a/chromium/base/cpu.cc
+++ b/chromium/base/cpu.cc
@@ -10,6 +10,7 @@
#include <string.h>
#include <algorithm>
+#include <utility>
#include "base/macros.h"
#include "build/build_config.h"
@@ -106,17 +107,14 @@ std::string* CpuInfoBrand() {
std::string contents;
ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
DCHECK(!contents.empty());
- if (contents.empty()) {
- return new std::string();
- }
std::istringstream iss(contents);
std::string line;
while (std::getline(iss, line)) {
- if ((line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
- line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
+ if (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0)
return new std::string(line.substr(strlen(kModelNamePrefix)));
- }
+ if (line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)
+ return new std::string(line.substr(strlen(kProcessorPrefix)));
}
return new std::string();
@@ -127,12 +125,16 @@ std::string* CpuInfoBrand() {
#endif // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
// defined(OS_LINUX))
-} // anonymous namespace
+} // namespace
void CPU::Initialize() {
#if defined(ARCH_CPU_X86_FAMILY)
int cpu_info[4] = {-1};
- char cpu_string[48];
+ // This array is used to temporarily hold the vendor name and then the brand
+ // name. Thus it has to be big enough for both use cases. There are
+ // static_asserts below for each of the use cases to make sure this array is
+ // big enough.
+ char cpu_string[sizeof(cpu_info) * 3 + 1];
// __cpuid with an InfoType argument of 0 returns the number of
// valid Ids in CPUInfo[0] and the CPU identification string in
@@ -140,12 +142,16 @@ void CPU::Initialize() {
// not in linear order. The code below arranges the information
// in a human readable form. The human readable order is CPUInfo[1] |
// CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
- // before using memcpy to copy these three array elements to cpu_string.
+ // before using memcpy() to copy these three array elements to |cpu_string|.
__cpuid(cpu_info, 0);
int num_ids = cpu_info[0];
std::swap(cpu_info[2], cpu_info[3]);
- memcpy(cpu_string, &cpu_info[1], 3 * sizeof(cpu_info[1]));
- cpu_vendor_.assign(cpu_string, 3 * sizeof(cpu_info[1]));
+ static constexpr size_t kVendorNameSize = 3 * sizeof(cpu_info[1]);
+ static_assert(kVendorNameSize < arraysize(cpu_string),
+ "cpu_string too small");
+ memcpy(cpu_string, &cpu_info[1], kVendorNameSize);
+ cpu_string[kVendorNameSize] = '\0';
+ cpu_vendor_ = cpu_string;
// Interpret CPU feature information.
if (num_ids > 0) {
@@ -191,28 +197,33 @@ void CPU::Initialize() {
// Get the brand string of the cpu.
__cpuid(cpu_info, 0x80000000);
- const int parameter_end = 0x80000004;
- int max_parameter = cpu_info[0];
-
- if (cpu_info[0] >= parameter_end) {
- char* cpu_string_ptr = cpu_string;
-
- for (int parameter = 0x80000002; parameter <= parameter_end &&
- cpu_string_ptr < &cpu_string[sizeof(cpu_string)]; parameter++) {
+ const int max_parameter = cpu_info[0];
+
+ static constexpr int kParameterStart = 0x80000002;
+ static constexpr int kParameterEnd = 0x80000004;
+ static constexpr int kParameterSize = kParameterEnd - kParameterStart + 1;
+ static_assert(kParameterSize * sizeof(cpu_info) + 1 == arraysize(cpu_string),
+ "cpu_string has wrong size");
+
+ if (max_parameter >= kParameterEnd) {
+ size_t i = 0;
+ for (int parameter = kParameterStart; parameter <= kParameterEnd;
+ ++parameter) {
__cpuid(cpu_info, parameter);
- memcpy(cpu_string_ptr, cpu_info, sizeof(cpu_info));
- cpu_string_ptr += sizeof(cpu_info);
+ memcpy(&cpu_string[i], cpu_info, sizeof(cpu_info));
+ i += sizeof(cpu_info);
}
- cpu_brand_.assign(cpu_string, cpu_string_ptr - cpu_string);
+ cpu_string[i] = '\0';
+ cpu_brand_ = cpu_string;
}
- const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
- if (max_parameter >= parameter_containing_non_stop_time_stamp_counter) {
- __cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
+ static constexpr int kParameterContainingNonStopTimeStampCounter = 0x80000007;
+ if (max_parameter >= kParameterContainingNonStopTimeStampCounter) {
+ __cpuid(cpu_info, kParameterContainingNonStopTimeStampCounter);
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
}
#elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
- cpu_brand_.assign(*CpuInfoBrand());
+ cpu_brand_ = *CpuInfoBrand();
#endif
}
diff --git a/chromium/base/cpu_unittest.cc b/chromium/base/cpu_unittest.cc
index 9cabfd6998d..8a68ea07817 100644
--- a/chromium/base/cpu_unittest.cc
+++ b/chromium/base/cpu_unittest.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "base/cpu.h"
+#include "base/stl_util.h"
#include "build/build_config.h"
-
#include "testing/gtest/include/gtest/gtest.h"
#if _MSC_VER >= 1700
@@ -125,3 +125,10 @@ TEST(CPU, RunExtendedInstructions) {
#endif // defined(COMPILER_GCC)
#endif // defined(ARCH_CPU_X86_FAMILY)
}
+
+// For https://crbug.com/249713
+TEST(CPU, BrandAndVendorContainsNoNUL) {
+ base::CPU cpu;
+ EXPECT_FALSE(base::ContainsValue(cpu.cpu_brand(), '\0'));
+ EXPECT_FALSE(base::ContainsValue(cpu.vendor_name(), '\0'));
+}
diff --git a/chromium/base/debug/activity_analyzer.cc b/chromium/base/debug/activity_analyzer.cc
index 19bb767112c..d787829579a 100644
--- a/chromium/base/debug/activity_analyzer.cc
+++ b/chromium/base/debug/activity_analyzer.cc
@@ -24,7 +24,6 @@ namespace {
// An empty snapshot that can be returned when there otherwise is none.
LazyInstance<ActivityUserData::Snapshot>::Leaky g_empty_user_data_snapshot;
-#if !defined(OS_NACL)
// DO NOT CHANGE VALUES. This is logged persistently in a histogram.
enum AnalyzerCreationError {
kInvalidMemoryMappedFile,
@@ -39,12 +38,11 @@ void LogAnalyzerCreationError(AnalyzerCreationError error) {
UMA_HISTOGRAM_ENUMERATION("ActivityTracker.Collect.AnalyzerCreationError",
error, kAnalyzerCreationErrorMax);
}
-#endif // !defined(OS_NACL)
} // namespace
-ThreadActivityAnalyzer::Snapshot::Snapshot() {}
-ThreadActivityAnalyzer::Snapshot::~Snapshot() {}
+ThreadActivityAnalyzer::Snapshot::Snapshot() = default;
+ThreadActivityAnalyzer::Snapshot::~Snapshot() = default;
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
const ThreadActivityTracker& tracker)
@@ -62,7 +60,7 @@ ThreadActivityAnalyzer::ThreadActivityAnalyzer(
PersistentMemoryAllocator::kSizeAny),
allocator->GetAllocSize(reference)) {}
-ThreadActivityAnalyzer::~ThreadActivityAnalyzer() {}
+ThreadActivityAnalyzer::~ThreadActivityAnalyzer() = default;
void ThreadActivityAnalyzer::AddGlobalInformation(
GlobalActivityAnalyzer* global) {
@@ -89,7 +87,29 @@ GlobalActivityAnalyzer::GlobalActivityAnalyzer(
DCHECK(allocator_);
}
-GlobalActivityAnalyzer::~GlobalActivityAnalyzer() {}
+GlobalActivityAnalyzer::~GlobalActivityAnalyzer() = default;
+
+// static
+std::unique_ptr<GlobalActivityAnalyzer>
+GlobalActivityAnalyzer::CreateWithAllocator(
+ std::unique_ptr<PersistentMemoryAllocator> allocator) {
+ if (allocator->GetMemoryState() ==
+ PersistentMemoryAllocator::MEMORY_UNINITIALIZED) {
+ LogAnalyzerCreationError(kPmaUninitialized);
+ return nullptr;
+ }
+ if (allocator->GetMemoryState() ==
+ PersistentMemoryAllocator::MEMORY_DELETED) {
+ LogAnalyzerCreationError(kPmaDeleted);
+ return nullptr;
+ }
+ if (allocator->IsCorrupt()) {
+ LogAnalyzerCreationError(kPmaCorrupt);
+ return nullptr;
+ }
+
+ return WrapUnique(new GlobalActivityAnalyzer(std::move(allocator)));
+}
#if !defined(OS_NACL)
// static
@@ -109,27 +129,34 @@ std::unique_ptr<GlobalActivityAnalyzer> GlobalActivityAnalyzer::CreateWithFile(
return nullptr;
}
- std::unique_ptr<FilePersistentMemoryAllocator> allocator(
- new FilePersistentMemoryAllocator(std::move(mmfile), 0, 0,
- base::StringPiece(), true));
- if (allocator->GetMemoryState() ==
- PersistentMemoryAllocator::MEMORY_UNINITIALIZED) {
- LogAnalyzerCreationError(kPmaUninitialized);
- return nullptr;
- }
- if (allocator->GetMemoryState() ==
- PersistentMemoryAllocator::MEMORY_DELETED) {
- LogAnalyzerCreationError(kPmaDeleted);
- return nullptr;
- }
- if (allocator->IsCorrupt()) {
- LogAnalyzerCreationError(kPmaCorrupt);
+ return CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
+ std::move(mmfile), 0, 0, StringPiece(), /*readonly=*/true));
+}
+#endif // !defined(OS_NACL)
+
+// static
+std::unique_ptr<GlobalActivityAnalyzer>
+GlobalActivityAnalyzer::CreateWithSharedMemory(
+ std::unique_ptr<SharedMemory> shm) {
+ if (shm->mapped_size() == 0 ||
+ !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
return nullptr;
}
+ return CreateWithAllocator(std::make_unique<SharedPersistentMemoryAllocator>(
+ std::move(shm), 0, StringPiece(), /*readonly=*/true));
+}
- return WrapUnique(new GlobalActivityAnalyzer(std::move(allocator)));
+// static
+std::unique_ptr<GlobalActivityAnalyzer>
+GlobalActivityAnalyzer::CreateWithSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t size) {
+ std::unique_ptr<SharedMemory> shm(
+ new SharedMemory(handle, /*readonly=*/true));
+ if (!shm->Map(size))
+ return nullptr;
+ return CreateWithSharedMemory(std::move(shm));
}
-#endif // !defined(OS_NACL)
int64_t GlobalActivityAnalyzer::GetFirstProcess() {
PrepareAllAnalyzers();
@@ -271,12 +298,12 @@ bool GlobalActivityAnalyzer::IsDataComplete() const {
return !allocator_->IsFull();
}
-GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() {}
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
const UserDataSnapshot& rhs) = default;
GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
UserDataSnapshot&& rhs) = default;
-GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() {}
+GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() = default;
void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
// Record the time when analysis started.
diff --git a/chromium/base/debug/activity_analyzer.h b/chromium/base/debug/activity_analyzer.h
index 88c8e84b99d..9add85a9e04 100644
--- a/chromium/base/debug/activity_analyzer.h
+++ b/chromium/base/debug/activity_analyzer.h
@@ -137,6 +137,10 @@ class BASE_EXPORT GlobalActivityAnalyzer {
~GlobalActivityAnalyzer();
+ // Creates a global analyzer using a given persistent-memory |allocator|.
+ static std::unique_ptr<GlobalActivityAnalyzer> CreateWithAllocator(
+ std::unique_ptr<PersistentMemoryAllocator> allocator);
+
#if !defined(OS_NACL)
// Creates a global analyzer using the contents of a file given in
// |file_path|.
@@ -144,6 +148,16 @@ class BASE_EXPORT GlobalActivityAnalyzer {
const FilePath& file_path);
#endif // !defined(OS_NACL)
+ // Like above but accesses an allocator in a mapped shared-memory segment.
+ static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemory(
+ std::unique_ptr<SharedMemory> shm);
+
+ // Like above but takes a handle to an existing shared memory segment and
+ // maps it before creating the tracker.
+ static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t size);
+
// Iterates over all known valid processes and returns their PIDs or zero
// if there are no more. Calls to GetFirstProcess() will perform a global
// snapshot in order to provide a relatively consistent state across the
diff --git a/chromium/base/debug/activity_analyzer_unittest.cc b/chromium/base/debug/activity_analyzer_unittest.cc
index 9525ed7a4ea..e08b43aff3e 100644
--- a/chromium/base/debug/activity_analyzer_unittest.cc
+++ b/chromium/base/debug/activity_analyzer_unittest.cc
@@ -37,7 +37,7 @@ class TestActivityTracker : public ThreadActivityTracker {
: ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
mem_segment_(std::move(memory)) {}
- ~TestActivityTracker() override {}
+ ~TestActivityTracker() override = default;
private:
std::unique_ptr<char[]> mem_segment_;
@@ -51,7 +51,7 @@ class ActivityAnalyzerTest : public testing::Test {
const int kMemorySize = 1 << 20; // 1MiB
const int kStackSize = 1 << 10; // 1KiB
- ActivityAnalyzerTest() {}
+ ActivityAnalyzerTest() = default;
~ActivityAnalyzerTest() override {
GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
@@ -119,7 +119,7 @@ class SimpleActivityThread : public SimpleThread {
exit_(false),
exit_condition_(&lock_) {}
- ~SimpleActivityThread() override {}
+ ~SimpleActivityThread() override = default;
void Run() override {
ThreadActivityTracker::ActivityId id =
@@ -216,6 +216,35 @@ TEST_F(ActivityAnalyzerTest, GlobalAnalyzerConstruction) {
EXPECT_EQ("bar", data_snapshot.at("foo").GetString());
}
+TEST_F(ActivityAnalyzerTest, GlobalAnalyzerFromSharedMemory) {
+ SharedMemoryHandle handle1;
+ SharedMemoryHandle handle2;
+
+ {
+ std::unique_ptr<SharedMemory> shmem(new SharedMemory());
+ ASSERT_TRUE(shmem->CreateAndMapAnonymous(kMemorySize));
+ handle1 = shmem->handle().Duplicate();
+ ASSERT_TRUE(handle1.IsValid());
+ handle2 = shmem->handle().Duplicate();
+ ASSERT_TRUE(handle2.IsValid());
+ }
+
+ GlobalActivityTracker::CreateWithSharedMemoryHandle(handle1, kMemorySize, 0,
+ "", 3);
+ GlobalActivityTracker::Get()->process_data().SetString("foo", "bar");
+
+ std::unique_ptr<GlobalActivityAnalyzer> analyzer =
+ GlobalActivityAnalyzer::CreateWithSharedMemoryHandle(handle2,
+ kMemorySize);
+
+ const int64_t pid = analyzer->GetFirstProcess();
+ ASSERT_NE(0, pid);
+ const ActivityUserData::Snapshot& data_snapshot =
+ analyzer->GetProcessDataSnapshot(pid);
+ ASSERT_LE(1U, data_snapshot.size());
+ EXPECT_EQ("bar", data_snapshot.at("foo").GetString());
+}
+
TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
ThreadActivityAnalyzer::Snapshot tracker_snapshot;
diff --git a/chromium/base/debug/activity_tracker.cc b/chromium/base/debug/activity_tracker.cc
index 817768973fd..1b3aaeec7b8 100644
--- a/chromium/base/debug/activity_tracker.cc
+++ b/chromium/base/debug/activity_tracker.cc
@@ -120,8 +120,8 @@ Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
} // namespace
-OwningProcess::OwningProcess() {}
-OwningProcess::~OwningProcess() {}
+OwningProcess::OwningProcess() = default;
+OwningProcess::~OwningProcess() = default;
void OwningProcess::Release_Initialize(int64_t pid) {
uint32_t old_id = data_id.load(std::memory_order_acquire);
@@ -185,7 +185,7 @@ ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
DCHECK(allocator);
}
-ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
+ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() = default;
ActivityTrackerMemoryAllocator::Reference
ActivityTrackerMemoryAllocator::GetObjectReference() {
@@ -276,9 +276,9 @@ void Activity::FillFrom(Activity* activity,
#endif
}
-ActivityUserData::TypedValue::TypedValue() {}
+ActivityUserData::TypedValue::TypedValue() = default;
ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
-ActivityUserData::TypedValue::~TypedValue() {}
+ActivityUserData::TypedValue::~TypedValue() = default;
StringPiece ActivityUserData::TypedValue::Get() const {
DCHECK_EQ(RAW_VALUE, type_);
@@ -323,13 +323,13 @@ StringPiece ActivityUserData::TypedValue::GetStringReference() const {
// These are required because std::atomic is (currently) not a POD type and
// thus clang requires explicit out-of-line constructors and destructors even
// when they do nothing.
-ActivityUserData::ValueInfo::ValueInfo() {}
+ActivityUserData::ValueInfo::ValueInfo() = default;
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
-ActivityUserData::ValueInfo::~ValueInfo() {}
-ActivityUserData::MemoryHeader::MemoryHeader() {}
-ActivityUserData::MemoryHeader::~MemoryHeader() {}
-ActivityUserData::FieldHeader::FieldHeader() {}
-ActivityUserData::FieldHeader::~FieldHeader() {}
+ActivityUserData::ValueInfo::~ValueInfo() = default;
+ActivityUserData::MemoryHeader::MemoryHeader() = default;
+ActivityUserData::MemoryHeader::~MemoryHeader() = default;
+ActivityUserData::FieldHeader::FieldHeader() = default;
+ActivityUserData::FieldHeader::~FieldHeader() = default;
ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
@@ -362,7 +362,7 @@ ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
ImportExistingData();
}
-ActivityUserData::~ActivityUserData() {}
+ActivityUserData::~ActivityUserData() = default;
bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
DCHECK(output_snapshot);
@@ -638,13 +638,11 @@ struct ThreadActivityTracker::Header {
// A memory location used to indicate if changes have been made to the data
// that would invalidate an in-progress read of its contents. The active
- // tracker will zero the value whenever something gets popped from the
- // stack. A monitoring tracker can write a non-zero value here, copy the
- // stack contents, and read the value to know, if it is still non-zero, that
- // the contents didn't change while being copied. This can handle concurrent
- // snapshot operations only if each snapshot writes a different bit (which
- // is not the current implementation so no parallel snapshots allowed).
- std::atomic<uint32_t> data_unchanged;
+ // tracker will increment the value whenever something gets popped from the
+ // stack. A monitoring tracker can check the value before and after access
+ // to know, if it's still the same, that the contents didn't change while
+ // being copied.
+ std::atomic<uint32_t> data_version;
// The last "exception" activity. This can't be stored on the stack because
// that could get popped as things unwind.
@@ -657,8 +655,8 @@ struct ThreadActivityTracker::Header {
char thread_name[32];
};
-ThreadActivityTracker::Snapshot::Snapshot() {}
-ThreadActivityTracker::Snapshot::~Snapshot() {}
+ThreadActivityTracker::Snapshot::Snapshot() = default;
+ThreadActivityTracker::Snapshot::~Snapshot() = default;
ThreadActivityTracker::ScopedActivity::ScopedActivity(
ThreadActivityTracker* tracker,
@@ -728,7 +726,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
DCHECK_EQ(0, header_->start_ticks);
DCHECK_EQ(0U, header_->stack_slots);
DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
- DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
+ DCHECK_EQ(0U, header_->data_version.load(std::memory_order_relaxed));
DCHECK_EQ(0, stack_[0].time_internal);
DCHECK_EQ(0U, stack_[0].origin_address);
DCHECK_EQ(0U, stack_[0].call_stack[0]);
@@ -760,7 +758,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
}
}
-ThreadActivityTracker::~ThreadActivityTracker() {}
+ThreadActivityTracker::~ThreadActivityTracker() = default;
ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
const void* program_counter,
@@ -840,12 +838,11 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
CalledOnValidThread());
// The stack has shrunk meaning that some other thread trying to copy the
- // contents for reporting purposes could get bad data. That thread would
- // have written a non-zero value into |data_unchanged|; clearing it here
- // will let that thread detect that something did change. This needs to
+ // contents for reporting purposes could get bad data. Increment the data
+ // version so that it con tell that things have changed. This needs to
// happen after the atomic |depth| operation above so a "release" store
// is required.
- header_->data_unchanged.store(0, std::memory_order_release);
+ header_->data_version.fetch_add(1, std::memory_order_release);
}
std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
@@ -894,7 +891,7 @@ void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
// The data has changed meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data.
- header_->data_unchanged.store(0, std::memory_order_relaxed);
+ header_->data_version.fetch_add(1, std::memory_order_relaxed);
}
bool ThreadActivityTracker::IsValid() const {
@@ -940,12 +937,13 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
const int64_t starting_process_id = header_->owner.process_id;
const int64_t starting_thread_id = header_->thread_ref.as_id;
- // Write a non-zero value to |data_unchanged| so it's possible to detect
- // at the end that nothing has changed since copying the data began. A
- // "cst" operation is required to ensure it occurs before everything else.
- // Using "cst" memory ordering is relatively expensive but this is only
- // done during analysis so doesn't directly affect the worker threads.
- header_->data_unchanged.store(1, std::memory_order_seq_cst);
+ // Note the current |data_version| so it's possible to detect at the end
+ // that nothing has changed since copying the data began. A "cst" operation
+ // is required to ensure it occurs before everything else. Using "cst"
+ // memory ordering is relatively expensive but this is only done during
+ // analysis so doesn't directly affect the worker threads.
+ const uint32_t pre_version =
+ header_->data_version.load(std::memory_order_seq_cst);
// Fetching the current depth also "acquires" the contents of the stack.
depth = header_->current_depth.load(std::memory_order_acquire);
@@ -965,7 +963,7 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
// Retry if something changed during the copy. A "cst" operation ensures
// it must happen after all the above operations.
- if (!header_->data_unchanged.load(std::memory_order_seq_cst))
+ if (header_->data_version.load(std::memory_order_seq_cst) != pre_version)
continue;
// Stack copied. Record it's full depth.
@@ -1025,6 +1023,10 @@ const void* ThreadActivityTracker::GetBaseAddress() {
return header_;
}
+uint32_t ThreadActivityTracker::GetDataVersionForTesting() {
+ return header_->data_version.load(std::memory_order_relaxed);
+}
+
void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
int64_t stamp) {
header_->owner.SetOwningProcessIdForTesting(pid, stamp);
@@ -1080,18 +1082,18 @@ ThreadActivityTracker::CreateUserDataForActivity(
// of std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
-GlobalActivityTracker::ModuleInfo::ModuleInfo() {}
+GlobalActivityTracker::ModuleInfo::ModuleInfo() = default;
GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
-GlobalActivityTracker::ModuleInfo::~ModuleInfo() {}
+GlobalActivityTracker::ModuleInfo::~ModuleInfo() = default;
GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
ModuleInfo&& rhs) = default;
GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
const ModuleInfo& rhs) = default;
-GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() {}
-GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() {}
+GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() = default;
+GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() = default;
bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
GlobalActivityTracker::ModuleInfo* info,
@@ -1223,7 +1225,7 @@ GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
int64_t pid)
: ActivityUserData(memory, size, pid) {}
-GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
+GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() = default;
void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
ValueType type,
@@ -1262,7 +1264,7 @@ void GlobalActivityTracker::CreateWithAllocator(
#if !defined(OS_NACL)
// static
-void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
+bool GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
size_t size,
uint64_t id,
StringPiece name,
@@ -1272,21 +1274,23 @@ void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
// Create and map the file into memory and make it globally available.
std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
- bool success =
- mapped_file->Initialize(File(file_path,
- File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
- File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
- {0, static_cast<int64_t>(size)},
- MemoryMappedFile::READ_WRITE_EXTEND);
- DCHECK(success);
+ bool success = mapped_file->Initialize(
+ File(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
+ File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
+ {0, size}, MemoryMappedFile::READ_WRITE_EXTEND);
+ if (!success)
+ return false;
+ if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mapped_file, false))
+ return false;
CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
std::move(mapped_file), size, id, name, false),
stack_depth, 0);
+ return true;
}
#endif // !defined(OS_NACL)
// static
-void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
+bool GlobalActivityTracker::CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
int stack_depth,
@@ -1294,6 +1298,37 @@ void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
CreateWithAllocator(
std::make_unique<LocalPersistentMemoryAllocator>(size, id, name),
stack_depth, process_id);
+ return true;
+}
+
+// static
+bool GlobalActivityTracker::CreateWithSharedMemory(
+ std::unique_ptr<SharedMemory> shm,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth) {
+ if (shm->mapped_size() == 0 ||
+ !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+ return false;
+ }
+ CreateWithAllocator(std::make_unique<SharedPersistentMemoryAllocator>(
+ std::move(shm), id, name, false),
+ stack_depth, 0);
+ return true;
+}
+
+// static
+bool GlobalActivityTracker::CreateWithSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t size,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth) {
+ std::unique_ptr<SharedMemory> shm(
+ new SharedMemory(handle, /*readonly=*/false));
+ if (!shm->Map(size))
+ return false;
+ return CreateWithSharedMemory(std::move(shm), id, name, stack_depth);
}
// static
@@ -1406,7 +1441,8 @@ void GlobalActivityTracker::RecordProcessLaunch(
// TODO(bcwhite): Measure this in UMA.
NOTREACHED() << "Process #" << process_id
<< " was previously recorded as \"launched\""
- << " with no corresponding exit.";
+ << " with no corresponding exit.\n"
+ << known_processes_[pid];
known_processes_.erase(pid);
}
@@ -1495,6 +1531,8 @@ void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
const void* memory = allocator_->GetAsArray<char>(
ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
+ if (!memory)
+ continue;
int64_t found_id;
int64_t create_stamp;
if (ActivityUserData::GetOwningProcessId(memory, &found_id,
@@ -1532,6 +1570,8 @@ void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
case ModuleInfoRecord::kPersistentTypeId: {
const void* memory = allocator_->GetAsArray<char>(
ref, type, PersistentMemoryAllocator::kSizeAny);
+ if (!memory)
+ continue;
int64_t found_id;
int64_t create_stamp;
diff --git a/chromium/base/debug/activity_tracker.h b/chromium/base/debug/activity_tracker.h
index 52aaa319dc2..bfd9f9d45c5 100644
--- a/chromium/base/debug/activity_tracker.h
+++ b/chromium/base/debug/activity_tracker.h
@@ -27,6 +27,7 @@
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/location.h"
+#include "base/memory/shared_memory.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/process/process_handle.h"
#include "base/strings/string_piece.h"
@@ -713,6 +714,10 @@ class BASE_EXPORT ThreadActivityTracker {
// Gets the base memory address used for storing data.
const void* GetBaseAddress();
+ // Access the "data version" value so tests can determine if an activity
+ // was pushed and popped in a single call.
+ uint32_t GetDataVersionForTesting();
+
// Explicitly sets the process ID.
void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
@@ -883,8 +888,8 @@ class BASE_EXPORT GlobalActivityTracker {
// Like above but internally creates an allocator around a disk file with
// the specified |size| at the given |file_path|. Any existing file will be
// overwritten. The |id| and |name| are arbitrary and stored in the allocator
- // for reference by whatever process reads it.
- static void CreateWithFile(const FilePath& file_path,
+ // for reference by whatever process reads it. Returns true if successful.
+ static bool CreateWithFile(const FilePath& file_path,
size_t size,
uint64_t id,
StringPiece name,
@@ -894,12 +899,27 @@ class BASE_EXPORT GlobalActivityTracker {
// Like above but internally creates an allocator using local heap memory of
// the specified size. This is used primarily for unit tests. The |process_id|
// can be zero to get it from the OS but is taken for testing purposes.
- static void CreateWithLocalMemory(size_t size,
+ static bool CreateWithLocalMemory(size_t size,
uint64_t id,
StringPiece name,
int stack_depth,
int64_t process_id);
+ // Like above but internally creates an allocator using a shared-memory
+ // segment. The segment must already be mapped into the local memory space.
+ static bool CreateWithSharedMemory(std::unique_ptr<SharedMemory> shm,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth);
+
+ // Like above but takes a handle to an existing shared memory segment and
+ // maps it before creating the tracker.
+ static bool CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
+ size_t size,
+ uint64_t id,
+ StringPiece name,
+ int stack_depth);
+
// Gets the global activity-tracker or null if none exists.
static GlobalActivityTracker* Get() {
return reinterpret_cast<GlobalActivityTracker*>(
diff --git a/chromium/base/debug/activity_tracker_unittest.cc b/chromium/base/debug/activity_tracker_unittest.cc
index be14a0a0307..79d19dbef02 100644
--- a/chromium/base/debug/activity_tracker_unittest.cc
+++ b/chromium/base/debug/activity_tracker_unittest.cc
@@ -33,7 +33,7 @@ class TestActivityTracker : public ThreadActivityTracker {
: ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
mem_segment_(std::move(memory)) {}
- ~TestActivityTracker() override {}
+ ~TestActivityTracker() override = default;
private:
std::unique_ptr<char[]> mem_segment_;
@@ -49,7 +49,7 @@ class ActivityTrackerTest : public testing::Test {
using ActivityId = ThreadActivityTracker::ActivityId;
- ActivityTrackerTest() {}
+ ActivityTrackerTest() = default;
~ActivityTrackerTest() override {
GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
@@ -250,6 +250,83 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
}
+namespace {
+
+class SimpleLockThread : public SimpleThread {
+ public:
+ SimpleLockThread(const std::string& name, Lock* lock)
+ : SimpleThread(name, Options()),
+ lock_(lock),
+ data_changed_(false),
+ is_running_(false) {}
+
+ ~SimpleLockThread() override = default;
+
+ void Run() override {
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ uint32_t pre_version = tracker->GetDataVersionForTesting();
+
+ is_running_.store(true, std::memory_order_relaxed);
+ lock_->Acquire();
+ data_changed_ = tracker->GetDataVersionForTesting() != pre_version;
+ lock_->Release();
+ is_running_.store(false, std::memory_order_relaxed);
+ }
+
+ bool IsRunning() { return is_running_.load(std::memory_order_relaxed); }
+
+ bool WasDataChanged() { return data_changed_; };
+
+ private:
+ Lock* lock_;
+ bool data_changed_;
+ std::atomic<bool> is_running_;
+
+ DISALLOW_COPY_AND_ASSIGN(SimpleLockThread);
+};
+
+} // namespace
+
+TEST_F(ActivityTrackerTest, LockTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+ ThreadActivityTracker::Snapshot snapshot;
+ ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+ Lock lock;
+ uint32_t pre_version = tracker->GetDataVersionForTesting();
+
+ // Check no activity when only "trying" a lock.
+ EXPECT_TRUE(lock.Try());
+ EXPECT_EQ(pre_version, tracker->GetDataVersionForTesting());
+ lock.Release();
+ EXPECT_EQ(pre_version, tracker->GetDataVersionForTesting());
+
+ // Check no activity when acquiring a free lock.
+ SimpleLockThread t1("locker1", &lock);
+ t1.Start();
+ t1.Join();
+ EXPECT_FALSE(t1.WasDataChanged());
+
+  // Check that activity is recorded when acquiring a busy lock.
+ SimpleLockThread t2("locker2", &lock);
+ lock.Acquire();
+ t2.Start();
+ while (!t2.IsRunning())
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+  // t2 can't join until the lock is released, but we have to give t2 time to
+  // actually block on the lock before releasing it or the results will not
+  // be correct.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(200));
+ lock.Release();
+ // Now the results will be valid.
+ t2.Join();
+ EXPECT_TRUE(t2.WasDataChanged());
+}
+
TEST_F(ActivityTrackerTest, ExceptionTest) {
GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
GlobalActivityTracker* global = GlobalActivityTracker::Get();
@@ -324,7 +401,7 @@ class SimpleActivityThread : public SimpleThread {
exit_(false),
exit_condition_(&lock_) {}
- ~SimpleActivityThread() override {}
+ ~SimpleActivityThread() override = default;
void Run() override {
ThreadActivityTracker::ActivityId id =
diff --git a/chromium/base/debug/crash_logging.cc b/chromium/base/debug/crash_logging.cc
index 32b6b05e396..4abbada0080 100644
--- a/chromium/base/debug/crash_logging.cc
+++ b/chromium/base/debug/crash_logging.cc
@@ -21,10 +21,12 @@ namespace debug {
namespace {
+CrashKeyImplementation* g_crash_key_impl = nullptr;
+
// Global map of crash key names to registration entries.
typedef std::unordered_map<base::StringPiece, CrashKey, base::StringPieceHash>
CrashKeyMap;
-CrashKeyMap* g_crash_keys_ = NULL;
+CrashKeyMap* g_crash_keys_ = nullptr;
// The maximum length of a single chunk.
size_t g_chunk_max_length_ = 0;
@@ -34,8 +36,8 @@ const char kChunkFormatString[] = "%s-%" PRIuS;
// The functions that are called to actually set the key-value pairs in the
// crash reportng system.
-SetCrashKeyValueFuncT g_set_key_func_ = NULL;
-ClearCrashKeyValueFuncT g_clear_key_func_ = NULL;
+SetCrashKeyValueFuncT g_set_key_func_ = nullptr;
+ClearCrashKeyValueFuncT g_clear_key_func_ = nullptr;
// For a given |length|, computes the number of chunks a value of that size
// will occupy.
@@ -49,6 +51,33 @@ const size_t kLargestValueAllowed = 2048;
} // namespace
+CrashKeyString* AllocateCrashKeyString(const char name[],
+ CrashKeySize value_length) {
+ if (!g_crash_key_impl)
+ return nullptr;
+
+ return g_crash_key_impl->Allocate(name, value_length);
+}
+
+void SetCrashKeyString(CrashKeyString* crash_key, base::StringPiece value) {
+ if (!g_crash_key_impl || !crash_key)
+ return;
+
+ g_crash_key_impl->Set(crash_key, value);
+}
+
+void ClearCrashKeyString(CrashKeyString* crash_key) {
+ if (!g_crash_key_impl || !crash_key)
+ return;
+
+ g_crash_key_impl->Clear(crash_key);
+}
+
+void SetCrashKeyImplementation(std::unique_ptr<CrashKeyImplementation> impl) {
+ delete g_crash_key_impl;
+ g_crash_key_impl = impl.release();
+}
+
void SetCrashKeyValue(const base::StringPiece& key,
const base::StringPiece& value) {
if (!g_set_key_func_ || !g_crash_keys_)
@@ -147,7 +176,7 @@ size_t InitCrashKeys(const CrashKey* const keys, size_t count,
DCHECK(!g_crash_keys_) << "Crash logging may only be initialized once";
if (!keys) {
delete g_crash_keys_;
- g_crash_keys_ = NULL;
+ g_crash_keys_ = nullptr;
return 0;
}
@@ -168,10 +197,10 @@ size_t InitCrashKeys(const CrashKey* const keys, size_t count,
const CrashKey* LookupCrashKey(const base::StringPiece& key) {
if (!g_crash_keys_)
- return NULL;
+ return nullptr;
CrashKeyMap::const_iterator it = g_crash_keys_->find(key.as_string());
if (it == g_crash_keys_->end())
- return NULL;
+ return nullptr;
return &(it->second);
}
@@ -197,10 +226,10 @@ std::vector<std::string> ChunkCrashKeyValue(const CrashKey& crash_key,
void ResetCrashLoggingForTesting() {
delete g_crash_keys_;
- g_crash_keys_ = NULL;
+ g_crash_keys_ = nullptr;
g_chunk_max_length_ = 0;
- g_set_key_func_ = NULL;
- g_clear_key_func_ = NULL;
+ g_set_key_func_ = nullptr;
+ g_clear_key_func_ = nullptr;
}
} // namespace debug
diff --git a/chromium/base/debug/crash_logging.h b/chromium/base/debug/crash_logging.h
index eaa65e4cff9..1377e18c497 100644
--- a/chromium/base/debug/crash_logging.h
+++ b/chromium/base/debug/crash_logging.h
@@ -7,6 +7,7 @@
#include <stddef.h>
+#include <memory>
#include <string>
#include <type_traits>
#include <vector>
@@ -15,15 +16,81 @@
#include "base/macros.h"
#include "base/strings/string_piece.h"
-// These functions add metadata to the upload payload when sending crash reports
-// to the crash server.
-//
-// IMPORTANT: On OS X and Linux, the key/value pairs are only sent as part of
-// the upload and are not included in the minidump!
-
namespace base {
namespace debug {
+// A crash key is an annotation that is carried along with a crash report, to
+// provide additional debugging information beyond a stack trace. Crash keys
+// have a name and a string value.
+//
+// The preferred API is //components/crash/core/common:crash_key, however not
+// all clients can hold a direct dependency on that target. The API provided
+// in this file indirects the dependency.
+//
+// Example usage:
+// static CrashKeyString* crash_key =
+// AllocateCrashKeyString("name", CrashKeySize::Size32);
+// SetCrashKeyString(crash_key, "value");
+// ClearCrashKeyString(crash_key);
+
+// The maximum length for a crash key's value must be one of the following
+// pre-determined values.
+enum class CrashKeySize {
+ Size32 = 32,
+ Size64 = 64,
+ Size256 = 256,
+};
+
+struct CrashKeyString;
+
+// Allocates a new crash key with the specified |name| with storage for a
+// value up to length |size|. This will return null if the crash key system is
+// not initialized.
+BASE_EXPORT CrashKeyString* AllocateCrashKeyString(const char name[],
+ CrashKeySize size);
+
+// Stores |value| into the specified |crash_key|. The |crash_key| may be null
+// if AllocateCrashKeyString() returned null. If |value| is longer than the
+// size with which the key was allocated, it will be truncated.
+BASE_EXPORT void SetCrashKeyString(CrashKeyString* crash_key,
+ base::StringPiece value);
+
+// Clears any value that was stored in |crash_key|. The |crash_key| may be
+// null.
+BASE_EXPORT void ClearCrashKeyString(CrashKeyString* crash_key);
+
+////////////////////////////////////////////////////////////////////////////////
+// The following declarations are used to initialize the crash key system
+// in //base by providing implementations for the above functions.
+
+// The virtual interface that provides the implementation for the crash key
+// API. This is implemented by a higher-layer component, and the instance is
+// set using the function below.
+class CrashKeyImplementation {
+ public:
+ virtual ~CrashKeyImplementation() {}
+
+ virtual CrashKeyString* Allocate(const char name[], CrashKeySize size) = 0;
+ virtual void Set(CrashKeyString* crash_key, base::StringPiece value) = 0;
+ virtual void Clear(CrashKeyString* crash_key) = 0;
+};
+
+// Initializes the crash key system in base by replacing the existing
+// implementation, if it exists, with |impl|. The |impl| is copied into base.
+BASE_EXPORT void SetCrashKeyImplementation(
+ std::unique_ptr<CrashKeyImplementation> impl);
+
+// The base structure for a crash key, storing the allocation metadata.
+struct CrashKeyString {
+ constexpr CrashKeyString(const char name[], CrashKeySize size)
+ : name(name), size(size) {}
+ const char* const name;
+ const CrashKeySize size;
+};
+
+// The API below is deprecated.
+////////////////////////////////////////////////////////////////////////////////
+
class StackTrace;
// Sets or clears a specific key-value pair from the crash metadata. Keys and
diff --git a/chromium/base/debug/crash_logging_unittest.cc b/chromium/base/debug/crash_logging_unittest.cc
index 5197c03e272..d877187be3d 100644
--- a/chromium/base/debug/crash_logging_unittest.cc
+++ b/chromium/base/debug/crash_logging_unittest.cc
@@ -15,7 +15,7 @@
namespace {
-std::map<std::string, std::string>* key_values_ = NULL;
+std::map<std::string, std::string>* key_values_ = nullptr;
} // namespace
@@ -32,7 +32,7 @@ class CrashLoggingTest : public testing::Test {
base::debug::ResetCrashLoggingForTesting();
delete key_values_;
- key_values_ = NULL;
+ key_values_ = nullptr;
}
private:
@@ -183,3 +183,13 @@ TEST_F(CrashLoggingTest, ChunkRounding) {
base::debug::CrashKey key = { "round", 12 };
EXPECT_EQ(3u, base::debug::InitCrashKeys(&key, 1, 5));
}
+
+TEST_F(CrashLoggingTest, UninitializedCrashKeyStringSupport) {
+ auto* crash_key = base::debug::AllocateCrashKeyString(
+ "test", base::debug::CrashKeySize::Size32);
+ EXPECT_FALSE(crash_key);
+
+ base::debug::SetCrashKeyString(crash_key, "value");
+
+ base::debug::ClearCrashKeyString(crash_key);
+}
diff --git a/chromium/base/debug/dump_without_crashing.cc b/chromium/base/debug/dump_without_crashing.cc
index 307e64e98f7..1ab8c9cc413 100644
--- a/chromium/base/debug/dump_without_crashing.cc
+++ b/chromium/base/debug/dump_without_crashing.cc
@@ -10,7 +10,7 @@ namespace {
// Pointer to the function that's called by DumpWithoutCrashing() to dump the
// process's memory.
-void (CDECL *dump_without_crashing_function_)() = NULL;
+void(CDECL* dump_without_crashing_function_)() = nullptr;
} // namespace
diff --git a/chromium/base/debug/proc_maps_linux.h b/chromium/base/debug/proc_maps_linux.h
index 38e92314c8f..f5f8a59b8c4 100644
--- a/chromium/base/debug/proc_maps_linux.h
+++ b/chromium/base/debug/proc_maps_linux.h
@@ -31,6 +31,9 @@ struct MappedMemoryRegion {
// Byte offset into |path| of the range mapped into memory.
unsigned long long offset;
+ // Image base, if this mapping corresponds to an ELF image.
+ uintptr_t base;
+
// Bitmask of read/write/execute/private/shared permissions.
uint8_t permissions;
diff --git a/chromium/base/debug/profiler.cc b/chromium/base/debug/profiler.cc
index b19e7ecd00f..e225f684f26 100644
--- a/chromium/base/debug/profiler.cc
+++ b/chromium/base/debug/profiler.cc
@@ -92,19 +92,19 @@ bool IsBinaryInstrumented() {
}
ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
- return NULL;
+ return nullptr;
}
DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
- return NULL;
+ return nullptr;
}
AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
- return NULL;
+ return nullptr;
}
MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
- return NULL;
+ return nullptr;
}
#else // defined(OS_WIN)
diff --git a/chromium/base/debug/stack_trace.cc b/chromium/base/debug/stack_trace.cc
index 798503ed863..771512176b5 100644
--- a/chromium/base/debug/stack_trace.cc
+++ b/chromium/base/debug/stack_trace.cc
@@ -209,7 +209,7 @@ const void *const *StackTrace::Addresses(size_t* count) const {
*count = count_;
if (count_)
return trace_;
- return NULL;
+ return nullptr;
}
std::string StackTrace::ToString() const {
diff --git a/chromium/base/debug/stack_trace_posix.cc b/chromium/base/debug/stack_trace_posix.cc
index 597c81dfc05..1d7df1f62f1 100644
--- a/chromium/base/debug/stack_trace_posix.cc
+++ b/chromium/base/debug/stack_trace_posix.cc
@@ -11,6 +11,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/types.h>
@@ -38,7 +39,9 @@
#include "base/debug/proc_maps_linux.h"
#endif
+#include "base/cfi_flags.h"
#include "base/debug/debugger.h"
+#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/free_deleter.h"
@@ -104,7 +107,7 @@ void DemangleSymbols(std::string* text) {
// Try to demangle the mangled symbol candidate.
int status = 0;
std::unique_ptr<char, base::FreeDeleter> demangled_symbol(
- abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
+ abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
if (status == 0) { // Demangling is successful.
// Remove the mangled symbol.
text->erase(mangled_start, mangled_end - mangled_start);
@@ -126,7 +129,7 @@ class BacktraceOutputHandler {
virtual void HandleOutput(const char* output) = 0;
protected:
- virtual ~BacktraceOutputHandler() {}
+ virtual ~BacktraceOutputHandler() = default;
};
#if !defined(__UCLIBC__) && !defined(_AIX)
@@ -235,7 +238,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
action.sa_sigaction = &StackDumpSignalHandler;
sigemptyset(&action.sa_mask);
- sigaction(signal, &action, NULL);
+ sigaction(signal, &action, nullptr);
return;
}
@@ -311,7 +314,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
}
PrintToStderr("\n");
-#if defined(CFI_ENFORCEMENT_TRAP)
+#if BUILDFLAG(CFI_ENFORCEMENT_TRAP)
if (signal == SIGILL && info->si_code == ILL_ILLOPN) {
PrintToStderr(
"CFI: Most likely a control flow integrity violation; for more "
@@ -319,7 +322,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
PrintToStderr(
"https://www.chromium.org/developers/testing/control-flow-integrity\n");
}
-#endif
+#endif // BUILDFLAG(CFI_ENFORCEMENT_TRAP)
debug::StackTrace().Print();
@@ -412,7 +415,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
public:
- PrintBacktraceOutputHandler() {}
+ PrintBacktraceOutputHandler() = default;
void HandleOutput(const char* output) override {
// NOTE: This code MUST be async-signal safe (it's used by in-process
@@ -563,25 +566,10 @@ class SandboxSymbolizeHelper {
// The assumption here is that iterating over
// std::vector<MappedMemoryRegion> using a const_iterator does not allocate
// dynamic memory, hence it is async-signal-safe.
- std::vector<MappedMemoryRegion>::const_iterator it;
- bool is_first = true;
- for (it = instance->regions_.begin(); it != instance->regions_.end();
- ++it, is_first = false) {
- const MappedMemoryRegion& region = *it;
+ for (const MappedMemoryRegion& region : instance->regions_) {
if (region.start <= pc && pc < region.end) {
start_address = region.start;
- // Don't subtract 'start_address' from the first entry:
- // * If a binary is compiled w/o -pie, then the first entry in
- // process maps is likely the binary itself (all dynamic libs
- // are mapped higher in address space). For such a binary,
- // instruction offset in binary coincides with the actual
- // instruction address in virtual memory (as code section
- // is mapped to a fixed memory range).
- // * If a binary is compiled with -pie, all the modules are
- // mapped high at address space (in particular, higher than
- // shadow memory of the tool), so the module can't be the
- // first entry.
- base_address = (is_first ? 0U : start_address) - region.offset;
+ base_address = region.base;
if (file_path && file_path_size > 0) {
strncpy(file_path, region.path.c_str(), file_path_size);
// Ensure null termination.
@@ -593,6 +581,60 @@ class SandboxSymbolizeHelper {
return -1;
}
+ // Set the base address for each memory region by reading ELF headers in
+ // process memory.
+ void SetBaseAddressesForMemoryRegions() {
+ base::ScopedFD mem_fd(
+ HANDLE_EINTR(open("/proc/self/mem", O_RDONLY | O_CLOEXEC)));
+ if (!mem_fd.is_valid())
+ return;
+
+ auto safe_memcpy = [&mem_fd](void* dst, uintptr_t src, size_t size) {
+ return HANDLE_EINTR(pread(mem_fd.get(), dst, size, src)) == ssize_t(size);
+ };
+
+ uintptr_t cur_base = 0;
+ for (auto& r : regions_) {
+ ElfW(Ehdr) ehdr;
+ static_assert(SELFMAG <= sizeof(ElfW(Ehdr)), "SELFMAG too large");
+ if ((r.permissions & MappedMemoryRegion::READ) &&
+ safe_memcpy(&ehdr, r.start, sizeof(ElfW(Ehdr))) &&
+ memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
+ switch (ehdr.e_type) {
+ case ET_EXEC:
+ cur_base = 0;
+ break;
+ case ET_DYN:
+ // Find the segment containing file offset 0. This will correspond
+ // to the ELF header that we just read. Normally this will have
+ // virtual address 0, but this is not guaranteed. We must subtract
+ // the virtual address from the address where the ELF header was
+ // mapped to get the base address.
+ //
+ // If we fail to find a segment for file offset 0, use the address
+ // of the ELF header as the base address.
+ cur_base = r.start;
+ for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
+ ElfW(Phdr) phdr;
+ if (safe_memcpy(&phdr, r.start + ehdr.e_phoff + i * sizeof(phdr),
+ sizeof(phdr)) &&
+ phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
+ cur_base = r.start - phdr.p_vaddr;
+ break;
+ }
+ }
+ break;
+ default:
+ // ET_REL or ET_CORE. These aren't directly executable, so they
+ // don't affect the base address.
+ break;
+ }
+ }
+
+ r.base = cur_base;
+ }
+ }
+
// Parses /proc/self/maps in order to compile a list of all object file names
// for the modules that are loaded in the current process.
// Returns true on success.
@@ -610,6 +652,8 @@ class SandboxSymbolizeHelper {
return false;
}
+ SetBaseAddressesForMemoryRegions();
+
is_initialized_ = true;
return true;
}
@@ -666,7 +710,7 @@ class SandboxSymbolizeHelper {
// Unregister symbolization callback.
void UnregisterCallback() {
if (is_initialized_) {
- google::InstallSymbolizeOpenObjectFileCallback(NULL);
+ google::InstallSymbolizeOpenObjectFileCallback(nullptr);
is_initialized_ = false;
}
}
@@ -716,7 +760,7 @@ bool EnableInProcessStackDumping() {
memset(&sigpipe_action, 0, sizeof(sigpipe_action));
sigpipe_action.sa_handler = SIG_IGN;
sigemptyset(&sigpipe_action.sa_mask);
- bool success = (sigaction(SIGPIPE, &sigpipe_action, NULL) == 0);
+ bool success = (sigaction(SIGPIPE, &sigpipe_action, nullptr) == 0);
// Avoid hangs during backtrace initialization, see above.
WarmUpBacktrace();
@@ -727,14 +771,14 @@ bool EnableInProcessStackDumping() {
action.sa_sigaction = &StackDumpSignalHandler;
sigemptyset(&action.sa_mask);
- success &= (sigaction(SIGILL, &action, NULL) == 0);
- success &= (sigaction(SIGABRT, &action, NULL) == 0);
- success &= (sigaction(SIGFPE, &action, NULL) == 0);
- success &= (sigaction(SIGBUS, &action, NULL) == 0);
- success &= (sigaction(SIGSEGV, &action, NULL) == 0);
+ success &= (sigaction(SIGILL, &action, nullptr) == 0);
+ success &= (sigaction(SIGABRT, &action, nullptr) == 0);
+ success &= (sigaction(SIGFPE, &action, nullptr) == 0);
+ success &= (sigaction(SIGBUS, &action, nullptr) == 0);
+ success &= (sigaction(SIGSEGV, &action, nullptr) == 0);
// On Linux, SIGSYS is reserved by the kernel for seccomp-bpf sandboxing.
#if !defined(OS_LINUX)
- success &= (sigaction(SIGSYS, &action, NULL) == 0);
+ success &= (sigaction(SIGSYS, &action, nullptr) == 0);
#endif // !defined(OS_LINUX)
return success;
@@ -784,11 +828,11 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write at least one NUL byte.
size_t n = 1;
if (n > sz)
- return NULL;
+ return nullptr;
if (base < 2 || base > 16) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
char* start = buf;
@@ -803,7 +847,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure we can write the '-' character.
if (++n > sz) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
*start++ = '-';
}
@@ -815,7 +859,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
buf[0] = '\000';
- return NULL;
+ return nullptr;
}
// Output the next digit.
diff --git a/chromium/base/debug/task_annotator.cc b/chromium/base/debug/task_annotator.cc
index 31f11beeba9..b9ff7f7d1c0 100644
--- a/chromium/base/debug/task_annotator.cc
+++ b/chromium/base/debug/task_annotator.cc
@@ -14,11 +14,9 @@
namespace base {
namespace debug {
-TaskAnnotator::TaskAnnotator() {
-}
+TaskAnnotator::TaskAnnotator() = default;
-TaskAnnotator::~TaskAnnotator() {
-}
+TaskAnnotator::~TaskAnnotator() = default;
void TaskAnnotator::DidQueueTask(const char* queue_function,
const PendingTask& pending_task) {
diff --git a/chromium/base/deferred_sequenced_task_runner.cc b/chromium/base/deferred_sequenced_task_runner.cc
index 21cb99a64f4..98304029435 100644
--- a/chromium/base/deferred_sequenced_task_runner.cc
+++ b/chromium/base/deferred_sequenced_task_runner.cc
@@ -18,8 +18,7 @@ DeferredSequencedTaskRunner::DeferredTask::DeferredTask()
DeferredSequencedTaskRunner::DeferredTask::DeferredTask(DeferredTask&& other) =
default;
-DeferredSequencedTaskRunner::DeferredTask::~DeferredTask() {
-}
+DeferredSequencedTaskRunner::DeferredTask::~DeferredTask() = default;
DeferredSequencedTaskRunner::DeferredTask&
DeferredSequencedTaskRunner::DeferredTask::operator=(DeferredTask&& other) =
@@ -29,8 +28,7 @@ DeferredSequencedTaskRunner::DeferredSequencedTaskRunner(
scoped_refptr<SequencedTaskRunner> target_task_runner)
: started_(false), target_task_runner_(std::move(target_task_runner)) {}
-DeferredSequencedTaskRunner::~DeferredSequencedTaskRunner() {
-}
+DeferredSequencedTaskRunner::~DeferredSequencedTaskRunner() = default;
bool DeferredSequencedTaskRunner::PostDelayedTask(const Location& from_here,
OnceClosure task,
diff --git a/chromium/base/environment.cc b/chromium/base/environment.cc
index 6c13f5f2fc2..f6655d9a917 100644
--- a/chromium/base/environment.cc
+++ b/chromium/base/environment.cc
@@ -132,7 +132,7 @@ const char kHome[] = "HOME";
} // namespace env_vars
-Environment::~Environment() {}
+Environment::~Environment() = default;
// static
std::unique_ptr<Environment> Environment::Create() {
diff --git a/chromium/base/environment_unittest.cc b/chromium/base/environment_unittest.cc
index 80e3aa6e441..23aec511812 100644
--- a/chromium/base/environment_unittest.cc
+++ b/chromium/base/environment_unittest.cc
@@ -16,13 +16,7 @@ namespace base {
namespace {
-// Fuchsia doesn't set PATH, Windows doesn't set PWD. (Fuchsia may eventually
-// set PATH and then this can be removed again.)
-#if defined(OS_FUCHSIA)
-constexpr char kValidEnvironmentVariable[] = "PWD";
-#else
constexpr char kValidEnvironmentVariable[] = "PATH";
-#endif
} // namespace
@@ -137,39 +131,39 @@ TEST_F(EnvironmentTest, AlterEnvironment) {
#else
TEST_F(EnvironmentTest, AlterEnvironment) {
- const char* const empty[] = { NULL };
- const char* const a2[] = { "A=2", NULL };
+ const char* const empty[] = {nullptr};
+ const char* const a2[] = {"A=2", nullptr};
EnvironmentMap changes;
std::unique_ptr<char* []> e;
e = AlterEnvironment(empty, changes);
- EXPECT_TRUE(e[0] == NULL);
+ EXPECT_TRUE(e[0] == nullptr);
changes["A"] = "1";
e = AlterEnvironment(empty, changes);
EXPECT_EQ(std::string("A=1"), e[0]);
- EXPECT_TRUE(e[1] == NULL);
+ EXPECT_TRUE(e[1] == nullptr);
changes.clear();
changes["A"] = std::string();
e = AlterEnvironment(empty, changes);
- EXPECT_TRUE(e[0] == NULL);
+ EXPECT_TRUE(e[0] == nullptr);
changes.clear();
e = AlterEnvironment(a2, changes);
EXPECT_EQ(std::string("A=2"), e[0]);
- EXPECT_TRUE(e[1] == NULL);
+ EXPECT_TRUE(e[1] == nullptr);
changes.clear();
changes["A"] = "1";
e = AlterEnvironment(a2, changes);
EXPECT_EQ(std::string("A=1"), e[0]);
- EXPECT_TRUE(e[1] == NULL);
+ EXPECT_TRUE(e[1] == nullptr);
changes.clear();
changes["A"] = std::string();
e = AlterEnvironment(a2, changes);
- EXPECT_TRUE(e[0] == NULL);
+ EXPECT_TRUE(e[0] == nullptr);
}
#endif
diff --git a/chromium/base/export_template.h b/chromium/base/export_template.h
new file mode 100644
index 00000000000..aac8b7c7f12
--- /dev/null
+++ b/chromium/base/export_template.h
@@ -0,0 +1,163 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_EXPORT_TEMPLATE_H_
+#define BASE_EXPORT_TEMPLATE_H_
+
+// Synopsis
+//
+// This header provides macros for using FOO_EXPORT macros with explicit
+// template instantiation declarations and definitions.
+// Generally, the FOO_EXPORT macros are used at declarations,
+// and GCC requires them to be used at explicit instantiation declarations,
+// but MSVC requires __declspec(dllexport) to be used at the explicit
+// instantiation definitions instead.
+
+// Usage
+//
+// In a header file, write:
+//
+// extern template class EXPORT_TEMPLATE_DECLARE(FOO_EXPORT) foo<bar>;
+//
+// In a source file, write:
+//
+// template class EXPORT_TEMPLATE_DEFINE(FOO_EXPORT) foo<bar>;
+
+// Implementation notes
+//
+// The implementation of this header uses some subtle macro semantics to
+// detect what the provided FOO_EXPORT value was defined as and then
+// to dispatch to appropriate macro definitions. Unfortunately,
+// MSVC's C preprocessor is rather non-compliant and requires special
+// care to make it work.
+//
+// Issue 1.
+//
+// #define F(x)
+// F()
+//
+// MSVC emits warning C4003 ("not enough actual parameters for macro
+// 'F'), even though it's a valid macro invocation. This affects the
+// macros below that take just an "export" parameter, because export
+// may be empty.
+//
+// As a workaround, we can add a dummy parameter and arguments:
+//
+// #define F(x,_)
+// F(,)
+//
+// Issue 2.
+//
+// #define F(x) G##x
+// #define Gj() ok
+// F(j())
+//
+// The correct replacement for "F(j())" is "ok", but MSVC replaces it
+// with "Gj()". As a workaround, we can pass the result to an
+// identity macro to force MSVC to look for replacements again. (This
+// is why EXPORT_TEMPLATE_STYLE_3 exists.)
+
+#define EXPORT_TEMPLATE_DECLARE(export) \
+ EXPORT_TEMPLATE_INVOKE(DECLARE, EXPORT_TEMPLATE_STYLE(export, ), export)
+#define EXPORT_TEMPLATE_DEFINE(export) \
+ EXPORT_TEMPLATE_INVOKE(DEFINE, EXPORT_TEMPLATE_STYLE(export, ), export)
+
+// INVOKE is an internal helper macro to perform parameter replacements
+// and token pasting to chain invoke another macro. E.g.,
+// EXPORT_TEMPLATE_INVOKE(DECLARE, DEFAULT, FOO_EXPORT)
+// will expand to call
+// EXPORT_TEMPLATE_DECLARE_DEFAULT(FOO_EXPORT, )
+// (but with FOO_EXPORT expanded too).
+#define EXPORT_TEMPLATE_INVOKE(which, style, export) \
+ EXPORT_TEMPLATE_INVOKE_2(which, style, export)
+#define EXPORT_TEMPLATE_INVOKE_2(which, style, export) \
+ EXPORT_TEMPLATE_##which##_##style(export, )
+
+// Default style is to apply the FOO_EXPORT macro at declaration sites.
+#define EXPORT_TEMPLATE_DECLARE_DEFAULT(export, _) export
+#define EXPORT_TEMPLATE_DEFINE_DEFAULT(export, _)
+
+// The "MSVC hack" style is used when FOO_EXPORT is defined
+// as __declspec(dllexport), which MSVC requires to be used at
+// definition sites instead.
+#define EXPORT_TEMPLATE_DECLARE_MSVC_HACK(export, _)
+#define EXPORT_TEMPLATE_DEFINE_MSVC_HACK(export, _) export
+
+// EXPORT_TEMPLATE_STYLE is an internal helper macro that identifies which
+// export style needs to be used for the provided FOO_EXPORT macro definition.
+// "", "__attribute__(...)", and "__declspec(dllimport)" are mapped
+// to "DEFAULT"; while "__declspec(dllexport)" is mapped to "MSVC_HACK".
+//
+// It's implemented with token pasting to transform the __attribute__ and
+// __declspec annotations into macro invocations. E.g., if FOO_EXPORT is
+// defined as "__declspec(dllimport)", it undergoes the following sequence of
+// macro substitutions:
+// EXPORT_TEMPLATE_STYLE(FOO_EXPORT, )
+// EXPORT_TEMPLATE_STYLE_2(__declspec(dllimport), )
+// EXPORT_TEMPLATE_STYLE_3(EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport))
+// EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport)
+// EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport
+// DEFAULT
+#define EXPORT_TEMPLATE_STYLE(export, _) EXPORT_TEMPLATE_STYLE_2(export, )
+#define EXPORT_TEMPLATE_STYLE_2(export, _) \
+ EXPORT_TEMPLATE_STYLE_3( \
+ EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA##export)
+#define EXPORT_TEMPLATE_STYLE_3(style) style
+
+// Internal helper macros for EXPORT_TEMPLATE_STYLE.
+//
+// XXX: C++ reserves all identifiers containing "__" for the implementation,
+// but "__attribute__" and "__declspec" already contain "__" and the token-paste
+// operator can only add characters; not remove them. To minimize the risk of
+// conflict with implementations, we include "foj3FJo5StF0OvIzl7oMxA" (a random
+// 128-bit string, encoded in Base64) in the macro name.
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA DEFAULT
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__attribute__(...) \
+ DEFAULT
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__declspec(arg) \
+ EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_##arg
+
+// Internal helper macros for EXPORT_TEMPLATE_STYLE.
+#define EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllexport MSVC_HACK
+#define EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport DEFAULT
+
+// Sanity checks.
+//
+// EXPORT_TEMPLATE_TEST uses the same macro invocation pattern as
+// EXPORT_TEMPLATE_DECLARE and EXPORT_TEMPLATE_DEFINE do to check that they're
+// working correctly. When they're working correctly, the sequence of macro
+// replacements should go something like:
+//
+// EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+//
+// static_assert(EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+// EXPORT_TEMPLATE_STYLE(__declspec(dllimport), ),
+// __declspec(dllimport)), "__declspec(dllimport)");
+//
+// static_assert(EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+// DEFAULT, __declspec(dllimport)), "__declspec(dllimport)");
+//
+// static_assert(EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(
+// __declspec(dllimport)), "__declspec(dllimport)");
+//
+// static_assert(true, "__declspec(dllimport)");
+//
+// When they're not working correctly, a syntax error should occur instead.
+#define EXPORT_TEMPLATE_TEST(want, export) \
+ static_assert(EXPORT_TEMPLATE_INVOKE( \
+ TEST_##want, EXPORT_TEMPLATE_STYLE(export, ), export), \
+ #export)
+#define EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(...) true
+#define EXPORT_TEMPLATE_TEST_MSVC_HACK_MSVC_HACK(...) true
+
+EXPORT_TEMPLATE_TEST(DEFAULT, );
+EXPORT_TEMPLATE_TEST(DEFAULT, __attribute__((visibility("default"))));
+EXPORT_TEMPLATE_TEST(MSVC_HACK, __declspec(dllexport));
+EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+
+#undef EXPORT_TEMPLATE_TEST
+#undef EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT
+#undef EXPORT_TEMPLATE_TEST_MSVC_HACK_MSVC_HACK
+
+#endif // BASE_EXPORT_TEMPLATE_H_
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index 7d984f1ad1d..e9aa54bc54c 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -81,9 +81,9 @@ const Feature kSyzyAsanDCheckIsFatalFeature{"DcheckIsFatal",
base::FEATURE_DISABLED_BY_DEFAULT};
#endif // defined(SYZYASAN)
-FeatureList::FeatureList() {}
+FeatureList::FeatureList() = default;
-FeatureList::~FeatureList() {}
+FeatureList::~FeatureList() = default;
void FeatureList::InitializeFromCommandLine(
const std::string& enable_features,
diff --git a/chromium/base/file_descriptor_store.cc b/chromium/base/file_descriptor_store.cc
index 34f3381fe96..71cf2b3f51d 100644
--- a/chromium/base/file_descriptor_store.cc
+++ b/chromium/base/file_descriptor_store.cc
@@ -26,7 +26,7 @@ FileDescriptorStore::Descriptor::Descriptor(
FileDescriptorStore::Descriptor&& other)
: key(other.key), fd(std::move(other.fd)), region(other.region) {}
-FileDescriptorStore::Descriptor::~Descriptor() {}
+FileDescriptorStore::Descriptor::~Descriptor() = default;
// static
FileDescriptorStore& FileDescriptorStore::GetInstance() {
@@ -66,8 +66,8 @@ void FileDescriptorStore::Set(const std::string& key,
descriptors_.insert(std::make_pair(key, std::move(descriptor)));
}
-FileDescriptorStore::FileDescriptorStore() {}
+FileDescriptorStore::FileDescriptorStore() = default;
-FileDescriptorStore::~FileDescriptorStore() {}
+FileDescriptorStore::~FileDescriptorStore() = default;
} // namespace base
diff --git a/chromium/base/file_version_info_win.cc b/chromium/base/file_version_info_win.cc
index 00261b76df5..4affd817ab1 100644
--- a/chromium/base/file_version_info_win.cc
+++ b/chromium/base/file_version_info_win.cc
@@ -68,7 +68,7 @@ FileVersionInfo* FileVersionInfo::CreateFileVersionInfoForModule(
// static
FileVersionInfo* FileVersionInfo::CreateFileVersionInfo(
const FilePath& file_path) {
- base::ThreadRestrictions::AssertIOAllowed();
+ base::AssertBlockingAllowed();
DWORD dummy;
const wchar_t* path = file_path.value().c_str();
diff --git a/chromium/base/files/file.cc b/chromium/base/files/file.cc
index 672950045a0..50b4370d309 100644
--- a/chromium/base/files/file.cc
+++ b/chromium/base/files/file.cc
@@ -17,8 +17,7 @@ File::Info::Info()
is_symbolic_link(false) {
}
-File::Info::~Info() {
-}
+File::Info::~Info() = default;
File::File()
: error_details_(FILE_ERROR_FAILED),
diff --git a/chromium/base/files/file_enumerator.cc b/chromium/base/files/file_enumerator.cc
index dfa277a2d1c..9dfb2ba04b5 100644
--- a/chromium/base/files/file_enumerator.cc
+++ b/chromium/base/files/file_enumerator.cc
@@ -8,8 +8,7 @@
namespace base {
-FileEnumerator::FileInfo::~FileInfo() {
-}
+FileEnumerator::FileInfo::~FileInfo() = default;
bool FileEnumerator::ShouldSkip(const FilePath& path) {
FilePath::StringType basename = path.BaseName().value();
diff --git a/chromium/base/files/file_enumerator_posix.cc b/chromium/base/files/file_enumerator_posix.cc
index 20452eade8e..4b429c64482 100644
--- a/chromium/base/files/file_enumerator_posix.cc
+++ b/chromium/base/files/file_enumerator_posix.cc
@@ -92,11 +92,10 @@ FileEnumerator::FileEnumerator(const FilePath& root_path,
pending_paths_.push(root_path);
}
-FileEnumerator::~FileEnumerator() {
-}
+FileEnumerator::~FileEnumerator() = default;
FilePath FileEnumerator::Next() {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
++current_directory_entry_;
diff --git a/chromium/base/files/file_enumerator_win.cc b/chromium/base/files/file_enumerator_win.cc
index 826e293bed8..f96074cec43 100644
--- a/chromium/base/files/file_enumerator_win.cc
+++ b/chromium/base/files/file_enumerator_win.cc
@@ -111,7 +111,7 @@ FileEnumerator::FileInfo FileEnumerator::GetInfo() const {
}
FilePath FileEnumerator::Next() {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
while (has_find_data_ || !pending_paths_.empty()) {
if (!has_find_data_) {
diff --git a/chromium/base/files/file_locking_unittest.cc b/chromium/base/files/file_locking_unittest.cc
index 067ff570075..e158b7d06a2 100644
--- a/chromium/base/files/file_locking_unittest.cc
+++ b/chromium/base/files/file_locking_unittest.cc
@@ -121,7 +121,7 @@ MULTIPROCESS_TEST_MAIN(ChildMain) {
class FileLockingTest : public testing::Test {
public:
- FileLockingTest() {}
+ FileLockingTest() = default;
protected:
void SetUp() override {
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index cc3c0d12e66..29a0b40c811 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -169,11 +169,9 @@ bool IsEmptyOrSpecialCase(const StringType& path) {
} // namespace
-FilePath::FilePath() {
-}
+FilePath::FilePath() = default;
-FilePath::FilePath(const FilePath& that) : path_(that.path_) {
-}
+FilePath::FilePath(const FilePath& that) = default;
FilePath::FilePath(FilePath&& that) noexcept = default;
FilePath::FilePath(StringPieceType path) {
@@ -183,13 +181,9 @@ FilePath::FilePath(StringPieceType path) {
path_.erase(nul_pos, StringType::npos);
}
-FilePath::~FilePath() {
-}
+FilePath::~FilePath() = default;
-FilePath& FilePath::operator=(const FilePath& that) {
- path_ = that.path_;
- return *this;
-}
+FilePath& FilePath::operator=(const FilePath& that) = default;
FilePath& FilePath::operator=(FilePath&& that) = default;
@@ -260,7 +254,7 @@ void FilePath::GetComponents(std::vector<StringType>* components) const {
}
bool FilePath::IsParent(const FilePath& child) const {
- return AppendRelativePath(child, NULL);
+ return AppendRelativePath(child, nullptr);
}
bool FilePath::AppendRelativePath(const FilePath& child,
@@ -299,7 +293,7 @@ bool FilePath::AppendRelativePath(const FilePath& child,
++child_comp;
}
- if (path != NULL) {
+ if (path != nullptr) {
for (; child_comp != child_components.end(); ++child_comp) {
*path = path->Append(*child_comp);
}
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index 1c1bb3eb3c7..4a575671352 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -212,7 +212,7 @@ void InotifyReaderCallback(InotifyReader* reader, int inotify_fd) {
// Wait until some inotify events are available.
int select_result =
- HANDLE_EINTR(select(inotify_fd + 1, &rfds, NULL, NULL, NULL));
+ HANDLE_EINTR(select(inotify_fd + 1, &rfds, nullptr, nullptr, nullptr));
if (select_result < 0) {
DPLOG(WARNING) << "select failed";
return;
diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc
index 9945007d693..1ae5dc16747 100644
--- a/chromium/base/files/file_path_watcher_unittest.cc
+++ b/chromium/base/files/file_path_watcher_unittest.cc
@@ -75,7 +75,7 @@ class NotificationCollector
private:
friend class base::RefCountedThreadSafe<NotificationCollector>;
- ~NotificationCollector() {}
+ ~NotificationCollector() = default;
void RecordChange(TestDelegate* delegate) {
// Warning: |delegate| is Unretained. Do not dereference.
@@ -100,8 +100,8 @@ class NotificationCollector
class TestDelegateBase : public SupportsWeakPtr<TestDelegateBase> {
public:
- TestDelegateBase() {}
- virtual ~TestDelegateBase() {}
+ TestDelegateBase() = default;
+ virtual ~TestDelegateBase() = default;
virtual void OnFileChanged(const FilePath& path, bool error) = 0;
@@ -120,7 +120,7 @@ class TestDelegate : public TestDelegateBase {
: collector_(collector) {
collector_->Register(this);
}
- ~TestDelegate() override {}
+ ~TestDelegate() override = default;
void OnFileChanged(const FilePath& path, bool error) override {
if (error)
@@ -144,7 +144,7 @@ class FilePathWatcherTest : public testing::Test {
{
}
- ~FilePathWatcherTest() override {}
+ ~FilePathWatcherTest() override = default;
protected:
void SetUp() override {
@@ -275,7 +275,7 @@ class Deleter : public TestDelegateBase {
: watcher_(watcher),
loop_(loop) {
}
- ~Deleter() override {}
+ ~Deleter() override = default;
void OnFileChanged(const FilePath&, bool) override {
watcher_.reset();
@@ -304,7 +304,7 @@ TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
// We win if we haven't crashed yet.
// Might as well double-check it got deleted, too.
- ASSERT_TRUE(deleter->watcher() == NULL);
+ ASSERT_TRUE(deleter->watcher() == nullptr);
}
// Verify that deleting the watcher works even if there is a pending
diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc
index 6b5d174525c..20d59cbc054 100644
--- a/chromium/base/files/file_posix.cc
+++ b/chromium/base/files/file_posix.cc
@@ -32,12 +32,12 @@ namespace {
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
int CallFstat(int fd, stat_wrapper_t *sb) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return fstat(fd, sb);
}
#else
int CallFstat(int fd, stat_wrapper_t *sb) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return fstat64(fd, sb);
}
#endif
@@ -178,12 +178,12 @@ void File::Close() {
return;
SCOPED_FILE_TRACE("Close");
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
file_.reset();
}
int64_t File::Seek(Whence whence, int64_t offset) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE_WITH_SIZE("Seek", offset);
@@ -200,7 +200,7 @@ int64_t File::Seek(Whence whence, int64_t offset) {
}
int File::Read(int64_t offset, char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
if (size < 0)
return -1;
@@ -222,7 +222,7 @@ int File::Read(int64_t offset, char* data, int size) {
}
int File::ReadAtCurrentPos(char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
if (size < 0)
return -1;
@@ -243,14 +243,14 @@ int File::ReadAtCurrentPos(char* data, int size) {
}
int File::ReadNoBestEffort(int64_t offset, char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE_WITH_SIZE("ReadNoBestEffort", size);
return HANDLE_EINTR(pread(file_.get(), data, size, offset));
}
int File::ReadAtCurrentPosNoBestEffort(char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
if (size < 0)
return -1;
@@ -260,7 +260,7 @@ int File::ReadAtCurrentPosNoBestEffort(char* data, int size) {
}
int File::Write(int64_t offset, const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (IsOpenAppend(file_.get()))
return WriteAtCurrentPos(data, size);
@@ -286,7 +286,7 @@ int File::Write(int64_t offset, const char* data, int size) {
}
int File::WriteAtCurrentPos(const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
if (size < 0)
return -1;
@@ -308,7 +308,7 @@ int File::WriteAtCurrentPos(const char* data, int size) {
}
int File::WriteAtCurrentPosNoBestEffort(const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
if (size < 0)
return -1;
@@ -330,7 +330,7 @@ int64_t File::GetLength() {
}
bool File::SetLength(int64_t length) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE_WITH_SIZE("SetLength", length);
@@ -338,7 +338,7 @@ bool File::SetLength(int64_t length) {
}
bool File::SetTimes(Time last_access_time, Time last_modified_time) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE("SetTimes");
@@ -432,7 +432,7 @@ File::Error File::OSErrorToFileError(int saved_errno) {
#if !defined(OS_NACL)
// TODO(erikkay): does it make sense to support FLAG_EXCLUSIVE_* here?
void File::DoInitialize(const FilePath& path, uint32_t flags) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(!IsValid());
int open_flags = 0;
@@ -518,7 +518,7 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
#endif // !defined(OS_NACL)
bool File::Flush() {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE("Flush");
diff --git a/chromium/base/files/file_proxy.cc b/chromium/base/files/file_proxy.cc
index a7950f001b7..26a49e16dfe 100644
--- a/chromium/base/files/file_proxy.cc
+++ b/chromium/base/files/file_proxy.cc
@@ -317,7 +317,7 @@ bool FileProxy::Write(int64_t offset,
int bytes_to_write,
const WriteCallback& callback) {
DCHECK(file_.IsValid());
- if (bytes_to_write <= 0 || buffer == NULL)
+ if (bytes_to_write <= 0 || buffer == nullptr)
return false;
WriteHelper* helper =
diff --git a/chromium/base/files/file_proxy_unittest.cc b/chromium/base/files/file_proxy_unittest.cc
index d75b304f209..8c802674039 100644
--- a/chromium/base/files/file_proxy_unittest.cc
+++ b/chromium/base/files/file_proxy_unittest.cc
@@ -117,7 +117,7 @@ TEST_F(FileProxyTest, CreateOrOpen_Create) {
TEST_F(FileProxyTest, CreateOrOpen_Open) {
// Creates a file.
- base::WriteFile(TestPath(), NULL, 0);
+ base::WriteFile(TestPath(), nullptr, 0);
ASSERT_TRUE(PathExists(TestPath()));
// Opens the created file.
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index 4449543031f..757e1b223cf 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -334,6 +334,13 @@ TEST(FileTest, Length) {
EXPECT_EQ(file_size, bytes_read);
for (int i = 0; i < file_size; i++)
EXPECT_EQ(data_to_write[i], data_read[i]);
+
+ // Close the file and reopen with base::File::FLAG_CREATE_ALWAYS, and make
+ // sure the file is empty (old file was overridden).
+ file.Close();
+ file.Initialize(file_path,
+ base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+ EXPECT_EQ(0, file.GetLength());
}
// Flakily fails: http://crbug.com/86494
diff --git a/chromium/base/files/file_util.cc b/chromium/base/files/file_util.cc
index 80fa44f9ea2..6fd5dd86dea 100644
--- a/chromium/base/files/file_util.cc
+++ b/chromium/base/files/file_util.cc
@@ -178,13 +178,13 @@ bool IsDirectoryEmpty(const FilePath& dir_path) {
FILE* CreateAndOpenTemporaryFile(FilePath* path) {
FilePath directory;
if (!GetTempDir(&directory))
- return NULL;
+ return nullptr;
return CreateAndOpenTemporaryFileInDir(directory, path);
}
bool CreateDirectory(const FilePath& full_path) {
- return CreateDirectoryAndGetError(full_path, NULL);
+ return CreateDirectoryAndGetError(full_path, nullptr);
}
bool GetFileSize(const FilePath& file_path, int64_t* file_size) {
@@ -215,14 +215,14 @@ bool TouchFile(const FilePath& path,
#endif // !defined(OS_NACL_NONSFI)
bool CloseFile(FILE* file) {
- if (file == NULL)
+ if (file == nullptr)
return true;
return fclose(file) == 0;
}
#if !defined(OS_NACL_NONSFI)
bool TruncateFile(FILE* file) {
- if (file == NULL)
+ if (file == nullptr)
return false;
long current_offset = ftell(file);
if (current_offset == -1)
diff --git a/chromium/base/files/file_util_mac.mm b/chromium/base/files/file_util_mac.mm
index d3e14a37870..392fbcef36f 100644
--- a/chromium/base/files/file_util_mac.mm
+++ b/chromium/base/files/file_util_mac.mm
@@ -18,7 +18,7 @@
namespace base {
bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (from_path.ReferencesParent() || to_path.ReferencesParent())
return false;
return (copyfile(from_path.value().c_str(),
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index 98887acf996..5adac6b4508 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -68,20 +68,20 @@ namespace {
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
static int CallStat(const char *path, stat_wrapper_t *sb) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return stat(path, sb);
}
static int CallLstat(const char *path, stat_wrapper_t *sb) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return lstat(path, sb);
}
#else // defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
static int CallStat(const char *path, stat_wrapper_t *sb) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return stat64(path, sb);
}
static int CallLstat(const char *path, stat_wrapper_t *sb) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return lstat64(path, sb);
}
#endif // !(defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL))
@@ -142,7 +142,7 @@ std::string TempFileName() {
// file descriptor. |path| is set to the temporary file path.
// This function does NOT unlink() the file.
int CreateAndOpenFdForTemporaryFile(FilePath directory, FilePath* path) {
- ThreadRestrictions::AssertIOAllowed(); // For call to mkstemp().
+ AssertBlockingAllowed(); // For call to mkstemp().
*path = directory.Append(base::TempFileName());
const std::string& tmpdir_string = path->value();
// this should be OK since mkstemp just replaces characters in place
@@ -168,7 +168,7 @@ bool DetermineDevShmExecutable() {
CHECK_GE(sysconf_result, 0);
size_t pagesize = static_cast<size_t>(sysconf_result);
CHECK_GE(sizeof(pagesize), sizeof(sysconf_result));
- void* mapping = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd.get(), 0);
+ void* mapping = mmap(nullptr, pagesize, PROT_READ, MAP_SHARED, fd.get(), 0);
if (mapping != MAP_FAILED) {
if (mprotect(mapping, pagesize, PROT_READ | PROT_EXEC) == 0)
result = true;
@@ -236,9 +236,9 @@ std::string AppendModeCharacter(StringPiece mode, char mode_char) {
#if !defined(OS_NACL_NONSFI)
FilePath MakeAbsoluteFilePath(const FilePath& input) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
char full_path[PATH_MAX];
- if (realpath(input.value().c_str(), full_path) == NULL)
+ if (realpath(input.value().c_str(), full_path) == nullptr)
return FilePath();
return FilePath(full_path);
}
@@ -248,7 +248,7 @@ FilePath MakeAbsoluteFilePath(const FilePath& input) {
// that functionality. If not, remove from file_util_win.cc, otherwise add it
// here.
bool DeleteFile(const FilePath& path, bool recursive) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
const char* path_str = path.value().c_str();
stat_wrapper_t file_info;
if (CallLstat(path_str, &file_info) != 0) {
@@ -266,18 +266,18 @@ bool DeleteFile(const FilePath& path, bool recursive) {
FileEnumerator traversal(path, true,
FileEnumerator::FILES | FileEnumerator::DIRECTORIES |
FileEnumerator::SHOW_SYM_LINKS);
- for (FilePath current = traversal.Next(); success && !current.empty();
+ for (FilePath current = traversal.Next(); !current.empty();
current = traversal.Next()) {
if (traversal.GetInfo().IsDirectory())
directories.push(current.value());
else
- success = (unlink(current.value().c_str()) == 0);
+ success &= (unlink(current.value().c_str()) == 0);
}
- while (success && !directories.empty()) {
+ while (!directories.empty()) {
FilePath dir = FilePath(directories.top());
directories.pop();
- success = (rmdir(dir.value().c_str()) == 0);
+ success &= (rmdir(dir.value().c_str()) == 0);
}
return success;
}
@@ -285,7 +285,7 @@ bool DeleteFile(const FilePath& path, bool recursive) {
bool ReplaceFile(const FilePath& from_path,
const FilePath& to_path,
File::Error* error) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
return true;
if (error)
@@ -296,7 +296,7 @@ bool ReplaceFile(const FilePath& from_path,
bool CopyDirectory(const FilePath& from_path,
const FilePath& to_path,
bool recursive) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// Some old callers of CopyDirectory want it to support wildcards.
// After some discussion, we decided to fix those callers.
// Break loudly here if anyone tries to do this.
@@ -478,7 +478,7 @@ bool SetCloseOnExec(int fd) {
}
bool PathExists(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
#if defined(OS_ANDROID)
if (path.IsContentUri()) {
return ContentUriExists(path);
@@ -489,13 +489,13 @@ bool PathExists(const FilePath& path) {
#if !defined(OS_NACL_NONSFI)
bool PathIsWritable(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return access(path.value().c_str(), W_OK) == 0;
}
#endif // !defined(OS_NACL_NONSFI)
bool DirectoryExists(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
stat_wrapper_t file_info;
if (CallStat(path.value().c_str(), &file_info) != 0)
return false;
@@ -541,7 +541,7 @@ bool ReadSymbolicLink(const FilePath& symlink_path, FilePath* target_path) {
}
bool GetPosixFilePermissions(const FilePath& path, int* mode) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(mode);
stat_wrapper_t file_info;
@@ -556,7 +556,7 @@ bool GetPosixFilePermissions(const FilePath& path, int* mode) {
bool SetPosixFilePermissions(const FilePath& path,
int mode) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK_EQ(mode & ~FILE_PERMISSION_MASK, 0);
// Calls stat() so that we can preserve the higher bits like S_ISGID.
@@ -640,7 +640,7 @@ FilePath GetHomeDir() {
#endif // !defined(OS_MACOSX)
bool CreateTemporaryFile(FilePath* path) {
- ThreadRestrictions::AssertIOAllowed(); // For call to close().
+ AssertBlockingAllowed(); // For call to close().
FilePath directory;
if (!GetTempDir(&directory))
return false;
@@ -654,7 +654,7 @@ bool CreateTemporaryFile(FilePath* path) {
FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
int fd = CreateAndOpenFdForTemporaryFile(dir, path);
if (fd < 0)
- return NULL;
+ return nullptr;
FILE* file = fdopen(fd, "a+");
if (!file)
@@ -663,7 +663,7 @@ FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
}
bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
- ThreadRestrictions::AssertIOAllowed(); // For call to close().
+ AssertBlockingAllowed(); // For call to close().
int fd = CreateAndOpenFdForTemporaryFile(dir, temp_file);
return ((fd >= 0) && !IGNORE_EINTR(close(fd)));
}
@@ -671,7 +671,7 @@ bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
static bool CreateTemporaryDirInDirImpl(const FilePath& base_dir,
const FilePath::StringType& name_tmpl,
FilePath* new_dir) {
- ThreadRestrictions::AssertIOAllowed(); // For call to mkdtemp().
+ AssertBlockingAllowed(); // For call to mkdtemp().
DCHECK(name_tmpl.find("XXXXXX") != FilePath::StringType::npos)
<< "Directory name template must contain \"XXXXXX\".";
@@ -708,7 +708,7 @@ bool CreateNewTempDirectory(const FilePath::StringType& prefix,
bool CreateDirectoryAndGetError(const FilePath& full_path,
File::Error* error) {
- ThreadRestrictions::AssertIOAllowed(); // For call to mkdir().
+ AssertBlockingAllowed(); // For call to mkdir().
std::vector<FilePath> subpaths;
// Collect a list of all parent directories.
@@ -793,8 +793,8 @@ FILE* OpenFile(const FilePath& filename, const char* mode) {
DCHECK(
strchr(mode, 'e') == nullptr ||
(strchr(mode, ',') != nullptr && strchr(mode, 'e') > strchr(mode, ',')));
- ThreadRestrictions::AssertIOAllowed();
- FILE* result = NULL;
+ AssertBlockingAllowed();
+ FILE* result = nullptr;
#if defined(OS_MACOSX)
// macOS does not provide a mode character to set O_CLOEXEC; see
// https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/fopen.3.html.
@@ -825,7 +825,7 @@ FILE* FileToFILE(File file, const char* mode) {
#endif // !defined(OS_NACL)
int ReadFile(const FilePath& filename, char* data, int max_size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
int fd = HANDLE_EINTR(open(filename.value().c_str(), O_RDONLY));
if (fd < 0)
return -1;
@@ -837,7 +837,7 @@ int ReadFile(const FilePath& filename, char* data, int max_size) {
}
int WriteFile(const FilePath& filename, const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
int fd = HANDLE_EINTR(creat(filename.value().c_str(), 0666));
if (fd < 0)
return -1;
@@ -866,7 +866,7 @@ bool WriteFileDescriptor(const int fd, const char* data, int size) {
#if !defined(OS_NACL_NONSFI)
bool AppendToFile(const FilePath& filename, const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
bool ret = true;
int fd = HANDLE_EINTR(open(filename.value().c_str(), O_WRONLY | O_APPEND));
if (fd < 0) {
@@ -890,7 +890,7 @@ bool AppendToFile(const FilePath& filename, const char* data, int size) {
bool GetCurrentDirectory(FilePath* dir) {
// getcwd can return ENOENT, which implies it checks against the disk.
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
char system_buffer[PATH_MAX] = "";
if (!getcwd(system_buffer, sizeof(system_buffer))) {
@@ -902,7 +902,7 @@ bool GetCurrentDirectory(FilePath* dir) {
}
bool SetCurrentDirectory(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return chdir(path.value().c_str()) == 0;
}
@@ -957,7 +957,7 @@ bool VerifyPathControlledByAdmin(const FilePath& path) {
};
// Reading the groups database may touch the file system.
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
std::set<gid_t> allowed_group_ids;
for (int i = 0, ie = arraysize(kAdminGroupNames); i < ie; ++i) {
@@ -977,7 +977,7 @@ bool VerifyPathControlledByAdmin(const FilePath& path) {
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
int GetMaximumPathComponentLength(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return pathconf(path.value().c_str(), _PC_NAME_MAX);
}
@@ -1002,7 +1002,7 @@ bool GetShmemTempDir(bool executable, FilePath* path) {
#if !defined(OS_MACOSX)
// Mac has its own implementation, this is for all other Posix systems.
bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
File infile;
#if defined(OS_ANDROID)
if (from_path.IsContentUri()) {
@@ -1029,7 +1029,7 @@ bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
namespace internal {
bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// Windows compatibility: if to_path exists, from_path and to_path
// must be the same type, either both files, or both directories.
stat_wrapper_t to_file_info;
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index 3f2e0f0876b..118e4491927 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -18,6 +18,7 @@
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
#include "base/environment.h"
+#include "base/files/file.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
@@ -46,9 +47,15 @@
#if defined(OS_POSIX)
#include <errno.h>
#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
#include <unistd.h>
#endif
+#if defined(OS_LINUX)
+#include <linux/fs.h>
+#endif
+
#if defined(OS_ANDROID)
#include "base/android/content_uri_utils.h"
#endif
@@ -1330,6 +1337,52 @@ TEST_F(FileUtilTest, DeleteDirRecursive) {
EXPECT_FALSE(PathExists(test_subdir));
}
+// Tests recursive Delete() for a directory.
+TEST_F(FileUtilTest, DeleteDirRecursiveWithOpenFile) {
+ // Create a subdirectory and put a file and two directories inside.
+ FilePath test_subdir = temp_dir_.GetPath().Append(FPL("DeleteWithOpenFile"));
+ CreateDirectory(test_subdir);
+ ASSERT_TRUE(PathExists(test_subdir));
+
+ FilePath file_name1 = test_subdir.Append(FPL("Undeletebable File1.txt"));
+ File file1(file_name1,
+ File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+ ASSERT_TRUE(PathExists(file_name1));
+
+ FilePath file_name2 = test_subdir.Append(FPL("Deleteable File2.txt"));
+ CreateTextFile(file_name2, bogus_content);
+ ASSERT_TRUE(PathExists(file_name2));
+
+ FilePath file_name3 = test_subdir.Append(FPL("Undeletebable File3.txt"));
+ File file3(file_name3,
+ File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+ ASSERT_TRUE(PathExists(file_name3));
+
+#if defined(OS_LINUX)
+ // On Windows, holding the file open in sufficient to make it un-deletable.
+ // The POSIX code is verifiable on Linux by creating an "immutable" file but
+ // this is best-effort because it's not supported by all file systems. Both
+ // files will have the same flags so no need to get them individually.
+ int flags;
+ CHECK_EQ(0, ioctl(file1.GetPlatformFile(), FS_IOC_GETFLAGS, &flags));
+ flags |= FS_IMMUTABLE_FL;
+ ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+#endif
+
+ // Delete recursively and check that at least the second file got deleted.
+ // This ensures that un-deletable files don't impact those that can be.
+ DeleteFile(test_subdir, true);
+ EXPECT_FALSE(PathExists(file_name2));
+
+#if defined(OS_LINUX)
+ // Make sure that the test can clean up after itself.
+ flags &= ~FS_IMMUTABLE_FL;
+ ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+#endif
+}
+
TEST_F(FileUtilTest, MoveFileNew) {
// Create a file
FilePath file_name_from =
@@ -2466,9 +2519,9 @@ TEST_F(FileUtilTest, ReadFileToString) {
EXPECT_TRUE(ReadFileToStringWithMaxSize(file_path, &data, 6));
EXPECT_EQ("0123", data);
- EXPECT_TRUE(ReadFileToStringWithMaxSize(file_path, NULL, 6));
+ EXPECT_TRUE(ReadFileToStringWithMaxSize(file_path, nullptr, 6));
- EXPECT_TRUE(ReadFileToString(file_path, NULL));
+ EXPECT_TRUE(ReadFileToString(file_path, nullptr));
data = "temp";
EXPECT_FALSE(ReadFileToString(file_path_dangerous, &data));
diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc
index 39da4bac5d9..1f63211c1e9 100644
--- a/chromium/base/files/file_util_win.cc
+++ b/chromium/base/files/file_util_win.cc
@@ -49,6 +49,7 @@ bool DeleteFileRecursive(const FilePath& path,
FileEnumerator traversal(path, false,
FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
pattern);
+ bool success = true;
for (FilePath current = traversal.Next(); !current.empty();
current = traversal.Next()) {
// Try to clear the read-only bit if we find it.
@@ -63,12 +64,12 @@ bool DeleteFileRecursive(const FilePath& path,
if (info.IsDirectory()) {
if (recursive && (!DeleteFileRecursive(current, pattern, true) ||
!RemoveDirectory(current.value().c_str())))
- return false;
+ success = false;
} else if (!::DeleteFile(current.value().c_str())) {
- return false;
+ success = false;
}
}
- return true;
+ return success;
}
// Appends |mode_char| to |mode| before the optional character set encoding; see
@@ -82,7 +83,7 @@ void AppendModeCharacter(base::char16 mode_char, base::string16* mode) {
} // namespace
FilePath MakeAbsoluteFilePath(const FilePath& input) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
wchar_t file_path[MAX_PATH];
if (!_wfullpath(file_path, input.value().c_str(), MAX_PATH))
return FilePath();
@@ -90,7 +91,7 @@ FilePath MakeAbsoluteFilePath(const FilePath& input) {
}
bool DeleteFile(const FilePath& path, bool recursive) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (path.empty())
return true;
@@ -125,7 +126,7 @@ bool DeleteFile(const FilePath& path, bool recursive) {
}
bool DeleteFileAfterReboot(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (path.value().length() >= MAX_PATH)
return false;
@@ -138,7 +139,7 @@ bool DeleteFileAfterReboot(const FilePath& path) {
bool ReplaceFile(const FilePath& from_path,
const FilePath& to_path,
File::Error* error) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// Try a simple move first. It will only succeed when |to_path| doesn't
// already exist.
if (::MoveFile(from_path.value().c_str(), to_path.value().c_str()))
@@ -171,7 +172,7 @@ bool CopyDirectory(const FilePath& from_path, const FilePath& to_path,
// attributes, OLE structured storage, NTFS file system alternate data
// streams, SECURITY_DESCRIPTOR. In practice, this is not what we want, we
// want the containing directory to propagate its SECURITY_DESCRIPTOR.
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// NOTE: I suspect we could support longer paths, but that would involve
// analyzing all our usage of files.
@@ -253,12 +254,12 @@ bool CopyDirectory(const FilePath& from_path, const FilePath& to_path,
}
bool PathExists(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return (GetFileAttributes(path.value().c_str()) != INVALID_FILE_ATTRIBUTES);
}
bool PathIsWritable(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
HANDLE dir =
CreateFile(path.value().c_str(), FILE_ADD_FILE, kFileShareAll,
NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
@@ -271,7 +272,7 @@ bool PathIsWritable(const FilePath& path) {
}
bool DirectoryExists(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DWORD fileattr = GetFileAttributes(path.value().c_str());
if (fileattr != INVALID_FILE_ATTRIBUTES)
return (fileattr & FILE_ATTRIBUTE_DIRECTORY) != 0;
@@ -308,7 +309,7 @@ FilePath GetHomeDir() {
}
bool CreateTemporaryFile(FilePath* path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
FilePath temp_file;
@@ -328,7 +329,7 @@ bool CreateTemporaryFile(FilePath* path) {
// TODO(jrg): is there equivalent call to use on Windows instead of
// going 2-step?
FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (!CreateTemporaryFileInDir(dir, path)) {
return NULL;
}
@@ -339,7 +340,7 @@ FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
}
bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// Use GUID instead of ::GetTempFileName() to generate unique file names.
// "Due to the algorithm used to generate file names, GetTempFileName can
@@ -388,7 +389,7 @@ bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
bool CreateTemporaryDirInDir(const FilePath& base_dir,
const FilePath::StringType& prefix,
FilePath* new_dir) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
FilePath path_to_create;
@@ -414,7 +415,7 @@ bool CreateTemporaryDirInDir(const FilePath& base_dir,
bool CreateNewTempDirectory(const FilePath::StringType& prefix,
FilePath* new_temp_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
FilePath system_temp_dir;
if (!GetTempDir(&system_temp_dir))
@@ -425,7 +426,7 @@ bool CreateNewTempDirectory(const FilePath::StringType& prefix,
bool CreateDirectoryAndGetError(const FilePath& full_path,
File::Error* error) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// If the path exists, we've succeeded if it's a directory, failed otherwise.
const wchar_t* full_path_str = full_path.value().c_str();
@@ -485,7 +486,7 @@ bool CreateDirectoryAndGetError(const FilePath& full_path,
}
bool NormalizeFilePath(const FilePath& path, FilePath* real_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
FilePath mapped_file;
if (!NormalizeToNativeFilePath(path, &mapped_file))
return false;
@@ -498,7 +499,7 @@ bool NormalizeFilePath(const FilePath& path, FilePath* real_path) {
bool DevicePathToDriveLetterPath(const FilePath& nt_device_path,
FilePath* out_drive_letter_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// Get the mapping of drive letters to device paths.
const int kDriveMappingSize = 1024;
@@ -541,7 +542,7 @@ bool DevicePathToDriveLetterPath(const FilePath& nt_device_path,
}
bool NormalizeToNativeFilePath(const FilePath& path, FilePath* nt_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// In Vista, GetFinalPathNameByHandle() would give us the real path
// from a file handle. If we ever deprecate XP, consider changing the
// code below to a call to GetFinalPathNameByHandle(). The method this
@@ -602,7 +603,7 @@ bool IsLink(const FilePath& file_path) {
}
bool GetFileInfo(const FilePath& file_path, File::Info* results) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
WIN32_FILE_ATTRIBUTE_DATA attr;
if (!GetFileAttributesEx(file_path.value().c_str(),
@@ -630,7 +631,7 @@ FILE* OpenFile(const FilePath& filename, const char* mode) {
DCHECK(
strchr(mode, 'N') == nullptr ||
(strchr(mode, ',') != nullptr && strchr(mode, 'N') > strchr(mode, ',')));
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
string16 w_mode = ASCIIToUTF16(mode);
AppendModeCharacter(L'N', &w_mode);
return _wfsopen(filename.value().c_str(), w_mode.c_str(), _SH_DENYNO);
@@ -651,7 +652,7 @@ FILE* FileToFILE(File file, const char* mode) {
}
int ReadFile(const FilePath& filename, char* data, int max_size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
win::ScopedHandle file(CreateFile(filename.value().c_str(),
GENERIC_READ,
FILE_SHARE_READ | FILE_SHARE_WRITE,
@@ -670,7 +671,7 @@ int ReadFile(const FilePath& filename, char* data, int max_size) {
}
int WriteFile(const FilePath& filename, const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
win::ScopedHandle file(CreateFile(filename.value().c_str(), GENERIC_WRITE, 0,
NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
NULL));
@@ -698,7 +699,7 @@ int WriteFile(const FilePath& filename, const char* data, int size) {
}
bool AppendToFile(const FilePath& filename, const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
win::ScopedHandle file(CreateFile(filename.value().c_str(),
FILE_APPEND_DATA,
0,
@@ -728,7 +729,7 @@ bool AppendToFile(const FilePath& filename, const char* data, int size) {
}
bool GetCurrentDirectory(FilePath* dir) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
wchar_t system_buffer[MAX_PATH];
system_buffer[0] = 0;
@@ -744,12 +745,12 @@ bool GetCurrentDirectory(FilePath* dir) {
}
bool SetCurrentDirectory(const FilePath& directory) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return ::SetCurrentDirectory(directory.value().c_str()) != 0;
}
int GetMaximumPathComponentLength(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
wchar_t volume_path[MAX_PATH];
if (!GetVolumePathNameW(path.NormalizePathSeparators().value().c_str(),
@@ -773,7 +774,7 @@ int GetMaximumPathComponentLength(const FilePath& path) {
}
bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (from_path.ReferencesParent() || to_path.ReferencesParent())
return false;
@@ -815,7 +816,7 @@ bool SetNonBlocking(int fd) {
namespace internal {
bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// NOTE: I suspect we could support longer paths, but that would involve
// analyzing all our usage of files.
@@ -850,7 +851,7 @@ bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
bool CopyAndDeleteDirectory(const FilePath& from_path,
const FilePath& to_path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (CopyDirectory(from_path, to_path, true)) {
if (DeleteFile(from_path, true))
return true;
diff --git a/chromium/base/files/file_win.cc b/chromium/base/files/file_win.cc
index acd3334f6af..6e7c38362d9 100644
--- a/chromium/base/files/file_win.cc
+++ b/chromium/base/files/file_win.cc
@@ -35,13 +35,13 @@ void File::Close() {
if (!file_.IsValid())
return;
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
SCOPED_FILE_TRACE("Close");
file_.Close();
}
int64_t File::Seek(Whence whence, int64_t offset) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE_WITH_SIZE("Seek", offset);
@@ -55,7 +55,7 @@ int64_t File::Seek(Whence whence, int64_t offset) {
}
int File::Read(int64_t offset, char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
DCHECK(!async_);
if (size < 0)
@@ -80,7 +80,7 @@ int File::Read(int64_t offset, char* data, int size) {
}
int File::ReadAtCurrentPos(char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
DCHECK(!async_);
if (size < 0)
@@ -108,7 +108,7 @@ int File::ReadAtCurrentPosNoBestEffort(char* data, int size) {
}
int File::Write(int64_t offset, const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
DCHECK(!async_);
@@ -129,7 +129,7 @@ int File::Write(int64_t offset, const char* data, int size) {
}
int File::WriteAtCurrentPos(const char* data, int size) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
DCHECK(!async_);
if (size < 0)
@@ -149,7 +149,7 @@ int File::WriteAtCurrentPosNoBestEffort(const char* data, int size) {
}
int64_t File::GetLength() {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE("GetLength");
@@ -162,7 +162,7 @@ int64_t File::GetLength() {
}
bool File::SetLength(int64_t length) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE_WITH_SIZE("SetLength", length);
@@ -193,7 +193,7 @@ bool File::SetLength(int64_t length) {
}
bool File::SetTimes(Time last_access_time, Time last_modified_time) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE("SetTimes");
@@ -205,7 +205,7 @@ bool File::SetTimes(Time last_access_time, Time last_modified_time) {
}
bool File::GetInfo(Info* info) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE("GetInfo");
@@ -317,7 +317,7 @@ File::Error File::OSErrorToFileError(DWORD last_error) {
}
void File::DoInitialize(const FilePath& path, uint32_t flags) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(!IsValid());
DWORD disposition = 0;
@@ -405,7 +405,7 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
}
bool File::Flush() {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(IsValid());
SCOPED_FILE_TRACE("Flush");
return ::FlushFileBuffers(file_.Get()) != FALSE;
diff --git a/chromium/base/files/important_file_writer_unittest.cc b/chromium/base/files/important_file_writer_unittest.cc
index f48343ae82a..0f97945ff6e 100644
--- a/chromium/base/files/important_file_writer_unittest.cc
+++ b/chromium/base/files/important_file_writer_unittest.cc
@@ -114,7 +114,7 @@ WriteCallbacksObserver::GetAndResetObservationState() {
class ImportantFileWriterTest : public testing::Test {
public:
- ImportantFileWriterTest() { }
+ ImportantFileWriterTest() = default;
void SetUp() override {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
file_ = temp_dir_.GetPath().AppendASCII("test-file");
diff --git a/chromium/base/files/memory_mapped_file.cc b/chromium/base/files/memory_mapped_file.cc
index 45a09a01aeb..638de0884e2 100644
--- a/chromium/base/files/memory_mapped_file.cc
+++ b/chromium/base/files/memory_mapped_file.cc
@@ -8,6 +8,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/numerics/safe_math.h"
#include "base/sys_info.h"
#include "build/build_config.h"
@@ -72,16 +73,19 @@ bool MemoryMappedFile::Initialize(File file,
switch (access) {
case READ_WRITE_EXTEND:
DCHECK(Region::kWholeFile != region);
- // Ensure that the extended size is within limits of File.
- if (region.size > std::numeric_limits<int64_t>::max() - region.offset) {
- DLOG(ERROR) << "Region bounds exceed maximum for base::File.";
- return false;
+ {
+ CheckedNumeric<int64_t> region_end(region.offset);
+ region_end += region.size;
+ if (!region_end.IsValid()) {
+ DLOG(ERROR) << "Region bounds exceed maximum for base::File.";
+ return false;
+ }
}
// Fall through.
case READ_ONLY:
case READ_WRITE:
// Ensure that the region values are valid.
- if (region.offset < 0 || region.size < 0) {
+ if (region.offset < 0) {
DLOG(ERROR) << "Region bounds are not valid.";
return false;
}
@@ -91,10 +95,8 @@ bool MemoryMappedFile::Initialize(File file,
if (IsValid())
return false;
- if (region != Region::kWholeFile) {
+ if (region != Region::kWholeFile)
DCHECK_GE(region.offset, 0);
- DCHECK_GT(region.size, 0);
- }
file_ = std::move(file);
@@ -107,23 +109,22 @@ bool MemoryMappedFile::Initialize(File file,
}
bool MemoryMappedFile::IsValid() const {
- return data_ != NULL;
+ return data_ != nullptr;
}
// static
void MemoryMappedFile::CalculateVMAlignedBoundaries(int64_t start,
- int64_t size,
+ size_t size,
int64_t* aligned_start,
- int64_t* aligned_size,
+ size_t* aligned_size,
int32_t* offset) {
// Sadly, on Windows, the mmap alignment is not just equal to the page size.
- const int64_t mask =
- static_cast<int64_t>(SysInfo::VMAllocationGranularity()) - 1;
- DCHECK_LT(mask, std::numeric_limits<int32_t>::max());
+ auto mask = SysInfo::VMAllocationGranularity() - 1;
+ DCHECK(IsValueInRangeForNumericType<int32_t>(mask));
*offset = start & mask;
*aligned_start = start & ~mask;
*aligned_size = (size + *offset + mask) & ~mask;
}
-#endif
+#endif // !defined(OS_NACL)
} // namespace base
diff --git a/chromium/base/files/memory_mapped_file.h b/chromium/base/files/memory_mapped_file.h
index cad99f679d7..04f43367d18 100644
--- a/chromium/base/files/memory_mapped_file.h
+++ b/chromium/base/files/memory_mapped_file.h
@@ -61,7 +61,7 @@ class BASE_EXPORT MemoryMappedFile {
int64_t offset;
// Length of the region in bytes.
- int64_t size;
+ size_t size;
};
// Opens an existing file and maps it into memory. |access| can be read-only
@@ -108,9 +108,9 @@ class BASE_EXPORT MemoryMappedFile {
// - |aligned_size| is a multiple of the VM granularity and >= |size|.
// - |offset| is the displacement of |start| w.r.t |aligned_start|.
static void CalculateVMAlignedBoundaries(int64_t start,
- int64_t size,
+ size_t size,
int64_t* aligned_start,
- int64_t* aligned_size,
+ size_t* aligned_size,
int32_t* offset);
// Map the file to memory, set data_ to that memory address. Return true on
diff --git a/chromium/base/files/memory_mapped_file_posix.cc b/chromium/base/files/memory_mapped_file_posix.cc
index b1efe887a05..45a0aea6ddb 100644
--- a/chromium/base/files/memory_mapped_file_posix.cc
+++ b/chromium/base/files/memory_mapped_file_posix.cc
@@ -12,6 +12,7 @@
#include <unistd.h>
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -21,14 +22,13 @@
namespace base {
-MemoryMappedFile::MemoryMappedFile() : data_(NULL), length_(0) {
-}
+MemoryMappedFile::MemoryMappedFile() : data_(nullptr), length_(0) {}
#if !defined(OS_NACL)
bool MemoryMappedFile::MapFileRegionToMemory(
const MemoryMappedFile::Region& region,
Access access) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
off_t map_start = 0;
size_t map_size = 0;
@@ -40,6 +40,8 @@ bool MemoryMappedFile::MapFileRegionToMemory(
DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
return false;
}
+ if (!IsValueInRangeForNumericType<size_t>(file_len))
+ return false;
map_size = static_cast<size_t>(file_len);
length_ = map_size;
} else {
@@ -48,7 +50,7 @@ bool MemoryMappedFile::MapFileRegionToMemory(
// outer region [|aligned_start|, |aligned_start| + |size|] which contains
// |region| and then add up the |data_offset| displacement.
int64_t aligned_start = 0;
- int64_t aligned_size = 0;
+ size_t aligned_size = 0;
CalculateVMAlignedBoundaries(region.offset,
region.size,
&aligned_start,
@@ -56,19 +58,15 @@ bool MemoryMappedFile::MapFileRegionToMemory(
&data_offset);
// Ensure that the casts in the mmap call below are sane.
- if (aligned_start < 0 || aligned_size < 0 ||
- aligned_start > std::numeric_limits<off_t>::max() ||
- static_cast<uint64_t>(aligned_size) >
- std::numeric_limits<size_t>::max() ||
- static_cast<uint64_t>(region.size) >
- std::numeric_limits<size_t>::max()) {
+ if (aligned_start < 0 ||
+ !IsValueInRangeForNumericType<off_t>(aligned_start)) {
DLOG(ERROR) << "Region bounds are not valid for mmap";
return false;
}
map_start = static_cast<off_t>(aligned_start);
- map_size = static_cast<size_t>(aligned_size);
- length_ = static_cast<size_t>(region.size);
+ map_size = aligned_size;
+ length_ = region.size;
}
int flags = 0;
@@ -106,20 +104,29 @@ bool MemoryMappedFile::MapFileRegionToMemory(
// Realize the extent of the file so that it can't fail (and crash) later
// when trying to write to a memory page that can't be created. This can
// fail if the disk is full and the file is sparse.
- //
- // Only Android API>=21 supports the fallocate call. Older versions need
- // to manually extend the file by writing zeros at block intervals.
- //
- // Mac OSX doesn't support this call but the primary filesystem doesn't
- // support sparse files so is unneeded.
bool do_manual_extension = false;
#if defined(OS_ANDROID) && __ANDROID_API__ < 21
+ // Only Android API>=21 supports the fallocate call. Older versions need
+ // to manually extend the file by writing zeros at block intervals.
do_manual_extension = true;
-#elif !defined(OS_MACOSX)
+#elif defined(OS_MACOSX)
+ // MacOS doesn't support fallocate even though their new APFS filesystem
+ // does support sparse files. It does, however, have the functionality
+ // available via fcntl.
+ // See also: https://openradar.appspot.com/32720223
+ fstore_t params = {F_ALLOCATEALL, F_PEOFPOSMODE, region.offset,
+ region.size, 0};
+ if (fcntl(file_.GetPlatformFile(), F_PREALLOCATE, &params) != 0) {
+ DPLOG(ERROR) << "F_PREALLOCATE";
+ // This can fail because the filesystem doesn't support it so don't
+ // give up just yet. Try the manual method below.
+ do_manual_extension = true;
+ }
+#else
if (posix_fallocate(file_.GetPlatformFile(), region.offset,
region.size) != 0) {
- DPLOG(ERROR) << "posix_fallocate " << file_.GetPlatformFile();
+ DPLOG(ERROR) << "posix_fallocate";
// This can fail because the filesystem doesn't support it so don't
// give up just yet. Try the manual method below.
do_manual_extension = true;
@@ -152,7 +159,7 @@ bool MemoryMappedFile::MapFileRegionToMemory(
break;
}
- data_ = static_cast<uint8_t*>(mmap(NULL, map_size, flags, MAP_SHARED,
+ data_ = static_cast<uint8_t*>(mmap(nullptr, map_size, flags, MAP_SHARED,
file_.GetPlatformFile(), map_start));
if (data_ == MAP_FAILED) {
DPLOG(ERROR) << "mmap " << file_.GetPlatformFile();
@@ -165,13 +172,13 @@ bool MemoryMappedFile::MapFileRegionToMemory(
#endif
void MemoryMappedFile::CloseHandles() {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
- if (data_ != NULL)
+ if (data_ != nullptr)
munmap(data_, length_);
file_.Close();
- data_ = NULL;
+ data_ = nullptr;
length_ = 0;
}
diff --git a/chromium/base/files/memory_mapped_file_win.cc b/chromium/base/files/memory_mapped_file_win.cc
index 03c0f975f94..087ca9ffe2a 100644
--- a/chromium/base/files/memory_mapped_file_win.cc
+++ b/chromium/base/files/memory_mapped_file_win.cc
@@ -21,14 +21,13 @@ MemoryMappedFile::MemoryMappedFile() : data_(NULL), length_(0) {
bool MemoryMappedFile::MapFileRegionToMemory(
const MemoryMappedFile::Region& region,
Access access) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (!file_.IsValid())
return false;
int flags = 0;
- uint32_t size_low = 0;
- uint32_t size_high = 0;
+ ULARGE_INTEGER size = {};
switch (access) {
case READ_ONLY:
flags |= PAGE_READONLY;
@@ -38,13 +37,12 @@ bool MemoryMappedFile::MapFileRegionToMemory(
break;
case READ_WRITE_EXTEND:
flags |= PAGE_READWRITE;
- size_high = static_cast<uint32_t>(region.size >> 32);
- size_low = static_cast<uint32_t>(region.size & 0xFFFFFFFF);
+ size.QuadPart = region.size;
break;
}
file_mapping_.Set(::CreateFileMapping(file_.GetPlatformFile(), NULL, flags,
- size_high, size_low, NULL));
+ size.HighPart, size.LowPart, NULL));
if (!file_mapping_.IsValid())
return false;
@@ -55,7 +53,7 @@ bool MemoryMappedFile::MapFileRegionToMemory(
if (region == MemoryMappedFile::Region::kWholeFile) {
DCHECK_NE(READ_WRITE_EXTEND, access);
int64_t file_len = file_.GetLength();
- if (file_len <= 0 || file_len > std::numeric_limits<int32_t>::max())
+ if (file_len <= 0 || !IsValueInRangeForNumericType<size_t>(file_len))
return false;
length_ = static_cast<size_t>(file_len);
} else {
@@ -67,20 +65,21 @@ bool MemoryMappedFile::MapFileRegionToMemory(
// We map here the outer region [|aligned_start|, |aligned_start+size|]
// which contains |region| and then add up the |data_offset| displacement.
int64_t aligned_start = 0;
- int64_t ignored = 0;
+ size_t ignored = 0U;
CalculateVMAlignedBoundaries(
region.offset, region.size, &aligned_start, &ignored, &data_offset);
- int64_t size = region.size + data_offset;
+ int64_t full_map_size = region.size + data_offset;
// Ensure that the casts below in the MapViewOfFile call are sane.
- if (aligned_start < 0 || size < 0 ||
- static_cast<uint64_t>(size) > std::numeric_limits<SIZE_T>::max()) {
+ if (aligned_start < 0 || full_map_size < 0 ||
+ !IsValueInRangeForNumericType<SIZE_T>(
+ static_cast<uint64_t>(full_map_size))) {
DLOG(ERROR) << "Region bounds are not valid for MapViewOfFile";
return false;
}
map_start.QuadPart = aligned_start;
- map_size = static_cast<SIZE_T>(size);
- length_ = static_cast<size_t>(region.size);
+ map_size = static_cast<SIZE_T>(full_map_size);
+ length_ = region.size;
}
data_ = static_cast<uint8_t*>(
diff --git a/chromium/base/files/scoped_temp_dir.cc b/chromium/base/files/scoped_temp_dir.cc
index a04e825156b..01ec0f0caab 100644
--- a/chromium/base/files/scoped_temp_dir.cc
+++ b/chromium/base/files/scoped_temp_dir.cc
@@ -16,8 +16,7 @@ constexpr FilePath::CharType kScopedDirPrefix[] =
} // namespace
-ScopedTempDir::ScopedTempDir() {
-}
+ScopedTempDir::ScopedTempDir() = default;
ScopedTempDir::~ScopedTempDir() {
if (!path_.empty() && !Delete())
diff --git a/chromium/base/fuchsia/default_job.h b/chromium/base/fuchsia/default_job.h
index af2c61d68e7..f5f5c3ad21b 100644
--- a/chromium/base/fuchsia/default_job.h
+++ b/chromium/base/fuchsia/default_job.h
@@ -5,6 +5,7 @@
#ifndef BASE_FUCHSIA_DEFAULT_JOB_H_
#define BASE_FUCHSIA_DEFAULT_JOB_H_
+#include "base/base_export.h"
#include "base/fuchsia/scoped_zx_handle.h"
namespace base {
@@ -13,8 +14,8 @@ namespace base {
// and looking them up by their process IDs.
// zx_job_default() will be returned if no job is explicitly set here.
// Only valid handles may be passed to SetDefaultJob().
-zx_handle_t GetDefaultJob();
-void SetDefaultJob(ScopedZxHandle job);
+BASE_EXPORT zx_handle_t GetDefaultJob();
+BASE_EXPORT void SetDefaultJob(ScopedZxHandle job);
} // namespace base
diff --git a/chromium/base/fuchsia/fuchsia_logging.cc b/chromium/base/fuchsia/fuchsia_logging.cc
new file mode 100644
index 00000000000..31a8dd56dc2
--- /dev/null
+++ b/chromium/base/fuchsia/fuchsia_logging.cc
@@ -0,0 +1,26 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/fuchsia_logging.h"
+
+#include <zircon/status.h>
+
+#include <iomanip>
+
+namespace logging {
+
+ZxLogMessage::ZxLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ zx_status_t zx_err)
+ : LogMessage(file_path, line, severity), zx_err_(zx_err) {}
+
+ZxLogMessage::~ZxLogMessage() {
+ // zx_status_t error values are negative, so log the numeric version as
+ // decimal rather than hex. This is also useful to match zircon/errors.h for
+ // grepping.
+ stream() << ": " << zx_status_get_string(zx_err_) << " (" << zx_err_ << ")";
+}
+
+} // namespace logging
diff --git a/chromium/base/fuchsia/fuchsia_logging.h b/chromium/base/fuchsia/fuchsia_logging.h
new file mode 100644
index 00000000000..728b0bb6486
--- /dev/null
+++ b/chromium/base/fuchsia/fuchsia_logging.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_FUCHSIA_LOGGING_H_
+#define BASE_FUCHSIA_FUCHSIA_LOGGING_H_
+
+#include <zircon/types.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// Use the ZX_LOG family of macros along with a zx_status_t containing a Zircon
+// error. The error value will be decoded so that logged messages explain the
+// error.
+
+namespace logging {
+
+class BASE_EXPORT ZxLogMessage : public logging::LogMessage {
+ public:
+ ZxLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ zx_status_t zx_err);
+ ~ZxLogMessage();
+
+ private:
+ zx_status_t zx_err_;
+
+ DISALLOW_COPY_AND_ASSIGN(ZxLogMessage);
+};
+
+} // namespace logging
+
+#define ZX_LOG_STREAM(severity, zx_err) \
+ COMPACT_GOOGLE_LOG_EX_##severity(ZxLogMessage, zx_err).stream()
+
+#define ZX_LOG(severity, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(severity, zx_err), LOG_IS_ON(severity))
+#define ZX_LOG_IF(severity, condition, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(severity, zx_err), \
+ LOG_IS_ON(severity) && (condition))
+
+#define ZX_CHECK(condition, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(FATAL, zx_err), !(condition)) \
+ << "Check failed: " #condition << ". "
+
+#define ZX_DLOG(severity, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(severity, zx_err), DLOG_IS_ON(severity))
+#define ZX_DLOG_IF(severity, condition, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(severity, zx_err), \
+ DLOG_IS_ON(severity) && (condition))
+
+#define ZX_DCHECK(condition, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(FATAL, zx_err), DCHECK_IS_ON() && !(condition)) \
+ << "Check failed: " #condition << ". "
+
+#endif // BASE_FUCHSIA_FUCHSIA_LOGGING_H_
diff --git a/chromium/base/gmock_unittest.cc b/chromium/base/gmock_unittest.cc
index da8dd94ea39..5c16728e354 100644
--- a/chromium/base/gmock_unittest.cc
+++ b/chromium/base/gmock_unittest.cc
@@ -23,8 +23,8 @@ namespace {
// for easy mocking.
class SampleClass {
public:
- SampleClass() {}
- virtual ~SampleClass() {}
+ SampleClass() = default;
+ virtual ~SampleClass() = default;
virtual int ReturnSomething() {
return -1;
diff --git a/chromium/base/i18n/bidi_line_iterator.cc b/chromium/base/i18n/bidi_line_iterator.cc
index eec4283c0e3..3f7f8686614 100644
--- a/chromium/base/i18n/bidi_line_iterator.cc
+++ b/chromium/base/i18n/bidi_line_iterator.cc
@@ -64,13 +64,12 @@ UCharDirection GetURLBiDiClassCallback(const void* /*unused*/, UChar32 c) {
} // namespace
-BiDiLineIterator::BiDiLineIterator() : bidi_(NULL) {
-}
+BiDiLineIterator::BiDiLineIterator() : bidi_(nullptr) {}
BiDiLineIterator::~BiDiLineIterator() {
if (bidi_) {
ubidi_close(bidi_);
- bidi_ = NULL;
+ bidi_ = nullptr;
}
}
@@ -96,7 +95,7 @@ bool BiDiLineIterator::Open(const string16& text,
}
int BiDiLineIterator::CountRuns() const {
- DCHECK(bidi_ != NULL);
+ DCHECK(bidi_ != nullptr);
UErrorCode error = U_ZERO_ERROR;
const int runs = ubidi_countRuns(bidi_, &error);
return U_SUCCESS(error) ? runs : 0;
@@ -105,14 +104,14 @@ int BiDiLineIterator::CountRuns() const {
UBiDiDirection BiDiLineIterator::GetVisualRun(int index,
int* start,
int* length) const {
- DCHECK(bidi_ != NULL);
+ DCHECK(bidi_ != nullptr);
return ubidi_getVisualRun(bidi_, index, start, length);
}
void BiDiLineIterator::GetLogicalRun(int start,
int* end,
UBiDiLevel* level) const {
- DCHECK(bidi_ != NULL);
+ DCHECK(bidi_ != nullptr);
ubidi_getLogicalRun(bidi_, start, end, level);
}
diff --git a/chromium/base/i18n/bidi_line_iterator_unittest.cc b/chromium/base/i18n/bidi_line_iterator_unittest.cc
index 5851664d9cf..d531313bf1d 100644
--- a/chromium/base/i18n/bidi_line_iterator_unittest.cc
+++ b/chromium/base/i18n/bidi_line_iterator_unittest.cc
@@ -14,7 +14,7 @@ namespace {
class BiDiLineIteratorTest : public testing::TestWithParam<TextDirection> {
public:
- BiDiLineIteratorTest() {}
+ BiDiLineIteratorTest() = default;
BiDiLineIterator* iterator() { return &iterator_; }
diff --git a/chromium/base/i18n/break_iterator.cc b/chromium/base/i18n/break_iterator.cc
index 869390fec37..251cd002e7b 100644
--- a/chromium/base/i18n/break_iterator.cc
+++ b/chromium/base/i18n/break_iterator.cc
@@ -17,21 +17,19 @@ namespace i18n {
const size_t npos = static_cast<size_t>(-1);
BreakIterator::BreakIterator(const StringPiece16& str, BreakType break_type)
- : iter_(NULL),
+ : iter_(nullptr),
string_(str),
break_type_(break_type),
prev_(npos),
- pos_(0) {
-}
+ pos_(0) {}
BreakIterator::BreakIterator(const StringPiece16& str, const string16& rules)
- : iter_(NULL),
+ : iter_(nullptr),
string_(str),
rules_(rules),
break_type_(RULE_BASED),
prev_(npos),
- pos_(0) {
-}
+ pos_(0) {}
BreakIterator::~BreakIterator() {
if (iter_)
@@ -70,11 +68,8 @@ bool BreakIterator::Init() {
<< parse_error.line << ", offset " << parse_error.offset;
}
} else {
- iter_ = ubrk_open(break_type,
- NULL,
- string_.data(),
- static_cast<int32_t>(string_.size()),
- &status);
+ iter_ = ubrk_open(break_type, nullptr, string_.data(),
+ static_cast<int32_t>(string_.size()), &status);
if (U_FAILURE(status)) {
NOTREACHED() << "ubrk_open failed for type " << break_type
<< " with error " << status;
@@ -147,7 +142,11 @@ BreakIterator::WordBreakStatus BreakIterator::GetWordBreakStatus() const {
int32_t status = ubrk_getRuleStatus(static_cast<UBreakIterator*>(iter_));
if (break_type_ != BREAK_WORD && break_type_ != RULE_BASED)
return IS_LINE_OR_CHAR_BREAK;
- return status == UBRK_WORD_NONE ? IS_SKIPPABLE_WORD : IS_WORD_BREAK;
+ // In ICU 60, trying to advance past the end of the text does not change
+ // |status| so that |pos_| has to be checked as well as |status|.
+ // See http://bugs.icu-project.org/trac/ticket/13447 .
+ return (status == UBRK_WORD_NONE || pos_ == npos) ? IS_SKIPPABLE_WORD
+ : IS_WORD_BREAK;
}
bool BreakIterator::IsEndOfWord(size_t position) const {
diff --git a/chromium/base/i18n/case_conversion.cc b/chromium/base/i18n/case_conversion.cc
index 9b7ce80537d..a4a104cf97e 100644
--- a/chromium/base/i18n/case_conversion.cc
+++ b/chromium/base/i18n/case_conversion.cc
@@ -28,14 +28,14 @@ int32_t ToUpperMapper(UChar* dest, int32_t dest_capacity,
const UChar* src, int32_t src_length,
UErrorCode* error) {
// Use default locale.
- return u_strToUpper(dest, dest_capacity, src, src_length, NULL, error);
+ return u_strToUpper(dest, dest_capacity, src, src_length, nullptr, error);
}
int32_t ToLowerMapper(UChar* dest, int32_t dest_capacity,
const UChar* src, int32_t src_length,
UErrorCode* error) {
// Use default locale.
- return u_strToLower(dest, dest_capacity, src, src_length, NULL, error);
+ return u_strToLower(dest, dest_capacity, src, src_length, nullptr, error);
}
int32_t FoldCaseMapper(UChar* dest, int32_t dest_capacity,
diff --git a/chromium/base/i18n/char_iterator.cc b/chromium/base/i18n/char_iterator.cc
index 25efc518694..d80b8b618d9 100644
--- a/chromium/base/i18n/char_iterator.cc
+++ b/chromium/base/i18n/char_iterator.cc
@@ -21,8 +21,7 @@ UTF8CharIterator::UTF8CharIterator(const std::string* str)
U8_NEXT(str_, next_pos_, len_, char_);
}
-UTF8CharIterator::~UTF8CharIterator() {
-}
+UTF8CharIterator::~UTF8CharIterator() = default;
bool UTF8CharIterator::Advance() {
if (array_pos_ >= len_)
@@ -58,8 +57,7 @@ UTF16CharIterator::UTF16CharIterator(const char16* str, size_t str_len)
ReadChar();
}
-UTF16CharIterator::~UTF16CharIterator() {
-}
+UTF16CharIterator::~UTF16CharIterator() = default;
bool UTF16CharIterator::Advance() {
if (array_pos_ >= len_)
diff --git a/chromium/base/i18n/file_util_icu.cc b/chromium/base/i18n/file_util_icu.cc
index e0cf3d52178..c154204d659 100644
--- a/chromium/base/i18n/file_util_icu.cc
+++ b/chromium/base/i18n/file_util_icu.cc
@@ -54,7 +54,7 @@ class IllegalCharacters {
friend struct DefaultSingletonTraits<IllegalCharacters>;
IllegalCharacters();
- ~IllegalCharacters() { }
+ ~IllegalCharacters() = default;
// set of characters considered invalid anywhere inside a filename.
std::unique_ptr<icu::UnicodeSet> illegal_anywhere_;
diff --git a/chromium/base/i18n/icu_string_conversions.cc b/chromium/base/i18n/icu_string_conversions.cc
index be82db2a956..6ec99803a81 100644
--- a/chromium/base/i18n/icu_string_conversions.cc
+++ b/chromium/base/i18n/icu_string_conversions.cc
@@ -13,10 +13,10 @@
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
+#include "third_party/icu/source/common/unicode/normalizer2.h"
#include "third_party/icu/source/common/unicode/ucnv.h"
#include "third_party/icu/source/common/unicode/ucnv_cb.h"
#include "third_party/icu/source/common/unicode/ucnv_err.h"
-#include "third_party/icu/source/common/unicode/unorm.h"
#include "third_party/icu/source/common/unicode/ustring.h"
namespace base {
@@ -69,11 +69,11 @@ void ToUnicodeCallbackSubstitute(const void* context,
UErrorCode * err) {
static const UChar kReplacementChar = 0xFFFD;
if (reason <= UCNV_IRREGULAR) {
- if (context == NULL ||
- (*(reinterpret_cast<const char*>(context)) == 'i' &&
- reason == UCNV_UNASSIGNED)) {
- *err = U_ZERO_ERROR;
- ucnv_cbToUWriteUChars(to_args, &kReplacementChar, 1, 0, err);
+ if (context == nullptr ||
+ (*(reinterpret_cast<const char*>(context)) == 'i' &&
+ reason == UCNV_UNASSIGNED)) {
+ *err = U_ZERO_ERROR;
+ ucnv_cbToUWriteUChars(to_args, &kReplacementChar, 1, 0, err);
}
// else the caller must have set the error code accordingly.
}
@@ -92,13 +92,13 @@ bool ConvertFromUTF16(UConverter* converter, const UChar* uchar_src,
// Setup our error handler.
switch (on_error) {
case OnStringConversionError::FAIL:
- ucnv_setFromUCallBack(converter, UCNV_FROM_U_CALLBACK_STOP, 0,
- NULL, NULL, &status);
+ ucnv_setFromUCallBack(converter, UCNV_FROM_U_CALLBACK_STOP, nullptr,
+ nullptr, nullptr, &status);
break;
case OnStringConversionError::SKIP:
case OnStringConversionError::SUBSTITUTE:
- ucnv_setFromUCallBack(converter, UCNV_FROM_U_CALLBACK_SKIP, 0,
- NULL, NULL, &status);
+ ucnv_setFromUCallBack(converter, UCNV_FROM_U_CALLBACK_SKIP, nullptr,
+ nullptr, nullptr, &status);
break;
default:
NOTREACHED();
@@ -120,16 +120,16 @@ void SetUpErrorHandlerForToUChars(OnStringConversionError::Type on_error,
UConverter* converter, UErrorCode* status) {
switch (on_error) {
case OnStringConversionError::FAIL:
- ucnv_setToUCallBack(converter, UCNV_TO_U_CALLBACK_STOP, 0,
- NULL, NULL, status);
+ ucnv_setToUCallBack(converter, UCNV_TO_U_CALLBACK_STOP, nullptr, nullptr,
+ nullptr, status);
break;
case OnStringConversionError::SKIP:
- ucnv_setToUCallBack(converter, UCNV_TO_U_CALLBACK_SKIP, 0,
- NULL, NULL, status);
+ ucnv_setToUCallBack(converter, UCNV_TO_U_CALLBACK_SKIP, nullptr, nullptr,
+ nullptr, status);
break;
case OnStringConversionError::SUBSTITUTE:
- ucnv_setToUCallBack(converter, ToUnicodeCallbackSubstitute, 0,
- NULL, NULL, status);
+ ucnv_setToUCallBack(converter, ToUnicodeCallbackSubstitute, nullptr,
+ nullptr, nullptr, status);
break;
default:
NOTREACHED();
@@ -201,18 +201,23 @@ bool ConvertToUtf8AndNormalize(const std::string& text,
return false;
UErrorCode status = U_ZERO_ERROR;
- size_t max_length = utf16.length() + 1;
- string16 normalized_utf16;
- std::unique_ptr<char16[]> buffer(new char16[max_length]);
- int actual_length = unorm_normalize(
- utf16.c_str(), utf16.length(), UNORM_NFC, 0,
- buffer.get(), static_cast<int>(max_length), &status);
- if (!U_SUCCESS(status))
+ const icu::Normalizer2* normalizer = icu::Normalizer2::getNFCInstance(status);
+ DCHECK(U_SUCCESS(status));
+ if (U_FAILURE(status))
return false;
- normalized_utf16.assign(buffer.get(), actual_length);
-
- return UTF16ToUTF8(normalized_utf16.data(),
- normalized_utf16.length(), result);
+ int32_t utf16_length = static_cast<int32_t>(utf16.length());
+ icu::UnicodeString normalized(utf16.data(), utf16_length);
+ int32_t normalized_prefix_length =
+ normalizer->spanQuickCheckYes(normalized, status);
+ if (normalized_prefix_length < utf16_length) {
+ icu::UnicodeString un_normalized(normalized, normalized_prefix_length);
+ normalized.truncate(normalized_prefix_length);
+ normalizer->normalizeSecondAndAppend(normalized, un_normalized, status);
+ }
+ if (U_FAILURE(status))
+ return false;
+ normalized.toUTF8String(*result);
+ return true;
}
} // namespace base
diff --git a/chromium/base/i18n/icu_string_conversions_unittest.cc b/chromium/base/i18n/icu_string_conversions_unittest.cc
index 99e4b9096d8..d1559860cc8 100644
--- a/chromium/base/i18n/icu_string_conversions_unittest.cc
+++ b/chromium/base/i18n/icu_string_conversions_unittest.cc
@@ -73,125 +73,93 @@ static const struct {
const wchar_t* wide;
const wchar_t* u16_wide;
} kConvertCodepageCases[] = {
- // Test a case where the input cannot be decoded, using SKIP, FAIL
- // and SUBSTITUTE error handling rules. "A7 41" is valid, but "A6" isn't.
- {"big5",
- "\xA7\x41\xA6",
- OnStringConversionError::FAIL,
- false,
- L"",
- NULL},
- {"big5",
- "\xA7\x41\xA6",
- OnStringConversionError::SKIP,
- true,
- L"\x4F60",
- NULL},
- {"big5",
- "\xA7\x41\xA6",
- OnStringConversionError::SUBSTITUTE,
- true,
- L"\x4F60\xFFFD",
- NULL},
- // Arabic (ISO-8859)
- {"iso-8859-6",
- "\xC7\xEE\xE4\xD3\xF1\xEE\xE4\xC7\xE5\xEF" " "
- "\xD9\xEE\xE4\xEE\xEA\xF2\xE3\xEF\xE5\xF2",
- OnStringConversionError::FAIL,
- true,
- L"\x0627\x064E\x0644\x0633\x0651\x064E\x0644\x0627\x0645\x064F" L" "
- L"\x0639\x064E\x0644\x064E\x064A\x0652\x0643\x064F\x0645\x0652",
- NULL},
- // Chinese Simplified (GB2312)
- {"gb2312",
- "\xC4\xE3\xBA\xC3",
- OnStringConversionError::FAIL,
- true,
- L"\x4F60\x597D",
- NULL},
- // Chinese (GB18030) : 4 byte sequences mapped to BMP characters
- {"gb18030",
- "\x81\x30\x84\x36\xA1\xA7",
- OnStringConversionError::FAIL,
- true,
- L"\x00A5\x00A8",
- NULL},
- // Chinese (GB18030) : A 4 byte sequence mapped to plane 2 (U+20000)
- {"gb18030",
- "\x95\x32\x82\x36\xD2\xBB",
- OnStringConversionError::FAIL,
- true,
+ // Test a case where the input cannot be decoded, using SKIP, FAIL
+ // and SUBSTITUTE error handling rules. "A7 41" is valid, but "A6" isn't.
+ {"big5", "\xA7\x41\xA6", OnStringConversionError::FAIL, false, L"",
+ nullptr},
+ {"big5", "\xA7\x41\xA6", OnStringConversionError::SKIP, true, L"\x4F60",
+ nullptr},
+ {"big5", "\xA7\x41\xA6", OnStringConversionError::SUBSTITUTE, true,
+ L"\x4F60\xFFFD", nullptr},
+ // Arabic (ISO-8859)
+ {"iso-8859-6",
+ "\xC7\xEE\xE4\xD3\xF1\xEE\xE4\xC7\xE5\xEF"
+ " "
+ "\xD9\xEE\xE4\xEE\xEA\xF2\xE3\xEF\xE5\xF2",
+ OnStringConversionError::FAIL, true,
+ L"\x0627\x064E\x0644\x0633\x0651\x064E\x0644\x0627\x0645\x064F"
+ L" "
+ L"\x0639\x064E\x0644\x064E\x064A\x0652\x0643\x064F\x0645\x0652",
+ nullptr},
+ // Chinese Simplified (GB2312)
+ {"gb2312", "\xC4\xE3\xBA\xC3", OnStringConversionError::FAIL, true,
+ L"\x4F60\x597D", nullptr},
+ // Chinese (GB18030) : 4 byte sequences mapped to BMP characters
+ {"gb18030", "\x81\x30\x84\x36\xA1\xA7", OnStringConversionError::FAIL, true,
+ L"\x00A5\x00A8", nullptr},
+ // Chinese (GB18030) : A 4 byte sequence mapped to plane 2 (U+20000)
+ {"gb18030", "\x95\x32\x82\x36\xD2\xBB", OnStringConversionError::FAIL, true,
#if defined(WCHAR_T_IS_UTF16)
- L"\xD840\xDC00\x4E00",
+ L"\xD840\xDC00\x4E00",
#elif defined(WCHAR_T_IS_UTF32)
- L"\x20000\x4E00",
+ L"\x20000\x4E00",
#endif
- L"\xD840\xDC00\x4E00"},
- {"big5",
- "\xA7\x41\xA6\x6E",
- OnStringConversionError::FAIL,
- true,
- L"\x4F60\x597D",
- NULL},
- // Greek (ISO-8859)
- {"iso-8859-7",
- "\xE3\xE5\xE9\xDC" " " "\xF3\xEF\xF5",
- OnStringConversionError::FAIL,
- true,
- L"\x03B3\x03B5\x03B9\x03AC" L" " L"\x03C3\x03BF\x03C5",
- NULL},
- // Hebrew (Windows)
- {"windows-1255",
- "\xF9\xD1\xC8\xEC\xE5\xC9\xED",
- OnStringConversionError::FAIL,
- true,
- L"\x05E9\x05C1\x05B8\x05DC\x05D5\x05B9\x05DD",
- NULL},
- // Korean (EUC)
- {"euc-kr",
- "\xBE\xC8\xB3\xE7\xC7\xCF\xBC\xBC\xBF\xE4",
- OnStringConversionError::FAIL,
- true,
- L"\xC548\xB155\xD558\xC138\xC694",
- NULL},
- // Japanese (EUC)
- {"euc-jp",
- "\xA4\xB3\xA4\xF3\xA4\xCB\xA4\xC1\xA4\xCF\xB0\xEC\x8E\xA6",
- OnStringConversionError::FAIL,
- true,
- L"\x3053\x3093\x306B\x3061\x306F\x4E00\xFF66",
- NULL},
- // Japanese (ISO-2022)
- {"iso-2022-jp",
- "\x1B$B" "\x24\x33\x24\x73\x24\x4B\x24\x41\x24\x4F\x30\x6C" "\x1B(B"
- "ab" "\x1B(J" "\x5C\x7E#$" "\x1B(B",
- OnStringConversionError::FAIL,
- true,
- L"\x3053\x3093\x306B\x3061\x306F\x4E00" L"ab\x00A5\x203E#$",
- NULL},
- // Japanese (Shift-JIS)
- {"sjis",
- "\x82\xB1\x82\xF1\x82\xC9\x82\xBF\x82\xCD\x88\xEA\xA6",
- OnStringConversionError::FAIL,
- true,
- L"\x3053\x3093\x306B\x3061\x306F\x4E00\xFF66",
- NULL},
- // Russian (KOI8)
- {"koi8-r",
- "\xDA\xC4\xD2\xC1\xD7\xD3\xD4\xD7\xD5\xCA\xD4\xC5",
- OnStringConversionError::FAIL,
- true,
- L"\x0437\x0434\x0440\x0430\x0432\x0441\x0442\x0432"
- L"\x0443\x0439\x0442\x0435",
- NULL},
- // Thai (windows-874)
- {"windows-874",
- "\xCA\xC7\xD1\xCA\xB4\xD5" "\xA4\xC3\xD1\xBA",
- OnStringConversionError::FAIL,
- true,
- L"\x0E2A\x0E27\x0E31\x0E2A\x0E14\x0E35"
- L"\x0E04\x0E23\x0e31\x0E1A",
- NULL},
+ L"\xD840\xDC00\x4E00"},
+ {"big5", "\xA7\x41\xA6\x6E", OnStringConversionError::FAIL, true,
+ L"\x4F60\x597D", nullptr},
+ // Greek (ISO-8859)
+ {"iso-8859-7",
+ "\xE3\xE5\xE9\xDC"
+ " "
+ "\xF3\xEF\xF5",
+ OnStringConversionError::FAIL, true,
+ L"\x03B3\x03B5\x03B9\x03AC"
+ L" "
+ L"\x03C3\x03BF\x03C5",
+ nullptr},
+ // Hebrew (Windows)
+ {"windows-1255", "\xF9\xD1\xC8\xEC\xE5\xC9\xED",
+ OnStringConversionError::FAIL, true,
+ L"\x05E9\x05C1\x05B8\x05DC\x05D5\x05B9\x05DD", nullptr},
+ // Korean (EUC)
+ {"euc-kr", "\xBE\xC8\xB3\xE7\xC7\xCF\xBC\xBC\xBF\xE4",
+ OnStringConversionError::FAIL, true, L"\xC548\xB155\xD558\xC138\xC694",
+ nullptr},
+ // Japanese (EUC)
+ {"euc-jp", "\xA4\xB3\xA4\xF3\xA4\xCB\xA4\xC1\xA4\xCF\xB0\xEC\x8E\xA6",
+ OnStringConversionError::FAIL, true,
+ L"\x3053\x3093\x306B\x3061\x306F\x4E00\xFF66", nullptr},
+ // Japanese (ISO-2022)
+ {"iso-2022-jp",
+ "\x1B$B"
+ "\x24\x33\x24\x73\x24\x4B\x24\x41\x24\x4F\x30\x6C"
+ "\x1B(B"
+ "ab"
+ "\x1B(J"
+ "\x5C\x7E#$"
+ "\x1B(B",
+ OnStringConversionError::FAIL, true,
+ L"\x3053\x3093\x306B\x3061\x306F\x4E00"
+ L"ab\x00A5\x203E#$",
+ nullptr},
+ // Japanese (Shift-JIS)
+ {"sjis", "\x82\xB1\x82\xF1\x82\xC9\x82\xBF\x82\xCD\x88\xEA\xA6",
+ OnStringConversionError::FAIL, true,
+ L"\x3053\x3093\x306B\x3061\x306F\x4E00\xFF66", nullptr},
+ // Russian (KOI8)
+ {"koi8-r", "\xDA\xC4\xD2\xC1\xD7\xD3\xD4\xD7\xD5\xCA\xD4\xC5",
+ OnStringConversionError::FAIL, true,
+ L"\x0437\x0434\x0440\x0430\x0432\x0441\x0442\x0432"
+ L"\x0443\x0439\x0442\x0435",
+ nullptr},
+ // Thai (windows-874)
+ {"windows-874",
+ "\xCA\xC7\xD1\xCA\xB4\xD5"
+ "\xA4\xC3\xD1\xBA",
+ OnStringConversionError::FAIL, true,
+ L"\x0E2A\x0E27\x0E31\x0E2A\x0E14\x0E35"
+ L"\x0E04\x0E23\x0e31\x0E1A",
+ nullptr},
};
TEST(ICUStringConversionsTest, ConvertBetweenCodepageAndUTF16) {
@@ -207,7 +175,7 @@ TEST(ICUStringConversionsTest, ConvertBetweenCodepageAndUTF16) {
kConvertCodepageCases[i].on_error,
&utf16);
string16 utf16_expected;
- if (kConvertCodepageCases[i].u16_wide == NULL)
+ if (kConvertCodepageCases[i].u16_wide == nullptr)
utf16_expected = BuildString16(kConvertCodepageCases[i].wide);
else
utf16_expected = BuildString16(kConvertCodepageCases[i].u16_wide);
diff --git a/chromium/base/i18n/message_formatter.cc b/chromium/base/i18n/message_formatter.cc
index 6962a282975..c69dd07d3d8 100644
--- a/chromium/base/i18n/message_formatter.cc
+++ b/chromium/base/i18n/message_formatter.cc
@@ -48,7 +48,7 @@ MessageArg::MessageArg(double d) : formattable(new icu::Formattable(d)) {}
MessageArg::MessageArg(const Time& t)
: formattable(new icu::Formattable(static_cast<UDate>(t.ToJsTime()))) {}
-MessageArg::~MessageArg() {}
+MessageArg::~MessageArg() = default;
// Tests if this argument has a value, and if so increments *count.
bool MessageArg::has_value(int *count) const {
diff --git a/chromium/base/i18n/message_formatter.h b/chromium/base/i18n/message_formatter.h
index 439402380d2..d24d42e2740 100644
--- a/chromium/base/i18n/message_formatter.h
+++ b/chromium/base/i18n/message_formatter.h
@@ -70,8 +70,23 @@ class BASE_I18N_EXPORT MessageArg {
// http://userguide.icu-project.org/formatparse/messages
// message_formatter_unittest.cc
// go/plurals inside Google.
-// TODO(jshin): Document this API at sites.chromium.org and add a reference
-// here.
+// TODO(jshin): Document this API in md format docs.
+// Caveat:
+// When plural/select/gender is used along with other format specifiers such
+// as date or number, plural/select/gender should be at the top level. It's
+// not an ICU restriction but a constraint imposed by Google's translation
+// infrastructure. Message A does not work. It must be revised to Message B.
+//
+// A.
+// Rated <ph name="RATING">{0, number,0.0}<ex>3.2</ex></ph>
+// by {1, plural, =1{a user} other{# users}}
+//
+// B.
+// {1, plural,
+// =1{Rated <ph name="RATING">{0, number,0.0}<ex>3.2</ex></ph>
+// by a user.}
+// other{Rated <ph name="RATING">{0, number,0.0}<ex>3.2</ex></ph>
+// by # users.}}
class BASE_I18N_EXPORT MessageFormatter {
public:
diff --git a/chromium/base/i18n/number_formatting_unittest.cc b/chromium/base/i18n/number_formatting_unittest.cc
index baa7ba7d807..0bdd6034461 100644
--- a/chromium/base/i18n/number_formatting_unittest.cc
+++ b/chromium/base/i18n/number_formatting_unittest.cc
@@ -101,15 +101,18 @@ TEST(NumberFormattingTest, FormatPercent) {
const wchar_t* expected_german; // Note: Space before % isn't \x20.
// Note: Eastern Arabic-Indic digits (U+06Fx) for Persian and
// Arabic-Indic digits (U+066x) for Arabic.
- // See http://unicode.org/cldr/trac/ticket/9040 for details.
+ // See https://unicode.org/cldr/trac/ticket/9040 for details.
+ // See also https://unicode.org/cldr/trac/ticket/10176 .
+ // For now, take what CLDR 32 has (percent sign to the right of
+ // a number in Persian).
const wchar_t* expected_persian;
const wchar_t* expected_arabic;
} cases[] = {
- {0, "0%", L"0\xa0%", L"\x200e\x66a\xa0\x6f0", L"\x660\xa0\x66a\x61c"},
- {42, "42%", L"42\xa0%", L"\x200e\x66a\xa0\x6f4\x6f2",
+ {0, "0%", L"0\xa0%", L"\x6f0\x66a", L"\x660\xa0\x66a\x61c"},
+ {42, "42%", L"42\xa0%", L"\x6f4\x6f2\x66a",
L"\x664\x662\xa0\x66a\x61c"},
{1024, "1,024%", L"1.024\xa0%",
- L"\x200e\x66a\xa0\x6f1\x66c\x6f0\x6f2\x6f4",
+ L"\x6f1\x66c\x6f0\x6f2\x6f4\x66a",
L"\x661\x66c\x660\x662\x664\xa0\x66a\x61c"},
};
diff --git a/chromium/base/i18n/rtl.cc b/chromium/base/i18n/rtl.cc
index 5d77c6cebe1..29ed648caa5 100644
--- a/chromium/base/i18n/rtl.cc
+++ b/chromium/base/i18n/rtl.cc
@@ -38,14 +38,14 @@ std::string GetLocaleString(const icu::Locale& locale) {
const char* variant = locale.getVariant();
std::string result =
- (language != NULL && *language != '\0') ? language : "und";
+ (language != nullptr && *language != '\0') ? language : "und";
- if (country != NULL && *country != '\0') {
+ if (country != nullptr && *country != '\0') {
result += '-';
result += country;
}
- if (variant != NULL && *variant != '\0')
+ if (variant != nullptr && *variant != '\0')
result += '@' + base::ToLowerASCII(variant);
return result;
diff --git a/chromium/base/i18n/string_search.cc b/chromium/base/i18n/string_search.cc
index 779e4d99765..2f6fee4fe61 100644
--- a/chromium/base/i18n/string_search.cc
+++ b/chromium/base/i18n/string_search.cc
@@ -20,10 +20,9 @@ FixedPatternStringSearchIgnoringCaseAndAccents(const string16& find_this)
const string16& dummy = find_this_;
UErrorCode status = U_ZERO_ERROR;
- search_ = usearch_open(find_this_.data(), find_this_.size(),
- dummy.data(), dummy.size(),
- uloc_getDefault(),
- NULL, // breakiter
+ search_ = usearch_open(find_this_.data(), find_this_.size(), dummy.data(),
+ dummy.size(), uloc_getDefault(),
+ nullptr, // breakiter
&status);
if (U_SUCCESS(status)) {
UCollator* collator = usearch_getCollator(search_);
diff --git a/chromium/base/i18n/string_search_unittest.cc b/chromium/base/i18n/string_search_unittest.cc
index f1c9d193f27..69501d6c99f 100644
--- a/chromium/base/i18n/string_search_unittest.cc
+++ b/chromium/base/i18n/string_search_unittest.cc
@@ -189,14 +189,14 @@ TEST(StringSearchTest, UnicodeLocaleDependent) {
// Composed characters
const string16 a_with_ring = WideToUTF16(L"\u00e5");
- EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
- a_base, a_with_ring, NULL, NULL));
+ EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(a_base, a_with_ring, nullptr,
+ nullptr));
const char* default_locale = uloc_getDefault();
SetICUDefaultLocale("da");
- EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(
- a_base, a_with_ring, NULL, NULL));
+ EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(a_base, a_with_ring, nullptr,
+ nullptr));
SetICUDefaultLocale(default_locale);
}
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index 98b6a209aa1..8224f6605bc 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -21,8 +21,8 @@ namespace base {
namespace {
const Time::Exploded kTestDateTimeExploded = {
- 2011, 4, 6, 30, // Sat, Apr 30, 2011
- 15, 42, 7, 0 // 15:42:07.000
+ 2011, 4, 6, 30, // Sat, Apr 30, 2011
+ 22, 42, 7, 0 // 22:42:07.000 in UTC = 15:42:07 in US PDT.
};
// Returns difference between the local time and GMT formatted as string.
@@ -64,20 +64,33 @@ string16 TimeDurationFormatWithSecondsString(const TimeDelta& delta,
return str;
}
-#if defined(OS_ANDROID)
-#define MAYBE_TimeFormatTimeOfDayDefault12h \
- DISABLED_TimeFormatTimeOfDayDefault12h
-#else
-#define MAYBE_TimeFormatTimeOfDayDefault12h TimeFormatTimeOfDayDefault12h
-#endif
-TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayDefault12h) {
+class ScopedRestoreDefaultTimezone {
+ public:
+ ScopedRestoreDefaultTimezone(const char* zoneid) {
+ original_zone_.reset(icu::TimeZone::createDefault());
+ icu::TimeZone::adoptDefault(icu::TimeZone::createTimeZone(zoneid));
+ }
+ ~ScopedRestoreDefaultTimezone() {
+ icu::TimeZone::adoptDefault(original_zone_.release());
+ }
+
+ ScopedRestoreDefaultTimezone(const ScopedRestoreDefaultTimezone&) = delete;
+ ScopedRestoreDefaultTimezone& operator=(const ScopedRestoreDefaultTimezone&) =
+ delete;
+
+ private:
+ std::unique_ptr<icu::TimeZone> original_zone_;
+};
+
+TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault12h) {
// Test for a locale defaulted to 12h clock.
// As an instance, we use third_party/icu/source/data/locales/en.txt.
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
Time time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
string16 clock12h_pm(ASCIIToUTF16("3:42 PM"));
string16 clock12h(ASCIIToUTF16("3:42"));
@@ -107,20 +120,15 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayDefault12h) {
kDropAmPm));
}
-#if defined(OS_ANDROID)
-#define MAYBE_TimeFormatTimeOfDayDefault24h \
- DISABLED_TimeFormatTimeOfDayDefault24h
-#else
-#define MAYBE_TimeFormatTimeOfDayDefault24h TimeFormatTimeOfDayDefault24h
-#endif
-TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayDefault24h) {
+TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault24h) {
// Test for a locale defaulted to 24h clock.
// As an instance, we use third_party/icu/source/data/locales/en_GB.txt.
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_GB");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
Time time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
string16 clock12h_pm(ASCIIToUTF16("3:42 pm"));
string16 clock12h(ASCIIToUTF16("3:42"));
@@ -150,21 +158,17 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayDefault24h) {
kDropAmPm));
}
-#if defined(OS_ANDROID)
-#define MAYBE_TimeFormatTimeOfDayJP DISABLED_TimeFormatTimeOfDayJP
-#else
-#define MAYBE_TimeFormatTimeOfDayJP TimeFormatTimeOfDayJP
-#endif
-TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayJP) {
+TEST(TimeFormattingTest, TimeFormatTimeOfDayJP) {
// Test for a locale that uses different mark than "AM" and "PM".
// As an instance, we use third_party/icu/source/data/locales/ja.txt.
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("ja_JP");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
Time time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
string16 clock24h(ASCIIToUTF16("15:42"));
- string16 clock12h_pm(WideToUTF16(L"\x5348\x5f8c" L"3:42"));
+ string16 clock12h_pm(UTF8ToUTF16(u8"午後3:42"));
string16 clock12h(ASCIIToUTF16("3:42"));
// The default is 24h clock.
@@ -190,19 +194,15 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatTimeOfDayJP) {
kDropAmPm));
}
-#if defined(OS_ANDROID)
-#define MAYBE_TimeFormatDateUS DISABLED_TimeFormatDateUS
-#else
-#define MAYBE_TimeFormatDateUS TimeFormatDateUS
-#endif
-TEST(TimeFormattingTest, MAYBE_TimeFormatDateUS) {
+TEST(TimeFormattingTest, TimeFormatDateUS) {
// See third_party/icu/source/data/locales/en.txt.
// The date patterns are "EEEE, MMMM d, y", "MMM d, y", and "M/d/yy".
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
Time time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
EXPECT_EQ(ASCIIToUTF16("Apr 30, 2011"), TimeFormatShortDate(time));
EXPECT_EQ(ASCIIToUTF16("4/30/11"), TimeFormatShortDateNumeric(time));
@@ -221,19 +221,15 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatDateUS) {
TimeFormatFriendlyDate(time));
}
-#if defined(OS_ANDROID)
-#define MAYBE_TimeFormatDateGB DISABLED_TimeFormatDateGB
-#else
-#define MAYBE_TimeFormatDateGB TimeFormatDateGB
-#endif
-TEST(TimeFormattingTest, MAYBE_TimeFormatDateGB) {
+TEST(TimeFormattingTest, TimeFormatDateGB) {
// See third_party/icu/source/data/locales/en_GB.txt.
// The date patterns are "EEEE, d MMMM y", "d MMM y", and "dd/MM/yyyy".
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_GB");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
Time time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
EXPECT_EQ(ASCIIToUTF16("30 Apr 2011"), TimeFormatShortDate(time));
EXPECT_EQ(ASCIIToUTF16("30/04/2011"), TimeFormatShortDateNumeric(time));
@@ -250,9 +246,10 @@ TEST(TimeFormattingTest, MAYBE_TimeFormatDateGB) {
TEST(TimeFormattingTest, TimeFormatWithPattern) {
test::ScopedRestoreICUDefaultLocale restore_locale;
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
Time time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
i18n::SetICUDefaultLocale("en_US");
EXPECT_EQ(ASCIIToUTF16("Apr 30, 2011"), TimeFormatWithPattern(time, "yMMMd"));
@@ -265,9 +262,9 @@ TEST(TimeFormattingTest, TimeFormatWithPattern) {
TimeFormatWithPattern(time, "MMMMdjmmss"));
i18n::SetICUDefaultLocale("ja_JP");
- EXPECT_EQ(WideToUTF16(L"2011年4月30日"),
+ EXPECT_EQ(UTF8ToUTF16(u8"2011年4月30日"),
TimeFormatWithPattern(time, "yMMMd"));
- EXPECT_EQ(WideToUTF16(L"4月30日") + ASCIIToUTF16(" 15:42:07"),
+ EXPECT_EQ(UTF8ToUTF16(u8"4月30日 15:42:07"),
TimeFormatWithPattern(time, "MMMMdjmmss"));
}
@@ -299,16 +296,16 @@ TEST(TimeFormattingTest, TimeDurationFormat) {
// Persian, with non-Arabic numbers.
i18n::SetICUDefaultLocale("fa");
- string16 fa_wide = WideToUTF16(
- L"\x6f1\x6f5\x20\x633\x627\x639\x62a\x20\x648\x20\x6f4\x6f2\x20\x62f\x642"
- L"\x6cc\x642\x647");
- string16 fa_short = WideToUTF16(
- L"\x6f1\x6f5\x20\x633\x627\x639\x62a\x60c\x200f\x20\x6f4\x6f2\x20\x62f"
- L"\x642\x6cc\x642\x647");
- string16 fa_narrow = WideToUTF16(
- L"\x6f1\x6f5\x20\x633\x627\x639\x62a\x20\x6f4\x6f2\x20\x62f\x642\x6cc"
- L"\x642\x647");
- string16 fa_numeric = WideToUTF16(L"\x6f1\x6f5\x3a\x6f4\x6f2");
+ string16 fa_wide = UTF8ToUTF16(
+ u8"\u06f1\u06f5 \u0633\u0627\u0639\u062a \u0648 \u06f4\u06f2 \u062f\u0642"
+ u8"\u06cc\u0642\u0647");
+ string16 fa_short = UTF8ToUTF16(
+ u8"\u06f1\u06f5 \u0633\u0627\u0639\u062a\u060c\u200f \u06f4\u06f2 \u062f"
+ u8"\u0642\u06cc\u0642\u0647");
+ string16 fa_narrow = UTF8ToUTF16(
+ u8"\u06f1\u06f5 \u0633\u0627\u0639\u062a \u06f4\u06f2 \u062f\u0642\u06cc"
+ u8"\u0642\u0647");
+ string16 fa_numeric = UTF8ToUTF16(u8"\u06f1\u06f5:\u06f4\u06f2");
EXPECT_EQ(fa_wide, TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
EXPECT_EQ(fa_short, TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
EXPECT_EQ(fa_narrow, TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
@@ -368,19 +365,40 @@ TEST(TimeFormattingTest, TimeDurationFormatWithSeconds) {
TEST(TimeFormattingTest, TimeIntervalFormat) {
test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
const Time::Exploded kTestIntervalEndTimeExploded = {
- 2011, 5, 6, 28, // Sat, Apr 30, 2012
- 15, 42, 7, 0 // 15:42:07.000
+      2011, 5, 6, 28,  // Sat, May 28, 2011
+ 22, 42, 7, 0 // 22:42:07.000
};
Time begin_time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestDateTimeExploded, &begin_time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &begin_time));
Time end_time;
- EXPECT_TRUE(Time::FromLocalExploded(kTestIntervalEndTimeExploded, &end_time));
+ EXPECT_TRUE(Time::FromUTCExploded(kTestIntervalEndTimeExploded, &end_time));
+
+ EXPECT_EQ(
+ UTF8ToUTF16(u8"Saturday, April 30 – Saturday, May 28"),
+ DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+
+ const Time::Exploded kTestIntervalBeginTimeExploded = {
+      2011, 5, 1, 16,  // Mon, May 16, 2011
+ 22, 42, 7, 0 // 22:42:07.000
+ };
+ EXPECT_TRUE(
+ Time::FromUTCExploded(kTestIntervalBeginTimeExploded, &begin_time));
+ EXPECT_EQ(
+ UTF8ToUTF16(u8"Monday, May 16 – Saturday, May 28"),
+ DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+
+ i18n::SetICUDefaultLocale("en_GB");
+ EXPECT_EQ(
+ UTF8ToUTF16(u8"Monday 16 – Saturday 28 May"),
+ DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+ i18n::SetICUDefaultLocale("ja");
EXPECT_EQ(
- WideToUTF16(L"Saturday, April 30 – Saturday, May 28"),
+ UTF8ToUTF16(u8"5月16日(月曜日)~28日(土曜日)"),
DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
}
diff --git a/chromium/base/ios/callback_counter.h b/chromium/base/ios/callback_counter.h
new file mode 100644
index 00000000000..eef45547355
--- /dev/null
+++ b/chromium/base/ios/callback_counter.h
@@ -0,0 +1,46 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef IOS_CHROME_BROWSER_CALLBACK_COUNTER_H_
+#define IOS_CHROME_BROWSER_CALLBACK_COUNTER_H_
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread.h"
+
+// A helper class that keeps count of the number of pending callbacks that need
+// to be received. Calls |final_callback| when all callbacks have been received.
+// All methods (except the destructor) must be called on the same thread.
+class CallbackCounter : public base::RefCounted<CallbackCounter> {
+ public:
+ typedef base::Callback<void()> FinalCallback;
+
+ explicit CallbackCounter(const FinalCallback& final_callback);
+
+  // Increments the count of pending callbacks by |count|.
+ void IncrementCount(int count);
+
+ // Increments the count of pending callbacks by 1.
+ void IncrementCount();
+
+ // Decrements the count of pending callbacks.
+ void DecrementCount();
+
+ private:
+ friend class base::RefCounted<CallbackCounter>;
+
+ ~CallbackCounter();
+
+ // The number of callbacks that still need to be received.
+ unsigned callback_count_;
+ // The callback that is finally called when all callbacks have been received
+ // (when the |callback_count_| goes down to 0).
+ FinalCallback final_callback_;
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallbackCounter);
+};
+
+#endif // IOS_CHROME_BROWSER_CALLBACK_COUNTER_H_
diff --git a/chromium/base/ios/callback_counter.mm b/chromium/base/ios/callback_counter.mm
new file mode 100644
index 00000000000..575500925ce
--- /dev/null
+++ b/chromium/base/ios/callback_counter.mm
@@ -0,0 +1,35 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/ios/callback_counter.h"
+
+CallbackCounter::CallbackCounter(const FinalCallback& final_callback)
+ : callback_count_(0U), final_callback_(final_callback) {
+ DCHECK(!final_callback.is_null());
+}
+
+CallbackCounter::~CallbackCounter() {
+ DCHECK_EQ(0U, callback_count_);
+}
+
+void CallbackCounter::IncrementCount(int count) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!final_callback_.is_null());
+ callback_count_ += count;
+}
+
+void CallbackCounter::IncrementCount() {
+ IncrementCount(1);
+}
+
+void CallbackCounter::DecrementCount() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(callback_count_);
+
+ --callback_count_;
+ if (callback_count_ == 0) {
+ final_callback_.Run();
+ final_callback_.Reset();
+ }
+}
diff --git a/chromium/base/ios/callback_counter_unittest.mm b/chromium/base/ios/callback_counter_unittest.mm
new file mode 100644
index 00000000000..6d194aaf648
--- /dev/null
+++ b/chromium/base/ios/callback_counter_unittest.mm
@@ -0,0 +1,58 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/ios/callback_counter.h"
+
+#include "base/bind.h"
+#include "base/mac/bind_objc_block.h"
+#include "base/memory/ref_counted.h"
+#import "base/test/ios/wait_util.h"
+#include "testing/platform_test.h"
+
+using CallbackCounterTest = PlatformTest;
+
+// Tests that CallbackCounter works with adding callbacks one by one.
+TEST_F(CallbackCounterTest, BasicIncrementByOne) {
+ __block BOOL block_was_called = NO;
+ scoped_refptr<CallbackCounter> callback_counter =
+ new CallbackCounter(base::BindBlock(^{
+ block_was_called = YES;
+ }));
+
+ // Enqueue the first callback.
+ callback_counter->IncrementCount();
+ dispatch_async(dispatch_get_main_queue(), ^{
+ callback_counter->DecrementCount();
+ });
+
+ // Enqueue the second callback.
+ callback_counter->IncrementCount();
+ dispatch_async(dispatch_get_main_queue(), ^{
+ callback_counter->DecrementCount();
+ });
+
+ base::test::ios::WaitUntilCondition(^bool() {
+ return block_was_called;
+ });
+}
+
+// Tests that CallbackCounter works with adding all callbacks at once.
+TEST_F(CallbackCounterTest, BasicIncrementByMoreThanOne) {
+ __block BOOL block_was_called = NO;
+ scoped_refptr<CallbackCounter> callback_counter =
+ new CallbackCounter(base::BindBlock(^{
+ block_was_called = YES;
+ }));
+
+ // Enqueue the 5 callbacks.
+ callback_counter->IncrementCount(5);
+ for (int i = 0; i < 5; i++) {
+ dispatch_async(dispatch_get_main_queue(), ^{
+ callback_counter->DecrementCount();
+ });
+ }
+ base::test::ios::WaitUntilCondition(^bool() {
+ return block_was_called;
+ });
+}
diff --git a/chromium/base/json/json_file_value_serializer.cc b/chromium/base/json/json_file_value_serializer.cc
index 661d25d7984..a7c68c59d50 100644
--- a/chromium/base/json/json_file_value_serializer.cc
+++ b/chromium/base/json/json_file_value_serializer.cc
@@ -21,8 +21,7 @@ JSONFileValueSerializer::JSONFileValueSerializer(
: json_file_path_(json_file_path) {
}
-JSONFileValueSerializer::~JSONFileValueSerializer() {
-}
+JSONFileValueSerializer::~JSONFileValueSerializer() = default;
bool JSONFileValueSerializer::Serialize(const base::Value& root) {
return SerializeInternal(root, false);
@@ -57,8 +56,7 @@ JSONFileValueDeserializer::JSONFileValueDeserializer(
int options)
: json_file_path_(json_file_path), options_(options), last_read_size_(0U) {}
-JSONFileValueDeserializer::~JSONFileValueDeserializer() {
-}
+JSONFileValueDeserializer::~JSONFileValueDeserializer() = default;
int JSONFileValueDeserializer::ReadFileToString(std::string* json_string) {
DCHECK(json_string);
@@ -109,7 +107,7 @@ std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
*error_code = error;
if (error_str)
*error_str = GetErrorMessageForCode(error);
- return NULL;
+ return nullptr;
}
JSONStringValueDeserializer deserializer(json_string, options_);
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index 7b26c204987..ceb64a62c05 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -70,8 +70,7 @@ JSONParser::JSONParser(int options)
error_column_(0) {
}
-JSONParser::~JSONParser() {
-}
+JSONParser::~JSONParser() = default;
std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
start_pos_ = input.data();
@@ -135,8 +134,7 @@ JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
JSONParser::StringBuilder::StringBuilder(const char* pos)
: pos_(pos), length_(0) {}
-JSONParser::StringBuilder::~StringBuilder() {
-}
+JSONParser::StringBuilder::~StringBuilder() = default;
JSONParser::StringBuilder& JSONParser::StringBuilder::operator=(
StringBuilder&& other) = default;
diff --git a/chromium/base/json/json_reader.cc b/chromium/base/json/json_reader.cc
index 4ff7496bbb1..e18f4a55a5a 100644
--- a/chromium/base/json/json_reader.cc
+++ b/chromium/base/json/json_reader.cc
@@ -39,8 +39,7 @@ JSONReader::JSONReader(int options)
: parser_(new internal::JSONParser(options)) {
}
-JSONReader::~JSONReader() {
-}
+JSONReader::~JSONReader() = default;
// static
std::unique_ptr<Value> JSONReader::Read(StringPiece json) {
diff --git a/chromium/base/json/json_string_value_serializer.cc b/chromium/base/json/json_string_value_serializer.cc
index 2e46ab387a2..f9c45a40d3e 100644
--- a/chromium/base/json/json_string_value_serializer.cc
+++ b/chromium/base/json/json_string_value_serializer.cc
@@ -15,7 +15,7 @@ JSONStringValueSerializer::JSONStringValueSerializer(std::string* json_string)
pretty_print_(false) {
}
-JSONStringValueSerializer::~JSONStringValueSerializer() {}
+JSONStringValueSerializer::~JSONStringValueSerializer() = default;
bool JSONStringValueSerializer::Serialize(const Value& root) {
return SerializeInternal(root, false);
@@ -45,7 +45,7 @@ JSONStringValueDeserializer::JSONStringValueDeserializer(
int options)
: json_string_(json_string), options_(options) {}
-JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
+JSONStringValueDeserializer::~JSONStringValueDeserializer() = default;
std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
int* error_code,
diff --git a/chromium/base/json/json_value_converter_unittest.cc b/chromium/base/json/json_value_converter_unittest.cc
index 6a603d3a92a..805b05b5406 100644
--- a/chromium/base/json/json_value_converter_unittest.cc
+++ b/chromium/base/json/json_value_converter_unittest.cc
@@ -42,12 +42,12 @@ struct SimpleMessage {
}
static bool HasFieldPresent(const base::Value* value, bool* result) {
- *result = value != NULL;
+ *result = value != nullptr;
return true;
}
static bool GetValueString(const base::Value* value, std::string* result) {
- const base::DictionaryValue* dict = NULL;
+ const base::DictionaryValue* dict = nullptr;
if (!value->GetAsDictionary(&dict))
return false;
diff --git a/chromium/base/json/json_writer.cc b/chromium/base/json/json_writer.cc
index cc4cd28f605..e4f1e3cf9d0 100644
--- a/chromium/base/json/json_writer.cc
+++ b/chromium/base/json/json_writer.cc
@@ -89,7 +89,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
json_string_->append(Int64ToString(static_cast<int64_t>(value)));
return result;
}
- std::string real = DoubleToString(value);
+ std::string real = NumberToString(value);
// Ensure that the number has a .0 if there's no decimal or 'e'. This
// makes sure that when we read the JSON back, it's interpreted as a
// real rather than an int.
@@ -123,7 +123,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
if (pretty_print_)
json_string_->push_back(' ');
- const ListValue* list = NULL;
+ const ListValue* list = nullptr;
bool first_value_has_been_output = false;
bool result = node.GetAsList(&list);
DCHECK(result);
@@ -154,7 +154,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
if (pretty_print_)
json_string_->append(kPrettyPrintLineEnding);
- const DictionaryValue* dict = NULL;
+ const DictionaryValue* dict = nullptr;
bool first_value_has_been_output = false;
bool result = node.GetAsDictionary(&dict);
DCHECK(result);
diff --git a/chromium/base/lazy_instance.cc b/chromium/base/lazy_instance.cc
index c18cf52c152..de143ca5c17 100644
--- a/chromium/base/lazy_instance.cc
+++ b/chromium/base/lazy_instance.cc
@@ -29,8 +29,19 @@ bool NeedsLazyInstance(subtle::AtomicWord* state) {
// state_ == STATE_CREATED needs to acquire visibility over
// the associated data (buf_). Pairing Release_Store is in
// CompleteLazyInstance().
- while (subtle::Acquire_Load(state) == kLazyInstanceStateCreating) {
- PlatformThread::YieldCurrentThread();
+ if (subtle::Acquire_Load(state) == kLazyInstanceStateCreating) {
+ const base::Time start = base::Time::Now();
+ do {
+ const base::TimeDelta elapsed = base::Time::Now() - start;
+ // Spin with YieldCurrentThread for at most one ms - this ensures maximum
+ // responsiveness. After that spin with Sleep(1ms) so that we don't burn
+ // excessive CPU time - this also avoids infinite loops due to priority
+ // inversions (https://crbug.com/797129).
+ if (elapsed < TimeDelta::FromMilliseconds(1))
+ PlatformThread::YieldCurrentThread();
+ else
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+ } while (subtle::Acquire_Load(state) == kLazyInstanceStateCreating);
}
// Someone else created the instance.
return false;
diff --git a/chromium/base/lazy_instance_unittest.cc b/chromium/base/lazy_instance_unittest.cc
index d126c5dbe4b..cfa48f2308c 100644
--- a/chromium/base/lazy_instance_unittest.cc
+++ b/chromium/base/lazy_instance_unittest.cc
@@ -108,7 +108,7 @@ namespace {
// It accepts a bool* and sets the bool to true when the dtor runs.
class DeleteLogger {
public:
- DeleteLogger() : deleted_(NULL) {}
+ DeleteLogger() : deleted_(nullptr) {}
~DeleteLogger() { *deleted_ = true; }
void SetDeletedPtr(bool* deleted) {
@@ -150,8 +150,8 @@ namespace {
template <size_t alignment>
class AlignedData {
public:
- AlignedData() {}
- ~AlignedData() {}
+ AlignedData() = default;
+ ~AlignedData() = default;
alignas(alignment) char data_[alignment];
};
diff --git a/chromium/base/linux_util.cc b/chromium/base/linux_util.cc
index 851a6c67ab0..ddf848eeb70 100644
--- a/chromium/base/linux_util.cc
+++ b/chromium/base/linux_util.cc
@@ -48,7 +48,7 @@ class LinuxDistroHelper {
// The simple state machine goes from:
// STATE_DID_NOT_CHECK -> STATE_CHECK_STARTED -> STATE_CHECK_FINISHED.
LinuxDistroHelper() : state_(STATE_DID_NOT_CHECK) {}
- ~LinuxDistroHelper() {}
+ ~LinuxDistroHelper() = default;
// Retrieve the current state, if we're in STATE_DID_NOT_CHECK,
// we automatically move to STATE_CHECK_STARTED so nobody else will
@@ -157,7 +157,7 @@ void SetLinuxDistro(const std::string& distro) {
pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
bool* syscall_supported) {
- if (syscall_supported != NULL)
+ if (syscall_supported != nullptr)
*syscall_supported = false;
std::vector<pid_t> tids;
@@ -171,7 +171,7 @@ pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
int fd = open(buf, O_RDONLY);
if (fd < 0)
continue;
- if (syscall_supported != NULL)
+ if (syscall_supported != nullptr)
*syscall_supported = true;
bool read_ret = ReadFromFD(fd, syscall_data.get(), expected_data.length());
close(fd);
diff --git a/chromium/base/location.cc b/chromium/base/location.cc
index 22c60e6ba9e..8bbf6edaa4a 100644
--- a/chromium/base/location.cc
+++ b/chromium/base/location.cc
@@ -48,18 +48,6 @@ std::string Location::ToString() const {
return StringPrintf("pc:%p", program_counter_);
}
-LocationSnapshot::LocationSnapshot() = default;
-
-LocationSnapshot::LocationSnapshot(const Location& location)
- : line_number(location.line_number()) {
- if (location.file_name())
- file_name = location.file_name();
- if (location.function_name())
- function_name = location.function_name();
-}
-
-LocationSnapshot::~LocationSnapshot() = default;
-
#if defined(COMPILER_MSVC)
#define RETURN_ADDRESS() _ReturnAddress()
#elif defined(COMPILER_GCC) && !defined(OS_NACL)
diff --git a/chromium/base/location.h b/chromium/base/location.h
index 7948ac71e07..bf08682a590 100644
--- a/chromium/base/location.h
+++ b/chromium/base/location.h
@@ -80,19 +80,6 @@ class BASE_EXPORT Location {
const void* program_counter_ = nullptr;
};
-// A "snapshotted" representation of the Location class that can safely be
-// passed across process boundaries.
-struct BASE_EXPORT LocationSnapshot {
- // The default constructor is exposed to support the IPC serialization macros.
- LocationSnapshot();
- explicit LocationSnapshot(const Location& location);
- ~LocationSnapshot();
-
- std::string file_name;
- std::string function_name;
- int line_number = -1;
-};
-
BASE_EXPORT const void* GetProgramCounter();
// The macros defined here will expand to the current function.
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index c4f4539f8ec..eba1b2c51ad 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -19,12 +19,33 @@ typedef HANDLE MutexHandle;
#define write(fd, buf, count) _write(fd, buf, static_cast<unsigned int>(count))
// Windows doesn't define STDERR_FILENO. Define it here.
#define STDERR_FILENO 2
+
#elif defined(OS_MACOSX)
+// In MacOS 10.12 and iOS 10.0 and later ASL (Apple System Log) was deprecated
+// in favor of OS_LOG (Unified Logging).
+#include <AvailabilityMacros.h>
+#if defined(OS_IOS)
+#if !defined(__IPHONE_10_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0
+#define USE_ASL
+#endif
+#else // !defined(OS_IOS)
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12
+#define USE_ASL
+#endif
+#endif // defined(OS_IOS)
+
+#if defined(USE_ASL)
#include <asl.h>
+#else
+#include <os/log.h>
+#endif
+
#include <CoreFoundation/CoreFoundation.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach-o/dyld.h>
+
#elif defined(OS_POSIX)
#if defined(OS_NACL)
#include <sys/time.h> // timespec doesn't seem to be in <time.h>
@@ -578,10 +599,10 @@ LogMessage::~LogMessage() {
OutputDebugStringA(str_newline.c_str());
#elif defined(OS_MACOSX)
// In LOG_TO_SYSTEM_DEBUG_LOG mode, log messages are always written to
- // stderr. If stderr is /dev/null, also log via ASL (Apple System Log). If
- // there's something weird about stderr, assume that log messages are going
- // nowhere and log via ASL too. Messages logged via ASL show up in
- // Console.app.
+ // stderr. If stderr is /dev/null, also log via ASL (Apple System Log) or
+ // its successor OS_LOG. If there's something weird about stderr, assume
+ // that log messages are going nowhere and log via ASL/OS_LOG too.
+ // Messages logged via ASL/OS_LOG show up in Console.app.
//
// Programs started by launchd, as UI applications normally are, have had
// stderr connected to /dev/null since OS X 10.8. Prior to that, stderr was
@@ -591,14 +612,14 @@ LogMessage::~LogMessage() {
// Another alternative would be to determine whether stderr is a pipe to
// launchd and avoid logging via ASL only in that case. See 10.7.5
// CF-635.21/CFUtilities.c also_do_stderr(). This would result in logging to
- // both stderr and ASL even in tests, where it's undesirable to log to the
- // system log at all.
+ // both stderr and ASL/OS_LOG even in tests, where it's undesirable to log
+ // to the system log at all.
//
// Note that the ASL client by default discards messages whose levels are
// below ASL_LEVEL_NOTICE. It's possible to change that with
// asl_set_filter(), but this is pointless because syslogd normally applies
// the same filter.
- const bool log_via_asl = []() {
+ const bool log_to_system = []() {
struct stat stderr_stat;
if (fstat(fileno(stderr), &stderr_stat) == -1) {
return true;
@@ -616,25 +637,22 @@ LogMessage::~LogMessage() {
stderr_stat.st_rdev == dev_null_stat.st_rdev;
}();
- if (log_via_asl) {
+ if (log_to_system) {
// Log roughly the same way that CFLog() and NSLog() would. See 10.10.5
// CF-1153.18/CFUtilities.c __CFLogCString().
- //
- // The ASL facility is set to the main bundle ID if available. Otherwise,
- // "com.apple.console" is used.
CFBundleRef main_bundle = CFBundleGetMainBundle();
CFStringRef main_bundle_id_cf =
main_bundle ? CFBundleGetIdentifier(main_bundle) : nullptr;
- std::string asl_facility =
+ std::string main_bundle_id =
main_bundle_id_cf ? base::SysCFStringRefToUTF8(main_bundle_id_cf)
- : std::string("com.apple.console");
-
- class ASLClient {
+ : std::string("");
+#if defined(USE_ASL)
+ // The facility is set to the main bundle ID if available. Otherwise,
+ // "com.apple.console" is used.
+ const class ASLClient {
public:
- explicit ASLClient(const std::string& asl_facility)
- : client_(asl_open(nullptr,
- asl_facility.c_str(),
- ASL_OPT_NO_DELAY)) {}
+ explicit ASLClient(const std::string& facility)
+ : client_(asl_open(nullptr, facility.c_str(), ASL_OPT_NO_DELAY)) {}
~ASLClient() { asl_close(client_); }
aslclient get() const { return client_; }
@@ -642,9 +660,10 @@ LogMessage::~LogMessage() {
private:
aslclient client_;
DISALLOW_COPY_AND_ASSIGN(ASLClient);
- } asl_client(asl_facility);
+ } asl_client(main_bundle_id.empty() ? main_bundle_id
+ : "com.apple.console");
- class ASLMessage {
+ const class ASLMessage {
public:
ASLMessage() : message_(asl_new(ASL_TYPE_MSG)) {}
~ASLMessage() { asl_free(message_); }
@@ -690,6 +709,40 @@ LogMessage::~LogMessage() {
asl_set(asl_message.get(), ASL_KEY_MSG, str_newline.c_str());
asl_send(asl_client.get(), asl_message.get());
+#else // !defined(USE_ASL)
+ const class OSLog {
+ public:
+ explicit OSLog(const char* subsystem)
+ : os_log_(subsystem ? os_log_create(subsystem, "chromium_logging")
+ : OS_LOG_DEFAULT) {}
+ ~OSLog() {
+ if (os_log_ != OS_LOG_DEFAULT) {
+ os_release(os_log_);
+ }
+ }
+ os_log_t get() const { return os_log_; }
+
+ private:
+ os_log_t os_log_;
+ DISALLOW_COPY_AND_ASSIGN(OSLog);
+ } log(main_bundle_id.empty() ? nullptr : main_bundle_id.c_str());
+ const os_log_type_t os_log_type = [](LogSeverity severity) {
+ switch (severity) {
+ case LOG_INFO:
+ return OS_LOG_TYPE_INFO;
+ case LOG_WARNING:
+ return OS_LOG_TYPE_DEFAULT;
+ case LOG_ERROR:
+ return OS_LOG_TYPE_ERROR;
+ case LOG_FATAL:
+ return OS_LOG_TYPE_FAULT;
+ default:
+ return severity < 0 ? OS_LOG_TYPE_DEBUG : OS_LOG_TYPE_DEFAULT;
+ }
+ }(severity_);
+ os_log_with_type(log.get(), os_log_type, "%{public}s",
+ str_newline.c_str());
+#endif // defined(USE_ASL)
}
#elif defined(OS_ANDROID)
android_LogPriority priority =
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index a2cdaa13d70..201de8d1c43 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -1130,25 +1130,9 @@ inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
}
} // namespace std
-// The NOTIMPLEMENTED() macro annotates codepaths which have
-// not been implemented yet.
-//
-// The implementation of this macro is controlled by NOTIMPLEMENTED_POLICY:
-// 0 -- Do nothing (stripped by compiler)
-// 1 -- Warn at compile time
-// 2 -- Fail at compile time
-// 3 -- Fail at runtime (DCHECK)
-// 4 -- [default] LOG(ERROR) at runtime
-// 5 -- LOG(ERROR) at runtime, only once per call-site
-
-#ifndef NOTIMPLEMENTED_POLICY
-#if defined(OS_ANDROID) && defined(OFFICIAL_BUILD)
-#define NOTIMPLEMENTED_POLICY 0
-#else
-// Select default policy: LOG(ERROR)
-#define NOTIMPLEMENTED_POLICY 4
-#endif
-#endif
+// The NOTIMPLEMENTED() macro annotates codepaths which have not been
+// implemented yet. If output spam is a serious concern,
+// NOTIMPLEMENTED_LOG_ONCE can be used.
#if defined(COMPILER_GCC)
// On Linux, with GCC, we can use __PRETTY_FUNCTION__ to get the demangled name
@@ -1158,24 +1142,18 @@ inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
#define NOTIMPLEMENTED_MSG "NOT IMPLEMENTED"
#endif
-#if NOTIMPLEMENTED_POLICY == 0
+#if defined(OS_ANDROID) && defined(OFFICIAL_BUILD)
#define NOTIMPLEMENTED() EAT_STREAM_PARAMETERS
-#elif NOTIMPLEMENTED_POLICY == 1
-// TODO, figure out how to generate a warning
-#define NOTIMPLEMENTED() static_assert(false, "NOT_IMPLEMENTED")
-#elif NOTIMPLEMENTED_POLICY == 2
-#define NOTIMPLEMENTED() static_assert(false, "NOT_IMPLEMENTED")
-#elif NOTIMPLEMENTED_POLICY == 3
-#define NOTIMPLEMENTED() NOTREACHED()
-#elif NOTIMPLEMENTED_POLICY == 4
+#define NOTIMPLEMENTED_LOG_ONCE() EAT_STREAM_PARAMETERS
+#else
#define NOTIMPLEMENTED() LOG(ERROR) << NOTIMPLEMENTED_MSG
-#elif NOTIMPLEMENTED_POLICY == 5
-#define NOTIMPLEMENTED() do {\
- static bool logged_once = false;\
- LOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG;\
- logged_once = true;\
-} while(0);\
-EAT_STREAM_PARAMETERS
+#define NOTIMPLEMENTED_LOG_ONCE() \
+ do { \
+ static bool logged_once = false; \
+ LOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG; \
+ logged_once = true; \
+ } while (0); \
+ EAT_STREAM_PARAMETERS
#endif
#endif // BASE_LOGGING_H_
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 93808ef2dc5..0e32b68519d 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -9,6 +9,7 @@
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/test/scoped_feature_list.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -28,6 +29,10 @@
#include <windows.h>
#endif // OS_WIN
+#if defined(OS_FUCHSIA)
+#include "base/fuchsia/fuchsia_logging.h"
+#endif
+
namespace logging {
namespace {
@@ -315,9 +320,9 @@ void CrashChildMain(int death_location) {
struct sigaction act = {};
act.sa_sigaction = CheckCrashTestSighandler;
act.sa_flags = SA_SIGINFO;
- ASSERT_EQ(0, sigaction(SIGTRAP, &act, NULL));
- ASSERT_EQ(0, sigaction(SIGBUS, &act, NULL));
- ASSERT_EQ(0, sigaction(SIGILL, &act, NULL));
+ ASSERT_EQ(0, sigaction(SIGTRAP, &act, nullptr));
+ ASSERT_EQ(0, sigaction(SIGBUS, &act, nullptr));
+ ASSERT_EQ(0, sigaction(SIGILL, &act, nullptr));
DO_CHECK(death_location != 1);
DO_CHECK(death_location != 2);
printf("\n");
@@ -388,7 +393,7 @@ TEST_F(LoggingTest, DcheckStreamsAreLazy) {
DCHECK(mock_log_source.Log()) << mock_log_source.Log();
DPCHECK(mock_log_source.Log()) << mock_log_source.Log();
DCHECK_EQ(0, 0) << mock_log_source.Log();
- DCHECK_EQ(mock_log_source.Log(), static_cast<const char*>(NULL))
+ DCHECK_EQ(mock_log_source.Log(), static_cast<const char*>(nullptr))
<< mock_log_source.Log();
#endif
}
@@ -635,6 +640,26 @@ TEST_F(LoggingTest, AsanConditionalDCheckFeature) {
}
#endif // DCHECK_IS_ON() && defined(SYZYASAN)
+#if defined(OS_FUCHSIA)
+TEST_F(LoggingTest, FuchsiaLogging) {
+ MockLogSource mock_log_source;
+ EXPECT_CALL(mock_log_source, Log())
+ .Times(DCHECK_IS_ON() ? 2 : 1)
+ .WillRepeatedly(Return("log message"));
+
+ SetMinLogLevel(LOG_INFO);
+
+ EXPECT_TRUE(LOG_IS_ON(INFO));
+ EXPECT_TRUE((DCHECK_IS_ON() != 0) == DLOG_IS_ON(INFO));
+
+ ZX_LOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
+ ZX_DLOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
+
+ ZX_CHECK(true, ZX_ERR_INTERNAL);
+ ZX_DCHECK(true, ZX_ERR_INTERNAL);
+}
+#endif // defined(OS_FUCHSIA)
+
} // namespace
} // namespace logging
diff --git a/chromium/base/mac/objc_property_releaser.h b/chromium/base/mac/objc_property_releaser.h
deleted file mode 100644
index 3efdb39be38..00000000000
--- a/chromium/base/mac/objc_property_releaser.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_OBJC_PROPERTY_RELEASER_H_
-#define BASE_MAC_OBJC_PROPERTY_RELEASER_H_
-
-#import <Foundation/Foundation.h>
-
-#include "base/base_export.h"
-
-#if defined(__has_feature) && __has_feature(objc_arc)
-#error "In ARC code properties are release automatically. Don't use this class."
-#endif
-
-namespace base {
-namespace mac {
-
-// Deprecated: Use base::mac::ReleaseProperties instead.
-// ---
-
-// ObjCPropertyReleaser is a C++ class that can automatically release
-// synthesized Objective-C properties marked "retain" or "copy". The expected
-// use is to place an ObjCPropertyReleaser object within an Objective-C class
-// definition. When built with the -fobjc-call-cxx-cdtors compiler option,
-// the ObjCPropertyReleaser's destructor will be called when the Objective-C
-// object that owns it is deallocated, and it will send a -release message to
-// the instance variables backing the appropriate properties. If
-// -fobjc-call-cxx-cdtors is not in use, ObjCPropertyReleaser's
-// ReleaseProperties method can be called from -dealloc to achieve the same
-// effect.
-//
-// Example usage:
-//
-// @interface AllaysIBF : NSObject {
-// @private
-// NSString* string_;
-// NSMutableDictionary* dictionary_;
-// NSString* notAProperty_;
-// IBFDelegate* delegate_; // weak
-//
-// // It's recommended to put the class name into the property releaser's
-// // instance variable name to gracefully handle subclassing, where
-// // multiple classes in a hierarchy might want their own property
-// // releasers.
-// base::mac::ObjCPropertyReleaser propertyReleaser_AllaysIBF_;
-// }
-//
-// @property(retain, nonatomic) NSString* string;
-// @property(copy, nonatomic) NSMutableDictionary* dictionary;
-// @property(assign, nonatomic) IBFDelegate* delegate;
-// @property(retain, nonatomic) NSString* autoProp;
-//
-// @end // @interface AllaysIBF
-//
-// @implementation AllaysIBF
-//
-// @synthesize string = string_;
-// @synthesize dictionary = dictionary_;
-// @synthesize delegate = delegate_;
-// @synthesize autoProp;
-//
-// - (id)init {
-// if ((self = [super init])) {
-// // Initialize with [AllaysIBF class]. Never use [self class] because
-// // in the case of subclassing, it will return the most specific class
-// // for |self|, which may not be the same as [AllaysIBF class]. This
-// // would cause AllaysIBF's -.cxx_destruct or -dealloc to release
-// // instance variables that only exist in subclasses, likely causing
-// // mass disaster.
-// propertyReleaser_AllaysIBF_.Init(self, [AllaysIBF class]);
-// }
-// return self;
-// }
-//
-// @end // @implementation AllaysIBF
-//
-// When an instance of AllaysIBF is deallocated, the ObjCPropertyReleaser will
-// send a -release message to string_, dictionary_, and the compiler-created
-// autoProp instance variables. No -release will be sent to delegate_ as it
-// is marked "assign" and not "retain" or "copy". No -release will be sent to
-// notAProperty_ because it doesn't correspond to any declared @property.
-//
-// Another way of doing this would be to provide a base class that others can
-// inherit from, and to have the base class' -dealloc walk the property lists
-// of all subclasses in an object to send the -release messages. Since this
-// involves a base reaching into its subclasses, it's deemed scary, so don't
-// do it. ObjCPropertyReleaser's design ensures that the property releaser
-// will only operate on instance variables in the immediate object in which
-// the property releaser is placed.
-
-class BASE_EXPORT ObjCPropertyReleaser {
- public:
- // ObjCPropertyReleaser can only be owned by an Objective-C object, so its
- // memory is always guaranteed to be 0-initialized. Not defining the default
- // constructor can prevent an otherwise no-op -.cxx_construct method from
- // showing up in Objective-C classes that contain a ObjCPropertyReleaser.
-
- // Upon destruction (expected to occur from an Objective-C object's
- // -.cxx_destruct method), release all properties.
- ~ObjCPropertyReleaser() {
- ReleaseProperties();
- }
-
- // Initialize this object so that it's armed to release the properties of
- // object |object|, which must be of type |classy|. The class argument must
- // be supplied separately and cannot be gleaned from the object's own type
- // because an object will allays identify itself as the most-specific type
- // that describes it, but the ObjCPropertyReleaser needs to know which class
- // type in the class hierarchy it's responsible for releasing properties
- // for. For the same reason, Init must be called with a |classy| argument
- // initialized using a +class (class) method such as [MyClass class], and
- // never a -class (instance) method such as [self class].
- //
- // -.cxx_construct can only call the default constructor, but
- // ObjCPropertyReleaser needs to know about the Objective-C object that owns
- // it, so this can't be handled in a constructor, it needs to be a distinct
- // Init method.
- void Init(id object, Class classy);
-
- // Release all of the properties in object_ defined in class_ as either
- // "retain" or "copy" and with an identifiable backing instance variable.
- // Properties must be synthesized to have identifiable instance variables.
- void ReleaseProperties();
-
- private:
- id object_;
- Class class_;
-};
-
-} // namespace mac
-} // namespace base
-
-#endif // BASE_MAC_OBJC_PROPERTY_RELEASER_H_
diff --git a/chromium/base/mac/objc_property_releaser.mm b/chromium/base/mac/objc_property_releaser.mm
deleted file mode 100644
index f7ee88fbcc4..00000000000
--- a/chromium/base/mac/objc_property_releaser.mm
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import "base/mac/objc_property_releaser.h"
-
-#import <objc/runtime.h>
-#include <stdlib.h>
-
-#include <string>
-
-#include "base/logging.h"
-
-namespace base {
-namespace mac {
-
-namespace {
-
-// Returns the name of the instance variable backing the property, if known,
-// if the property is marked "retain" or "copy". If the instance variable name
-// is not known (perhaps because it was not automatically associated with the
-// property by @synthesize) or if the property is not "retain" or "copy",
-// returns an empty string.
-std::string ReleasableInstanceName(objc_property_t property) {
- // TODO(mark): Starting in newer system releases, the Objective-C runtime
- // provides a function to break the property attribute string into
- // individual attributes (property_copyAttributeList), as well as a function
- // to look up the value of a specific attribute
- // (property_copyAttributeValue). When the SDK defining that interface is
- // final, this function should be adapted to walk the attribute list as
- // returned by property_copyAttributeList when that function is available in
- // preference to scanning through the attribute list manually.
-
- // The format of the string returned by property_getAttributes is documented
- // at
- // http://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtPropertyIntrospection.html#//apple_ref/doc/uid/TP40008048-CH101-SW6
- const char* property_attributes = property_getAttributes(property);
-
- std::string instance_name;
- bool releasable = false;
- while (*property_attributes) {
- char name = *property_attributes;
-
- const char* value = ++property_attributes;
- while (*property_attributes && *property_attributes != ',') {
- ++property_attributes;
- }
-
- switch (name) {
- // It might seem intelligent to check the type ('T') attribute to verify
- // that it identifies an NSObject-derived type (the attribute value
- // begins with '@'.) This is a bad idea beacuse it fails to identify
- // CFTypeRef-based properties declared as __attribute__((NSObject)),
- // which just show up as pointers to their underlying CFType structs.
- //
- // Quoting
- // http://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/ObjectiveC/Chapters/ocProperties.html#//apple_ref/doc/uid/TP30001163-CH17-SW27
- //
- // > In Mac OS X v10.6 and later, you can use the __attribute__ keyword
- // > to specify that a Core Foundation property should be treated like
- // > an Objective-C object for memory management:
- // > @property(retain) __attribute__((NSObject)) CFDictionaryRef
- // > myDictionary;
- case 'C': // copy
- case '&': // retain
- releasable = true;
- break;
- case 'V': // instance variable name
- // 'V' is specified as the last attribute to occur in the
- // documentation, but empirically, it's not always the last. In
- // GC-supported or GC-required code, the 'P' (GC-eligible) attribute
- // occurs after 'V'.
- instance_name.assign(value, property_attributes - value);
- break;
- }
-
- if (*property_attributes) {
- ++property_attributes;
- }
- }
-
- if (releasable) {
- return instance_name;
- }
-
- return std::string();
-}
-
-} // namespace
-
-void ObjCPropertyReleaser::Init(id object, Class classy) {
- DCHECK(!object_);
- DCHECK(!class_);
- CHECK([object isKindOfClass:classy]);
-
- object_ = object;
- class_ = classy;
-}
-
-void ObjCPropertyReleaser::ReleaseProperties() {
- DCHECK(object_);
- DCHECK(class_);
-
- unsigned int property_count = 0;
- objc_property_t* properties = class_copyPropertyList(class_, &property_count);
-
- for (unsigned int property_index = 0;
- property_index < property_count;
- ++property_index) {
- objc_property_t property = properties[property_index];
- std::string instance_name = ReleasableInstanceName(property);
- if (!instance_name.empty()) {
- id instance_value = nil;
- Ivar instance_variable =
- object_getInstanceVariable(object_, instance_name.c_str(),
- (void**)&instance_value);
- DCHECK(instance_variable);
- [instance_value release];
- }
- }
-
- free(properties);
-
- // Clear object_ and class_ in case this ObjCPropertyReleaser will live on.
- // It's only expected to release the properties it supervises once per Init.
- object_ = nil;
- class_ = nil;
-}
-
-} // namespace mac
-} // namespace base
diff --git a/chromium/base/mac/objc_property_releaser_unittest.mm b/chromium/base/mac/objc_property_releaser_unittest.mm
deleted file mode 100644
index d496c7f865c..00000000000
--- a/chromium/base/mac/objc_property_releaser_unittest.mm
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import <Foundation/Foundation.h>
-
-#import "base/mac/objc_property_releaser.h"
-#import "base/mac/scoped_nsautorelease_pool.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// "When I'm alone, I count myself."
-// --Count von Count, http://www.youtube.com/watch?v=FKzszqa9WA4
-
-namespace {
-
-// The number of CountVonCounts outstanding.
-int ah_ah_ah;
-
-// NumberHolder exists to exercise the property attribute string parser by
-// providing a named struct and an anonymous union.
-struct NumberHolder {
- union {
- long long sixty_four;
- int thirty_two;
- short sixteen;
- char eight;
- } what;
- enum {
- SIXTY_FOUR,
- THIRTY_TWO,
- SIXTEEN,
- EIGHT
- } how;
-};
-
-} // namespace
-
-@interface PropertyReleaser_CountVonCount : NSObject<NSCopying>
-
-typedef PropertyReleaser_CountVonCount CountVonCount;
-
-+ (CountVonCount*)countVonCount;
-
-@end // @interface PropertyReleaser_CountVonCount
-
-@implementation PropertyReleaser_CountVonCount
-
-+ (CountVonCount*)countVonCount {
- return [[[CountVonCount alloc] init] autorelease];
-}
-
-- (id)init {
- ++ah_ah_ah;
- return [super init];
-}
-
-- (void)dealloc {
- --ah_ah_ah;
- [super dealloc];
-}
-
-- (id)copyWithZone:(NSZone*)zone {
- return [[CountVonCount allocWithZone:zone] init];
-}
-
-@end // @implementation PropertyReleaser_CountVonCount
-
-@interface PropertyReleaser_ObjCPropertyTestBase : NSObject {
- @private
- CountVonCount* baseCvcRetain_;
- CountVonCount* baseCvcCopy_;
- CountVonCount* baseCvcAssign_;
- CountVonCount* baseCvcNotProperty_;
- CountVonCount* baseCvcNil_;
- CountVonCount* baseCvcCustom_;
- int baseInt_;
- double baseDouble_;
- void* basePointer_;
- NumberHolder baseStruct_;
-
- base::mac::ObjCPropertyReleaser propertyReleaser_ObjCPropertyTestBase_;
-}
-
-typedef PropertyReleaser_ObjCPropertyTestBase ObjCPropertyTestBase;
-
-@property(retain, nonatomic) CountVonCount* baseCvcRetain;
-@property(copy, nonatomic) CountVonCount* baseCvcCopy;
-@property(assign, nonatomic) CountVonCount* baseCvcAssign;
-@property(retain, nonatomic) CountVonCount* baseCvcNil;
-@property(retain, nonatomic, getter=baseCustom, setter=setBaseCustom:)
- CountVonCount* baseCvcCustom;
-@property(retain, nonatomic) CountVonCount* baseCvcDynamic;
-@property(assign, nonatomic) int baseInt;
-@property(assign, nonatomic) double baseDouble;
-@property(assign, nonatomic) void* basePointer;
-@property(assign, nonatomic) NumberHolder baseStruct;
-
-- (void)setBaseCvcNotProperty:(CountVonCount*)cvc;
-
-@end // @interface ObjCPropertyTestBase
-
-@implementation PropertyReleaser_ObjCPropertyTestBase
-
-@synthesize baseCvcRetain = baseCvcRetain_;
-@synthesize baseCvcCopy = baseCvcCopy_;
-@synthesize baseCvcAssign = baseCvcAssign_;
-@synthesize baseCvcNil = baseCvcNil_;
-@synthesize baseCvcCustom = baseCvcCustom_;
-@dynamic baseCvcDynamic;
-@synthesize baseInt = baseInt_;
-@synthesize baseDouble = baseDouble_;
-@synthesize basePointer = basePointer_;
-@synthesize baseStruct = baseStruct_;
-
-- (id)init {
- if ((self = [super init])) {
- propertyReleaser_ObjCPropertyTestBase_.Init(
- self, [ObjCPropertyTestBase class]);
- }
- return self;
-}
-
-- (void)dealloc {
- [baseCvcNotProperty_ release];
- [super dealloc];
-}
-
-- (void)setBaseCvcNotProperty:(CountVonCount*)cvc {
- if (cvc != baseCvcNotProperty_) {
- [baseCvcNotProperty_ release];
- baseCvcNotProperty_ = [cvc retain];
- }
-}
-
-@end // @implementation ObjCPropertyTestBase
-
-@protocol PropertyReleaser_ObjCPropertyTestProtocol
-
-@property(retain, nonatomic) CountVonCount* protoCvcRetain;
-@property(copy, nonatomic) CountVonCount* protoCvcCopy;
-@property(assign, nonatomic) CountVonCount* protoCvcAssign;
-@property(retain, nonatomic) CountVonCount* protoCvcNil;
-@property(retain, nonatomic, getter=protoCustom, setter=setProtoCustom:)
- CountVonCount* protoCvcCustom;
-@property(retain, nonatomic) CountVonCount* protoCvcDynamic;
-@property(assign, nonatomic) int protoInt;
-@property(assign, nonatomic) double protoDouble;
-@property(assign, nonatomic) void* protoPointer;
-@property(assign, nonatomic) NumberHolder protoStruct;
-
-@end // @protocol PropertyReleaser_ObjCPropertyTestProtocol
-
-@interface PropertyReleaser_ObjCPropertyTestDerived
- : ObjCPropertyTestBase<PropertyReleaser_ObjCPropertyTestProtocol> {
- @private
- CountVonCount* derivedCvcRetain_;
- CountVonCount* derivedCvcCopy_;
- CountVonCount* derivedCvcAssign_;
- CountVonCount* derivedCvcNotProperty_;
- CountVonCount* derivedCvcNil_;
- CountVonCount* derivedCvcCustom_;
- int derivedInt_;
- double derivedDouble_;
- void* derivedPointer_;
- NumberHolder derivedStruct_;
-
- CountVonCount* protoCvcRetain_;
- CountVonCount* protoCvcCopy_;
- CountVonCount* protoCvcAssign_;
- CountVonCount* protoCvcNil_;
- CountVonCount* protoCvcCustom_;
- int protoInt_;
- double protoDouble_;
- void* protoPointer_;
- NumberHolder protoStruct_;
-
- base::mac::ObjCPropertyReleaser propertyReleaser_ObjCPropertyTestDerived_;
-}
-
-typedef PropertyReleaser_ObjCPropertyTestDerived ObjCPropertyTestDerived;
-
-@property(retain, nonatomic) CountVonCount* derivedCvcRetain;
-@property(copy, nonatomic) CountVonCount* derivedCvcCopy;
-@property(assign, nonatomic) CountVonCount* derivedCvcAssign;
-@property(retain, nonatomic) CountVonCount* derivedCvcNil;
-@property(retain, nonatomic, getter=derivedCustom, setter=setDerivedCustom:)
- CountVonCount* derivedCvcCustom;
-@property(retain, nonatomic) CountVonCount* derivedCvcDynamic;
-@property(assign, nonatomic) int derivedInt;
-@property(assign, nonatomic) double derivedDouble;
-@property(assign, nonatomic) void* derivedPointer;
-@property(assign, nonatomic) NumberHolder derivedStruct;
-
-- (void)setDerivedCvcNotProperty:(CountVonCount*)cvc;
-
-@end // @interface ObjCPropertyTestDerived
-
-@implementation PropertyReleaser_ObjCPropertyTestDerived
-
-@synthesize derivedCvcRetain = derivedCvcRetain_;
-@synthesize derivedCvcCopy = derivedCvcCopy_;
-@synthesize derivedCvcAssign = derivedCvcAssign_;
-@synthesize derivedCvcNil = derivedCvcNil_;
-@synthesize derivedCvcCustom = derivedCvcCustom_;
-@dynamic derivedCvcDynamic;
-@synthesize derivedInt = derivedInt_;
-@synthesize derivedDouble = derivedDouble_;
-@synthesize derivedPointer = derivedPointer_;
-@synthesize derivedStruct = derivedStruct_;
-
-@synthesize protoCvcRetain = protoCvcRetain_;
-@synthesize protoCvcCopy = protoCvcCopy_;
-@synthesize protoCvcAssign = protoCvcAssign_;
-@synthesize protoCvcNil = protoCvcNil_;
-@synthesize protoCvcCustom = protoCvcCustom_;
-@dynamic protoCvcDynamic;
-@synthesize protoInt = protoInt_;
-@synthesize protoDouble = protoDouble_;
-@synthesize protoPointer = protoPointer_;
-@synthesize protoStruct = protoStruct_;
-
-- (id)init {
- if ((self = [super init])) {
- propertyReleaser_ObjCPropertyTestDerived_.Init(
- self, [ObjCPropertyTestDerived class]);
- }
- return self;
-}
-
-- (void)dealloc {
- [derivedCvcNotProperty_ release];
- [super dealloc];
-}
-
-- (void)setDerivedCvcNotProperty:(CountVonCount*)cvc {
- if (cvc != derivedCvcNotProperty_) {
- [derivedCvcNotProperty_ release];
- derivedCvcNotProperty_ = [cvc retain];
- }
-}
-
-@end // @implementation ObjCPropertyTestDerived
-
-namespace {
-
-TEST(ObjCPropertyReleaserTest, SesameStreet) {
- ObjCPropertyTestDerived* test_object = [[ObjCPropertyTestDerived alloc] init];
-
- // Assure a clean slate.
- EXPECT_EQ(0, ah_ah_ah);
- EXPECT_EQ(1U, [test_object retainCount]);
-
- CountVonCount* baseAssign = [[CountVonCount alloc] init];
- CountVonCount* derivedAssign = [[CountVonCount alloc] init];
- CountVonCount* protoAssign = [[CountVonCount alloc] init];
-
- // Make sure that worked before things get more involved.
- EXPECT_EQ(3, ah_ah_ah);
-
- {
- base::mac::ScopedNSAutoreleasePool pool;
-
- test_object.baseCvcRetain = [CountVonCount countVonCount];
- test_object.baseCvcCopy = [CountVonCount countVonCount];
- test_object.baseCvcAssign = baseAssign;
- test_object.baseCvcCustom = [CountVonCount countVonCount];
- [test_object setBaseCvcNotProperty:[CountVonCount countVonCount]];
-
- // That added 4 objects, plus 1 more that was copied.
- EXPECT_EQ(8, ah_ah_ah);
-
- test_object.derivedCvcRetain = [CountVonCount countVonCount];
- test_object.derivedCvcCopy = [CountVonCount countVonCount];
- test_object.derivedCvcAssign = derivedAssign;
- test_object.derivedCvcCustom = [CountVonCount countVonCount];
- [test_object setDerivedCvcNotProperty:[CountVonCount countVonCount]];
-
- // That added 4 objects, plus 1 more that was copied.
- EXPECT_EQ(13, ah_ah_ah);
-
- test_object.protoCvcRetain = [CountVonCount countVonCount];
- test_object.protoCvcCopy = [CountVonCount countVonCount];
- test_object.protoCvcAssign = protoAssign;
- test_object.protoCvcCustom = [CountVonCount countVonCount];
-
- // That added 3 objects, plus 1 more that was copied.
- EXPECT_EQ(17, ah_ah_ah);
- }
-
- // Now that the autorelease pool has been popped, the 3 objects that were
- // copied when placed into the test object will have been deallocated.
- EXPECT_EQ(14, ah_ah_ah);
-
- // Make sure that the setters work and have the expected semantics.
- test_object.baseCvcRetain = nil;
- test_object.baseCvcCopy = nil;
- test_object.baseCvcAssign = nil;
- test_object.baseCvcCustom = nil;
- test_object.derivedCvcRetain = nil;
- test_object.derivedCvcCopy = nil;
- test_object.derivedCvcAssign = nil;
- test_object.derivedCvcCustom = nil;
- test_object.protoCvcRetain = nil;
- test_object.protoCvcCopy = nil;
- test_object.protoCvcAssign = nil;
- test_object.protoCvcCustom = nil;
-
- // The CountVonCounts marked "retain" and "copy" should have been
- // deallocated. Those marked assign should not have been. The only ones that
- // should exist now are the ones marked "assign" and the ones held in
- // non-property instance variables.
- EXPECT_EQ(5, ah_ah_ah);
-
- {
- base::mac::ScopedNSAutoreleasePool pool;
-
- // Put things back to how they were.
- test_object.baseCvcRetain = [CountVonCount countVonCount];
- test_object.baseCvcCopy = [CountVonCount countVonCount];
- test_object.baseCvcAssign = baseAssign;
- test_object.baseCvcCustom = [CountVonCount countVonCount];
- test_object.derivedCvcRetain = [CountVonCount countVonCount];
- test_object.derivedCvcCopy = [CountVonCount countVonCount];
- test_object.derivedCvcAssign = derivedAssign;
- test_object.derivedCvcCustom = [CountVonCount countVonCount];
- test_object.protoCvcRetain = [CountVonCount countVonCount];
- test_object.protoCvcCopy = [CountVonCount countVonCount];
- test_object.protoCvcAssign = protoAssign;
- test_object.protoCvcCustom = [CountVonCount countVonCount];
-
- // 9 more CountVonCounts, 3 of which were copied.
- EXPECT_EQ(17, ah_ah_ah);
- }
-
- // Now that the autorelease pool has been popped, the 3 copies are gone.
- EXPECT_EQ(14, ah_ah_ah);
-
- // Releasing the test object should get rid of everything that it owns.
- [test_object release];
-
- // The property releaser should have released all of the CountVonCounts
- // associated with properties marked "retain" or "copy". The -dealloc
- // methods in each should have released the single non-property objects in
- // each. Only the CountVonCounts assigned to the properties marked "assign"
- // should remain.
- EXPECT_EQ(3, ah_ah_ah);
-
- [baseAssign release];
- [derivedAssign release];
- [protoAssign release];
-
- // Zero! Zero counts! Ah, ah, ah.
- EXPECT_EQ(0, ah_ah_ah);
-}
-
-} // namespace
diff --git a/chromium/base/mac/sdk_forward_declarations.h b/chromium/base/mac/sdk_forward_declarations.h
index cc405f91e55..af7b2978600 100644
--- a/chromium/base/mac/sdk_forward_declarations.h
+++ b/chromium/base/mac/sdk_forward_declarations.h
@@ -100,6 +100,7 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
@property(readonly, copy) NSString* activityType;
@property(copy) NSDictionary* userInfo;
@property(copy) NSURL* webpageURL;
+@property(copy) NSString* title;
- (instancetype)initWithActivityType:(NSString*)activityType;
- (void)becomeCurrent;
- (void)invalidate;
diff --git a/chromium/base/macros.h b/chromium/base/macros.h
index d7abfc7f793..b1c5fe75149 100644
--- a/chromium/base/macros.h
+++ b/chromium/base/macros.h
@@ -68,8 +68,8 @@ inline void ignore_result(const T&) {
namespace base {
// Use these to declare and define a static local variable (static T;) so that
-// it is leaked so that its destructors are not called at exit. If you need
-// thread-safe initialization, use base/lazy_instance.h instead.
+// it is leaked so that its destructors are not called at exit. This is
+// thread-safe.
#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
diff --git a/chromium/base/memory/aligned_memory.cc b/chromium/base/memory/aligned_memory.cc
index 526a49587a4..93cbeb57f74 100644
--- a/chromium/base/memory/aligned_memory.cc
+++ b/chromium/base/memory/aligned_memory.cc
@@ -17,7 +17,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_GT(size, 0U);
DCHECK_EQ(alignment & (alignment - 1), 0U);
DCHECK_EQ(alignment % sizeof(void*), 0U);
- void* ptr = NULL;
+ void* ptr = nullptr;
#if defined(COMPILER_MSVC)
ptr = _aligned_malloc(size, alignment);
// Android technically supports posix_memalign(), but does not expose it in
@@ -29,7 +29,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
ptr = memalign(alignment, size);
#else
if (posix_memalign(&ptr, alignment, size))
- ptr = NULL;
+ ptr = nullptr;
#endif
// Since aligned allocations may fail for non-memory related reasons, force a
// crash if we encounter a failed allocation; maintaining consistent behavior
diff --git a/chromium/base/memory/discardable_memory.cc b/chromium/base/memory/discardable_memory.cc
index d50f1853e19..f0730aa403f 100644
--- a/chromium/base/memory/discardable_memory.cc
+++ b/chromium/base/memory/discardable_memory.cc
@@ -6,10 +6,8 @@
namespace base {
-DiscardableMemory::DiscardableMemory() {
-}
+DiscardableMemory::DiscardableMemory() = default;
-DiscardableMemory::~DiscardableMemory() {
-}
+DiscardableMemory::~DiscardableMemory() = default;
} // namespace base
diff --git a/chromium/base/memory/discardable_memory_allocator.cc b/chromium/base/memory/discardable_memory_allocator.cc
index ee288ff4b20..3dbb27672b3 100644
--- a/chromium/base/memory/discardable_memory_allocator.cc
+++ b/chromium/base/memory/discardable_memory_allocator.cc
@@ -9,21 +9,21 @@
namespace base {
namespace {
-DiscardableMemoryAllocator* g_allocator = nullptr;
+DiscardableMemoryAllocator* g_discardable_allocator = nullptr;
} // namespace
// static
void DiscardableMemoryAllocator::SetInstance(
DiscardableMemoryAllocator* allocator) {
- DCHECK(!allocator || !g_allocator);
- g_allocator = allocator;
+ DCHECK(!allocator || !g_discardable_allocator);
+ g_discardable_allocator = allocator;
}
// static
DiscardableMemoryAllocator* DiscardableMemoryAllocator::GetInstance() {
- DCHECK(g_allocator);
- return g_allocator;
+ DCHECK(g_discardable_allocator);
+ return g_discardable_allocator;
}
} // namespace base
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index 2b7f7381a97..7aeddab63d2 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -152,8 +152,7 @@ DiscardableSharedMemory::DiscardableSharedMemory(
locked_page_count_(0) {
}
-DiscardableSharedMemory::~DiscardableSharedMemory() {
-}
+DiscardableSharedMemory::~DiscardableSharedMemory() = default;
bool DiscardableSharedMemory::CreateAndMap(size_t size) {
CheckedNumeric<size_t> checked_size = size;
diff --git a/chromium/base/memory/discardable_shared_memory_unittest.cc b/chromium/base/memory/discardable_shared_memory_unittest.cc
index 2858f50da6d..caf7eaf047c 100644
--- a/chromium/base/memory/discardable_shared_memory_unittest.cc
+++ b/chromium/base/memory/discardable_shared_memory_unittest.cc
@@ -19,7 +19,7 @@ namespace {
class TestDiscardableSharedMemory : public DiscardableSharedMemory {
public:
- TestDiscardableSharedMemory() {}
+ TestDiscardableSharedMemory() = default;
explicit TestDiscardableSharedMemory(SharedMemoryHandle handle)
: DiscardableSharedMemory(handle) {}
diff --git a/chromium/base/memory/linked_ptr_unittest.cc b/chromium/base/memory/linked_ptr_unittest.cc
index f6bc410942b..7e0c9e355d5 100644
--- a/chromium/base/memory/linked_ptr_unittest.cc
+++ b/chromium/base/memory/linked_ptr_unittest.cc
@@ -36,18 +36,18 @@ TEST(LinkedPtrTest, Test) {
linked_ptr<A> a0, a1, a2;
a0 = a0;
a1 = a2;
- ASSERT_EQ(a0.get(), static_cast<A*>(NULL));
- ASSERT_EQ(a1.get(), static_cast<A*>(NULL));
- ASSERT_EQ(a2.get(), static_cast<A*>(NULL));
- ASSERT_TRUE(a0 == NULL);
- ASSERT_TRUE(a1 == NULL);
- ASSERT_TRUE(a2 == NULL);
+ ASSERT_EQ(a0.get(), static_cast<A*>(nullptr));
+ ASSERT_EQ(a1.get(), static_cast<A*>(nullptr));
+ ASSERT_EQ(a2.get(), static_cast<A*>(nullptr));
+ ASSERT_TRUE(a0 == nullptr);
+ ASSERT_TRUE(a1 == nullptr);
+ ASSERT_TRUE(a2 == nullptr);
{
linked_ptr<A> a3(new A);
a0 = a3;
ASSERT_TRUE(a0 == a3);
- ASSERT_TRUE(a0 != NULL);
+ ASSERT_TRUE(a0 != nullptr);
ASSERT_TRUE(a0.get() == a3);
ASSERT_TRUE(a0 == a3.get());
linked_ptr<A> a4(a0);
@@ -60,7 +60,7 @@ TEST(LinkedPtrTest, Test) {
linked_ptr<A> a6(b0);
ASSERT_TRUE(b0 == a6);
ASSERT_TRUE(a6 == b0);
- ASSERT_TRUE(b0 != NULL);
+ ASSERT_TRUE(b0 != nullptr);
a5 = b0;
a5 = b0;
a3->Use();
diff --git a/chromium/base/memory/manual_constructor.h b/chromium/base/memory/manual_constructor.h
deleted file mode 100644
index e968d043122..00000000000
--- a/chromium/base/memory/manual_constructor.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// ManualConstructor statically-allocates space in which to store some
-// object, but does not initialize it. You can then call the constructor
-// and destructor for the object yourself as you see fit. This is useful
-// for memory management optimizations, where you want to initialize and
-// destroy an object multiple times but only allocate it once.
-//
-// (When I say ManualConstructor statically allocates space, I mean that
-// the ManualConstructor object itself is forced to be the right size.)
-//
-// For example usage, check out base/containers/small_map.h.
-
-#ifndef BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
-#define BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
-
-#include <stddef.h>
-
-#include "base/compiler_specific.h"
-#include "base/memory/aligned_memory.h"
-
-namespace base {
-
-template <typename Type>
-class ManualConstructor {
- public:
- // No constructor or destructor because one of the most useful uses of
- // this class is as part of a union, and members of a union cannot have
- // constructors or destructors. And, anyway, the whole point of this
- // class is to bypass these.
-
- // Support users creating arrays of ManualConstructor<>s. This ensures that
- // the array itself has the correct alignment.
- static void* operator new[](size_t size) {
- return AlignedAlloc(size, alignof(Type));
- }
- static void operator delete[](void* mem) {
- AlignedFree(mem);
- }
-
- inline Type* get() { return reinterpret_cast<Type*>(space_); }
- inline const Type* get() const {
- return reinterpret_cast<const Type*>(space_);
- }
-
- inline Type* operator->() { return get(); }
- inline const Type* operator->() const { return get(); }
-
- inline Type& operator*() { return *get(); }
- inline const Type& operator*() const { return *get(); }
-
- template <typename... Ts>
- inline void Init(Ts&&... params) {
- new (space_) Type(std::forward<Ts>(params)...);
- }
-
- inline void InitFromMove(ManualConstructor<Type>&& o) {
- Init(std::move(*o));
- }
-
- inline void Destroy() {
- get()->~Type();
- }
-
- private:
- alignas(Type) char space_[sizeof(Type)];
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
diff --git a/chromium/base/memory/memory_coordinator_client_registry.cc b/chromium/base/memory/memory_coordinator_client_registry.cc
index 67064581ab0..45b4a7f5bc9 100644
--- a/chromium/base/memory/memory_coordinator_client_registry.cc
+++ b/chromium/base/memory/memory_coordinator_client_registry.cc
@@ -17,7 +17,7 @@ MemoryCoordinatorClientRegistry::GetInstance() {
MemoryCoordinatorClientRegistry::MemoryCoordinatorClientRegistry()
: clients_(new ClientList) {}
-MemoryCoordinatorClientRegistry::~MemoryCoordinatorClientRegistry() {}
+MemoryCoordinatorClientRegistry::~MemoryCoordinatorClientRegistry() = default;
void MemoryCoordinatorClientRegistry::Register(
MemoryCoordinatorClient* client) {
diff --git a/chromium/base/memory/memory_coordinator_proxy.cc b/chromium/base/memory/memory_coordinator_proxy.cc
index f82e928a4b5..4e22fe04fcf 100644
--- a/chromium/base/memory/memory_coordinator_proxy.cc
+++ b/chromium/base/memory/memory_coordinator_proxy.cc
@@ -12,11 +12,9 @@ MemoryCoordinator* g_memory_coordinator = nullptr;
} // namespace
-MemoryCoordinatorProxy::MemoryCoordinatorProxy() {
-}
+MemoryCoordinatorProxy::MemoryCoordinatorProxy() = default;
-MemoryCoordinatorProxy::~MemoryCoordinatorProxy() {
-}
+MemoryCoordinatorProxy::~MemoryCoordinatorProxy() = default;
// static
MemoryCoordinatorProxy* MemoryCoordinatorProxy::GetInstance() {
diff --git a/chromium/base/memory/memory_pressure_listener.cc b/chromium/base/memory/memory_pressure_listener.cc
index c89ce3a469b..669fb17b7af 100644
--- a/chromium/base/memory/memory_pressure_listener.cc
+++ b/chromium/base/memory/memory_pressure_listener.cc
@@ -11,47 +11,46 @@ namespace base {
namespace {
+// This class is thread safe and internally synchronized.
class MemoryPressureObserver {
public:
- MemoryPressureObserver()
- : async_observers_(new ObserverListThreadSafe<MemoryPressureListener>),
- sync_observers_(new ObserverList<MemoryPressureListener>) {
- }
+ // There is at most one MemoryPressureObserver and it is never deleted.
+ ~MemoryPressureObserver() = delete;
void AddObserver(MemoryPressureListener* listener, bool sync) {
async_observers_->AddObserver(listener);
if (sync) {
AutoLock lock(sync_observers_lock_);
- sync_observers_->AddObserver(listener);
+ sync_observers_.AddObserver(listener);
}
}
void RemoveObserver(MemoryPressureListener* listener) {
async_observers_->RemoveObserver(listener);
AutoLock lock(sync_observers_lock_);
- sync_observers_->RemoveObserver(listener);
+ sync_observers_.RemoveObserver(listener);
}
- void Notify(MemoryPressureListener::MemoryPressureLevel
- memory_pressure_level) {
- async_observers_->Notify(FROM_HERE,
- &MemoryPressureListener::Notify, memory_pressure_level);
+ void Notify(
+ MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
+ async_observers_->Notify(FROM_HERE, &MemoryPressureListener::Notify,
+ memory_pressure_level);
AutoLock lock(sync_observers_lock_);
- for (auto& observer : *sync_observers_)
- observer.MemoryPressureListener::SyncNotify(memory_pressure_level);
+ for (auto& observer : sync_observers_)
+ observer.SyncNotify(memory_pressure_level);
}
private:
- scoped_refptr<ObserverListThreadSafe<MemoryPressureListener>>
- async_observers_;
- ObserverList<MemoryPressureListener>* sync_observers_;
+ const scoped_refptr<ObserverListThreadSafe<MemoryPressureListener>>
+ async_observers_ = base::MakeRefCounted<
+ ObserverListThreadSafe<MemoryPressureListener>>();
+ ObserverList<MemoryPressureListener> sync_observers_;
Lock sync_observers_lock_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureObserver);
};
+// Gets the shared MemoryPressureObserver singleton instance.
MemoryPressureObserver* GetMemoryPressureObserver() {
- static auto* observer = new MemoryPressureObserver();
+ static auto* const observer = new MemoryPressureObserver();
return observer;
}
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc b/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc
index 23b037cea6e..d203afb0ad9 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc
@@ -52,7 +52,7 @@ class TestMemoryPressureMonitor : public MemoryPressureMonitor {
// function.
StopObserving();
}
- ~TestMemoryPressureMonitor() override {}
+ ~TestMemoryPressureMonitor() override = default;
void SetMemoryInPercentOverride(int percent) {
memory_in_percent_override_ = percent;
diff --git a/chromium/base/memory/protected_memory.h b/chromium/base/memory/protected_memory.h
new file mode 100644
index 00000000000..7ae5fda7ff3
--- /dev/null
+++ b/chromium/base/memory/protected_memory.h
@@ -0,0 +1,252 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Protected memory is memory holding security-sensitive data intended to be
+// left read-only for the majority of its lifetime to avoid being overwritten
+// by attackers. ProtectedMemory is a simple wrapper around platform-specific
+// APIs to set memory read-write and read-only when required. Protected memory
+// should be set read-write for the minimum amount of time required.
+
+// Variables stored in protected memory must be global variables declared in the
+// PROTECTED_MEMORY_SECTION so they are set to read-only upon start-up.
+
+#ifndef BASE_MEMORY_PROTECTED_MEMORY_H_
+#define BASE_MEMORY_PROTECTED_MEMORY_H_
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#define PROTECTED_MEMORY_ENABLED 1
+
+#if defined(OS_LINUX)
+// Define the section read-only
+__asm__(".section protected_memory, \"a\"\n\t");
+#define PROTECTED_MEMORY_SECTION __attribute__((section("protected_memory")))
+
+// Explicitly mark these variables hidden so the symbols are local to the
+// currently built component. Otherwise they are created with global (external)
+// linkage and component builds would break because a single pair of these
+// symbols would override the rest.
+__attribute__((visibility("hidden"))) extern char __start_protected_memory;
+__attribute__((visibility("hidden"))) extern char __stop_protected_memory;
+
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+// The segment the section is in is defined read-only with a linker flag in
+// build/config/mac/BUILD.gn
+#define PROTECTED_MEMORY_SECTION \
+ __attribute__((section("PROTECTED_MEMORY, protected_memory")))
+extern char __start_protected_memory __asm(
+ "section$start$PROTECTED_MEMORY$protected_memory");
+extern char __stop_protected_memory __asm(
+ "section$end$PROTECTED_MEMORY$protected_memory");
+
+#else
+#undef PROTECTED_MEMORY_ENABLED
+#define PROTECTED_MEMORY_ENABLED 0
+#define PROTECTED_MEMORY_SECTION
+#endif
+
+namespace base {
+
+// Normally mutable variables are held in read-write memory and constant data
+// is held in read-only memory to ensure it is not accidentally overwritten.
+// In some cases we want to hold mutable variables in read-only memory, except
+// when they are being written to, to ensure that they are not tampered with.
+//
+// ProtectedMemory is a container class intended to hold a single variable in
+// read-only memory, except when explicitly set read-write. The variable can be
+// set read-write by creating a scoped AutoWritableMemory object by calling
+// AutoWritableMemory::Create(), the memory stays writable until the returned
+// object goes out of scope and is destructed. The wrapped variable can be
+// accessed using operator* and operator->.
+//
+// Instances of ProtectedMemory must be declared in the PROTECTED_MEMORY_SECTION
+// and as global variables. Because protected memory variables are globals, the
+// the same rules apply disallowing non-trivial constructors and destructors.
+// Global definitions are required to avoid the linker placing statics in
+// inlinable functions into a comdat section and setting the protected memory
+// section read-write when they are merged.
+//
+// EXAMPLE:
+//
+// struct Items { void* item1; };
+// static PROTECTED_MEMORY_SECTION ProtectedMemory<Items> items;
+// void InitializeItems() {
+// // Explicitly set items read-write before writing to it.
+// auto writer = AutoWritableMemory::Create(items);
+// items->item1 = /* ... */;
+// assert(items->item1 != nullptr);
+// // items is set back to read-only on the destruction of writer
+// }
+//
+// using FnPtr = void (*)(void);
+// PROTECTED_MEMORY_SECTION ProtectedMemory<FnPtr> fnPtr;
+// FnPtr ResolveFnPtr(void) {
+// // The Initializer nested class is a helper class for creating a static
+// // initializer for a ProtectedMemory variable. It implicitly sets the
+// // variable read-write during initialization.
+// static ProtectedMemory<FnPtr>::Initializer(&fnPtr,
+// reinterpret_cast<FnPtr>(dlsym(/* ... */)));
+// return *fnPtr;
+// }
+
+template <typename T>
+class ProtectedMemory {
+ public:
+ ProtectedMemory() = default;
+
+ // Expose direct access to the encapsulated variable
+ T& operator*() { return data; }
+ const T& operator*() const { return data; }
+ T* operator->() { return &data; }
+ const T* operator->() const { return &data; }
+
+ // Helper class for creating simple ProtectedMemory static initializers.
+ class Initializer {
+ public:
+ // Defined out-of-line below to break circular definition dependency between
+ // ProtectedMemory and AutoWritableMemory.
+ Initializer(ProtectedMemory<T>* PM, const T& Init);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Initializer);
+ };
+
+ private:
+ T data;
+
+ DISALLOW_COPY_AND_ASSIGN(ProtectedMemory);
+};
+
+// DCHECK that the byte at |ptr| is read-only.
+BASE_EXPORT void AssertMemoryIsReadOnly(const void* ptr);
+
+// Abstract out platform-specific methods to get the beginning and end of the
+// PROTECTED_MEMORY_SECTION. ProtectedMemoryEnd returns a pointer to the byte
+// past the end of the PROTECTED_MEMORY_SECTION.
+#if PROTECTED_MEMORY_ENABLED
+constexpr void* ProtectedMemoryStart = &__start_protected_memory;
+constexpr void* ProtectedMemoryEnd = &__stop_protected_memory;
+#endif
+
+#if defined(COMPONENT_BUILD)
+namespace internal {
+
+// For component builds we want to define a separate global writers variable
+// (explained below) in every DSO that includes this header. To do that we use
+// this template to define a global without duplicate symbol errors.
+template <typename T>
+struct DsoSpecific {
+ static T value;
+};
+template <typename T>
+T DsoSpecific<T>::value = 0;
+
+} // namespace internal
+#endif // defined(COMPONENT_BUILD)
+
+// A class that sets a given ProtectedMemory variable writable while the
+// AutoWritableMemory is in scope. This class implements the logic for setting
+// the protected memory region read-only/read-write in a thread-safe manner.
+class AutoWritableMemory {
+ private:
+ // 'writers' is a global holding the number of ProtectedMemory instances set
+ // writable, used to avoid races setting protected memory readable/writable.
+ // When this reaches zero the protected memory region is set read only.
+ // Access is controlled by writers_lock.
+#if defined(COMPONENT_BUILD)
+ // For component builds writers is a reference to an int defined separately in
+ // every DSO.
+ static constexpr int& writers = internal::DsoSpecific<int>::value;
+#else
+ // Otherwise, we declare writers in the protected memory section to avoid the
+ // scenario where an attacker could overwrite it with a large value and invoke
+ // code that constructs and destructs an AutoWritableMemory. After such a call
+ // protected memory would still be set writable because writers > 0.
+ static int writers;
+#endif // defined(COMPONENT_BUILD)
+
+ // Synchronizes access to the writers variable and the simultaneous actions
+ // that need to happen alongside writers changes, e.g. setting the protected
+ // memory region readable when writers is decremented to 0.
+ static BASE_EXPORT base::LazyInstance<Lock>::Leaky writers_lock;
+
+ // Abstract out platform-specific memory APIs. |end| points to the byte past
+ // the end of the region of memory having its memory protections changed.
+ BASE_EXPORT bool SetMemoryReadWrite(void* start, void* end);
+ BASE_EXPORT bool SetMemoryReadOnly(void* start, void* end);
+
+ // If this is the first writer (e.g. writers == 0) set the writers variable
+ // read-write. Next, increment writers and set the requested memory writable.
+ AutoWritableMemory(void* ptr, void* ptr_end) {
+#if PROTECTED_MEMORY_ENABLED
+ DCHECK(ptr >= ProtectedMemoryStart && ptr_end <= ProtectedMemoryEnd);
+
+ {
+ base::AutoLock auto_lock(writers_lock.Get());
+ if (writers == 0) {
+ AssertMemoryIsReadOnly(ptr);
+#if !defined(COMPONENT_BUILD)
+ AssertMemoryIsReadOnly(&writers);
+ CHECK(SetMemoryReadWrite(&writers, &writers + 1));
+#endif // !defined(COMPONENT_BUILD)
+ }
+
+ writers++;
+ }
+
+ CHECK(SetMemoryReadWrite(ptr, ptr_end));
+#endif // PROTECTED_MEMORY_ENABLED
+ }
+
+ public:
+ // Wrap the private constructor to create an easy-to-use interface to
+ // construct AutoWritableMemory objects.
+ template <typename T>
+ static AutoWritableMemory Create(ProtectedMemory<T>& PM) {
+ T* ptr = &*PM;
+ return AutoWritableMemory(ptr, ptr + 1);
+ }
+
+ // Move constructor just increments writers
+ AutoWritableMemory(AutoWritableMemory&& original) {
+#if PROTECTED_MEMORY_ENABLED
+ base::AutoLock auto_lock(writers_lock.Get());
+ CHECK_GT(writers, 0);
+ writers++;
+#endif // PROTECTED_MEMORY_ENABLED
+ }
+
+ // On destruction decrement writers, and if no other writers exist, set the
+ // entire protected memory region read-only.
+ ~AutoWritableMemory() {
+#if PROTECTED_MEMORY_ENABLED
+ base::AutoLock auto_lock(writers_lock.Get());
+ CHECK_GT(writers, 0);
+ writers--;
+
+ if (writers == 0) {
+ CHECK(SetMemoryReadOnly(ProtectedMemoryStart, ProtectedMemoryEnd));
+#if !defined(COMPONENT_BUILD)
+ AssertMemoryIsReadOnly(&writers);
+#endif // !defined(COMPONENT_BUILD)
+ }
+#endif // PROTECTED_MEMORY_ENABLED
+ }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AutoWritableMemory);
+};
+
+template <typename T>
+ProtectedMemory<T>::Initializer::Initializer(ProtectedMemory<T>* PM,
+ const T& Init) {
+ AutoWritableMemory writer = AutoWritableMemory::Create(*PM);
+ **PM = Init;
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_PROTECTED_MEMORY_H_
diff --git a/chromium/base/memory/protected_memory_cfi.h b/chromium/base/memory/protected_memory_cfi.h
new file mode 100644
index 00000000000..dad25d264d4
--- /dev/null
+++ b/chromium/base/memory/protected_memory_cfi.h
@@ -0,0 +1,87 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper routines to call function pointers stored in protected memory with
+// Control Flow Integrity indirect call checking disabled. Some indirect calls,
+// e.g. dynamically resolved symbols in another DSO, can not be accounted for by
+// CFI-icall. These routines allow those symbols to be called without CFI-icall
+// checking safely by ensuring that they are placed in protected memory.
+
+#ifndef BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
+#define BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
+
+#include <utility>
+
+#include "base/cfi_flags.h"
+#include "base/macros.h"
+#include "base/memory/protected_memory.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
+#error "CFI-icall enabled for platform without protected memory support"
+#endif // BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
+
+namespace base {
+namespace internal {
+
+// This class is used to exempt calls to function pointers stored in
+// ProtectedMemory from cfi-icall checking. It's not secure to use directly; it
+// should only be used by the UnsanitizedCfiCall() functions below. Given an
+// UnsanitizedCfiCall object, you can use operator() to call the encapsulated
+// function pointer without cfi-icall checking.
+template <typename FunctionType>
+class UnsanitizedCfiCall {
+ public:
+ explicit UnsanitizedCfiCall(FunctionType function) : function_(function) {}
+ UnsanitizedCfiCall(UnsanitizedCfiCall&&) = default;
+
+ template <typename... Args>
+#if !defined(COMPILER_MSVC)
+ __attribute__((no_sanitize("cfi-icall")))
+#endif // !defined(COMPILER_MSVC)
+ auto operator()(Args&&... args) {
+ return function_(std::forward<Args>(args)...);
+ }
+
+ private:
+ FunctionType function_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(UnsanitizedCfiCall);
+};
+
+} // namespace internal
+
+// These functions can be used to call function pointers in ProtectedMemory
+// without cfi-icall checking. They are intended to be used to create an
+// UnsanitizedCfiCall object and immediately call it. UnsanitizedCfiCall objects
+// should not be initialized directly or stored because they hold a function
+// pointer that will be called without CFI-icall checking in mutable memory. The
+// functions can be used as shown below:
+
+// ProtectedMemory<void (*)(int)> p;
+// UnsanitizedCfiCall(p)(5); /* In place of (*p)(5); */
+
+template <typename T>
+auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM) {
+#if PROTECTED_MEMORY_ENABLED
+ DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
+#endif // PROTECTED_MEMORY_ENABLED
+ return internal::UnsanitizedCfiCall<T>(*PM);
+}
+
+// struct S { void (*fp)(int); } s;
+// ProtectedMemory<S> p;
+// UnsanitizedCfiCall(p, &S::fp)(5); /* In place of p->fp(5); */
+
+template <typename T, typename Member>
+auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM, Member member) {
+#if PROTECTED_MEMORY_ENABLED
+ DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
+#endif // PROTECTED_MEMORY_ENABLED
+ return internal::UnsanitizedCfiCall<decltype(*PM.*member)>(*PM.*member);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
diff --git a/chromium/base/memory/protected_memory_posix.cc b/chromium/base/memory/protected_memory_posix.cc
new file mode 100644
index 00000000000..1f5fd9002ea
--- /dev/null
+++ b/chromium/base/memory/protected_memory_posix.cc
@@ -0,0 +1,82 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+#endif // defined(OS_LINUX)
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+#include "base/posix/eintr_wrapper.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if !defined(COMPONENT_BUILD)
+PROTECTED_MEMORY_SECTION int AutoWritableMemory::writers = 0;
+#endif // !defined(COMPONENT_BUILD)
+
+base::LazyInstance<Lock>::Leaky AutoWritableMemory::writers_lock =
+ LAZY_INSTANCE_INITIALIZER;
+
+static uintptr_t page_mask() {
+ return ~(static_cast<uintptr_t>(getpagesize()) - 1);
+}
+
+static bool SetMemory(void* start, void* end, int prot) {
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask();
+ return mprotect(reinterpret_cast<void*>(page_start),
+ reinterpret_cast<uintptr_t>(end) - page_start, prot) == 0;
+}
+
+bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
+ return SetMemory(start, end, PROT_READ | PROT_WRITE);
+}
+
+bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
+ return SetMemory(start, end, PROT_READ);
+}
+
+#if defined(OS_LINUX)
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask();
+
+  // Note: We've cast away const here, which should not be meaningful since
+ // if the memory is written to we will abort immediately.
+ int result =
+ getrlimit(RLIMIT_NPROC, reinterpret_cast<struct rlimit*>(page_start));
+ DCHECK_EQ(result, -1);
+ DCHECK_EQ(errno, EFAULT);
+#endif // DCHECK_IS_ON()
+}
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+ mach_port_t object_name;
+ vm_region_basic_info_64 region_info;
+ mach_vm_size_t size = 1;
+ mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+ kern_return_t kr = mach_vm_region(
+ mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&ptr), &size,
+ VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&region_info),
+ &count, &object_name);
+ DCHECK_EQ(kr, KERN_SUCCESS);
+ DCHECK_EQ(region_info.protection, VM_PROT_READ);
+#endif // DCHECK_IS_ON()
+}
+#endif // defined(OS_LINUX) || (defined(OS_MACOSX) && !defined(OS_IOS))
+
+} // namespace base
diff --git a/chromium/base/memory/protected_memory_unittest.cc b/chromium/base/memory/protected_memory_unittest.cc
new file mode 100644
index 00000000000..9b08bf6997a
--- /dev/null
+++ b/chromium/base/memory/protected_memory_unittest.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cfi_flags.h"
+#include "base/memory/protected_memory.h"
+#include "base/memory/protected_memory_cfi.h"
+#include "base/synchronization/lock.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct Data {
+ Data() = default;
+ Data(int foo_) : foo(foo_) {}
+ int foo;
+};
+
+} // namespace
+
+class ProtectedMemoryTest : public ::testing::Test {
+ protected:
+ // Run tests one at a time. Some of the negative tests can not be made thread
+ // safe.
+ void SetUp() final { lock.Acquire(); }
+ void TearDown() final { lock.Release(); }
+
+ Lock lock;
+};
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<int> init;
+
+TEST_F(ProtectedMemoryTest, Initializer) {
+ static ProtectedMemory<int>::Initializer I(&init, 4);
+ EXPECT_EQ(*init, 4);
+}
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<Data> data;
+
+TEST_F(ProtectedMemoryTest, Basic) {
+ AutoWritableMemory writer = AutoWritableMemory::Create(data);
+ data->foo = 5;
+ EXPECT_EQ(data->foo, 5);
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+#if PROTECTED_MEMORY_ENABLED
+TEST_F(ProtectedMemoryTest, ReadOnlyOnStart) {
+ EXPECT_DEATH({ data->foo = 6; AutoWritableMemory::Create(data); }, "");
+}
+
+TEST_F(ProtectedMemoryTest, ReadOnlyAfterSetWritable) {
+ { AutoWritableMemory writer = AutoWritableMemory::Create(data); }
+ EXPECT_DEATH({ data->foo = 7; }, "");
+}
+
+TEST_F(ProtectedMemoryTest, AssertMemoryIsReadOnly) {
+ AssertMemoryIsReadOnly(&data->foo);
+ { AutoWritableMemory::Create(data); }
+ AssertMemoryIsReadOnly(&data->foo);
+
+ ProtectedMemory<Data> writable_data;
+ EXPECT_DCHECK_DEATH({ AssertMemoryIsReadOnly(&writable_data->foo); });
+}
+
+TEST_F(ProtectedMemoryTest, FailsIfDefinedOutsideOfProtectMemoryRegion) {
+ ProtectedMemory<Data> data;
+ EXPECT_DCHECK_DEATH({ AutoWritableMemory::Create(data); });
+}
+
+TEST_F(ProtectedMemoryTest, UnsanitizedCfiCallOutsideOfProtectedMemoryRegion) {
+ ProtectedMemory<void (*)(void)> data;
+ EXPECT_DCHECK_DEATH({ UnsanitizedCfiCall(data)(); });
+}
+#endif // PROTECTED_MEMORY_ENABLED
+
+namespace {
+
+struct BadIcall {
+ BadIcall() = default;
+ BadIcall(int (*fp_)(int)) : fp(fp_) {}
+ int (*fp)(int);
+};
+
+unsigned int bad_icall(int i) {
+ return 4 + i;
+}
+
+} // namespace
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<BadIcall> icall_pm1;
+
+TEST_F(ProtectedMemoryTest, BadMemberCall) {
+ static ProtectedMemory<BadIcall>::Initializer I(
+ &icall_pm1, BadIcall(reinterpret_cast<int (*)(int)>(&bad_icall)));
+
+ EXPECT_EQ(UnsanitizedCfiCall(icall_pm1, &BadIcall::fp)(1), 5);
+#if !BUILDFLAG(CFI_ICALL_CHECK)
+ EXPECT_EQ(icall_pm1->fp(1), 5);
+#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+ EXPECT_DEATH({ icall_pm1->fp(1); }, "");
+#endif
+}
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<int (*)(int)> icall_pm2;
+
+TEST_F(ProtectedMemoryTest, BadFnPtrCall) {
+ static ProtectedMemory<int (*)(int)>::Initializer I(
+ &icall_pm2, reinterpret_cast<int (*)(int)>(&bad_icall));
+
+ EXPECT_EQ(UnsanitizedCfiCall(icall_pm2)(1), 5);
+#if !BUILDFLAG(CFI_ICALL_CHECK)
+ EXPECT_EQ((*icall_pm2)(1), 5);
+#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+ EXPECT_DEATH({ (*icall_pm2)(1); }, "");
+#endif
+}
+
+#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+} // namespace base
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index b914bcfd602..5e1e8ad847b 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -7,32 +7,21 @@
#include <stddef.h>
-#include <iosfwd>
-#include <type_traits>
+#include <utility>
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
-template <class T>
-class scoped_refptr;
-
namespace base {
-
-template <typename T>
-scoped_refptr<T> AdoptRef(T* t);
-
namespace subtle {
-enum AdoptRefTag { kAdoptRefTag };
-enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
-enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
-
class BASE_EXPORT RefCountedBase {
public:
bool HasOneRef() const { return ref_count_ == 1; }
@@ -430,295 +419,6 @@ class RefCountedData
~RefCountedData() = default;
};
-// Creates a scoped_refptr from a raw pointer without incrementing the reference
-// count. Use this only for a newly created object whose reference count starts
-// from 1 instead of 0.
-template <typename T>
-scoped_refptr<T> AdoptRef(T* obj) {
- using Tag = typename std::decay<decltype(T::kRefCountPreference)>::type;
- static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
- "Use AdoptRef only for the reference count starts from one.");
-
- DCHECK(obj);
- DCHECK(obj->HasOneRef());
- obj->Adopted();
- return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
-}
-
-namespace subtle {
-
-template <typename T>
-scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
- return scoped_refptr<T>(obj);
-}
-
-template <typename T>
-scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
- return AdoptRef(obj);
-}
-
-} // namespace subtle
-
-// Constructs an instance of T, which is a ref counted type, and wraps the
-// object into a scoped_refptr<T>.
-template <typename T, typename... Args>
-scoped_refptr<T> MakeRefCounted(Args&&... args) {
- T* obj = new T(std::forward<Args>(args)...);
- return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
-}
-
-// Takes an instance of T, which is a ref counted type, and wraps the object
-// into a scoped_refptr<T>.
-template <typename T>
-scoped_refptr<T> WrapRefCounted(T* t) {
- return scoped_refptr<T>(t);
-}
-
} // namespace base
-//
-// A smart pointer class for reference counted objects. Use this class instead
-// of calling AddRef and Release manually on a reference counted object to
-// avoid common memory leaks caused by forgetting to Release an object
-// reference. Sample usage:
-//
-// class MyFoo : public RefCounted<MyFoo> {
-// ...
-// private:
-// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
-// ~MyFoo(); // Destructor must be private/protected.
-// };
-//
-// void some_function() {
-// scoped_refptr<MyFoo> foo = new MyFoo();
-// foo->Method(param);
-// // |foo| is released when this function returns
-// }
-//
-// void some_other_function() {
-// scoped_refptr<MyFoo> foo = new MyFoo();
-// ...
-// foo = nullptr; // explicitly releases |foo|
-// ...
-// if (foo)
-// foo->Method(param);
-// }
-//
-// The above examples show how scoped_refptr<T> acts like a pointer to T.
-// Given two scoped_refptr<T> classes, it is also possible to exchange
-// references between the two objects, like so:
-//
-// {
-// scoped_refptr<MyFoo> a = new MyFoo();
-// scoped_refptr<MyFoo> b;
-//
-// b.swap(a);
-// // now, |b| references the MyFoo object, and |a| references nullptr.
-// }
-//
-// To make both |a| and |b| in the above example reference the same MyFoo
-// object, simply use the assignment operator:
-//
-// {
-// scoped_refptr<MyFoo> a = new MyFoo();
-// scoped_refptr<MyFoo> b;
-//
-// b = a;
-// // now, |a| and |b| each own a reference to the same MyFoo object.
-// }
-//
-template <class T>
-class scoped_refptr {
- public:
- typedef T element_type;
-
- scoped_refptr() {}
-
- scoped_refptr(T* p) : ptr_(p) {
- if (ptr_)
- AddRef(ptr_);
- }
-
- // Copy constructor.
- scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
- if (ptr_)
- AddRef(ptr_);
- }
-
- // Copy conversion constructor.
- template <typename U,
- typename = typename std::enable_if<
- std::is_convertible<U*, T*>::value>::type>
- scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
- if (ptr_)
- AddRef(ptr_);
- }
-
- // Move constructor. This is required in addition to the conversion
- // constructor below in order for clang to warn about pessimizing moves.
- scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
-
- // Move conversion constructor.
- template <typename U,
- typename = typename std::enable_if<
- std::is_convertible<U*, T*>::value>::type>
- scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
- r.ptr_ = nullptr;
- }
-
- ~scoped_refptr() {
- if (ptr_)
- Release(ptr_);
- }
-
- T* get() const { return ptr_; }
-
- T& operator*() const {
- DCHECK(ptr_);
- return *ptr_;
- }
-
- T* operator->() const {
- DCHECK(ptr_);
- return ptr_;
- }
-
- scoped_refptr<T>& operator=(T* p) {
- // AddRef first so that self assignment should work
- if (p)
- AddRef(p);
- T* old_ptr = ptr_;
- ptr_ = p;
- if (old_ptr)
- Release(old_ptr);
- return *this;
- }
-
- scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
- return *this = r.ptr_;
- }
-
- template <typename U>
- scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
- return *this = r.get();
- }
-
- scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
- scoped_refptr<T> tmp(std::move(r));
- tmp.swap(*this);
- return *this;
- }
-
- template <typename U>
- scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
- // We swap with a temporary variable to guarantee that |ptr_| is released
- // immediately. A naive implementation which swaps |this| and |r| would
- // unintentionally extend the lifetime of |ptr_| to at least the lifetime of
- // |r|.
- scoped_refptr<T> tmp(std::move(r));
- tmp.swap(*this);
- return *this;
- }
-
- void swap(scoped_refptr<T>& r) {
- T* tmp = ptr_;
- ptr_ = r.ptr_;
- r.ptr_ = tmp;
- }
-
- explicit operator bool() const { return ptr_ != nullptr; }
-
- template <typename U>
- bool operator==(const scoped_refptr<U>& rhs) const {
- return ptr_ == rhs.get();
- }
-
- template <typename U>
- bool operator!=(const scoped_refptr<U>& rhs) const {
- return !operator==(rhs);
- }
-
- template <typename U>
- bool operator<(const scoped_refptr<U>& rhs) const {
- return ptr_ < rhs.get();
- }
-
- protected:
- T* ptr_ = nullptr;
-
- private:
- template <typename U>
- friend scoped_refptr<U> base::AdoptRef(U*);
-
- scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
-
- // Friend required for move constructors that set r.ptr_ to null.
- template <typename U>
- friend class scoped_refptr;
-
- // Non-inline helpers to allow:
- // class Opaque;
- // extern template class scoped_refptr<Opaque>;
- // Otherwise the compiler will complain that Opaque is an incomplete type.
- static void AddRef(T* ptr);
- static void Release(T* ptr);
-};
-
-// static
-template <typename T>
-void scoped_refptr<T>::AddRef(T* ptr) {
- ptr->AddRef();
-}
-
-// static
-template <typename T>
-void scoped_refptr<T>::Release(T* ptr) {
- ptr->Release();
-}
-
-template <typename T, typename U>
-bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
- return lhs.get() == rhs;
-}
-
-template <typename T, typename U>
-bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
- return lhs == rhs.get();
-}
-
-template <typename T>
-bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
- return !static_cast<bool>(lhs);
-}
-
-template <typename T>
-bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
- return !static_cast<bool>(rhs);
-}
-
-template <typename T, typename U>
-bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
- return !operator==(lhs, rhs);
-}
-
-template <typename T, typename U>
-bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
- return !operator==(lhs, rhs);
-}
-
-template <typename T>
-bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
- return !operator==(lhs, null);
-}
-
-template <typename T>
-bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
- return !operator==(null, rhs);
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
- return out << p.get();
-}
-
#endif // BASE_MEMORY_REF_COUNTED_H_
diff --git a/chromium/base/memory/ref_counted_memory.cc b/chromium/base/memory/ref_counted_memory.cc
index 26b78f36632..7eaaf034257 100644
--- a/chromium/base/memory/ref_counted_memory.cc
+++ b/chromium/base/memory/ref_counted_memory.cc
@@ -15,9 +15,9 @@ bool RefCountedMemory::Equals(
(memcmp(front(), other->front(), size()) == 0);
}
-RefCountedMemory::RefCountedMemory() {}
+RefCountedMemory::RefCountedMemory() = default;
-RefCountedMemory::~RefCountedMemory() {}
+RefCountedMemory::~RefCountedMemory() = default;
const unsigned char* RefCountedStaticMemory::front() const {
return data_;
@@ -27,9 +27,9 @@ size_t RefCountedStaticMemory::size() const {
return length_;
}
-RefCountedStaticMemory::~RefCountedStaticMemory() {}
+RefCountedStaticMemory::~RefCountedStaticMemory() = default;
-RefCountedBytes::RefCountedBytes() {}
+RefCountedBytes::RefCountedBytes() = default;
RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
: data_(initializer) {
@@ -48,18 +48,18 @@ scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
const unsigned char* RefCountedBytes::front() const {
// STL will assert if we do front() on an empty vector, but calling code
// expects a NULL.
- return size() ? &data_.front() : NULL;
+ return size() ? &data_.front() : nullptr;
}
size_t RefCountedBytes::size() const {
return data_.size();
}
-RefCountedBytes::~RefCountedBytes() {}
+RefCountedBytes::~RefCountedBytes() = default;
-RefCountedString::RefCountedString() {}
+RefCountedString::RefCountedString() = default;
-RefCountedString::~RefCountedString() {}
+RefCountedString::~RefCountedString() = default;
// static
scoped_refptr<RefCountedString> RefCountedString::TakeString(
@@ -70,8 +70,8 @@ scoped_refptr<RefCountedString> RefCountedString::TakeString(
}
const unsigned char* RefCountedString::front() const {
- return data_.empty() ? NULL :
- reinterpret_cast<const unsigned char*>(data_.data());
+ return data_.empty() ? nullptr
+ : reinterpret_cast<const unsigned char*>(data_.data());
}
size_t RefCountedString::size() const {
diff --git a/chromium/base/memory/ref_counted_memory_unittest.cc b/chromium/base/memory/ref_counted_memory_unittest.cc
index bd2ed01f54b..034f674b758 100644
--- a/chromium/base/memory/ref_counted_memory_unittest.cc
+++ b/chromium/base/memory/ref_counted_memory_unittest.cc
@@ -75,7 +75,7 @@ TEST(RefCountedMemoryUnitTest, Equals) {
TEST(RefCountedMemoryUnitTest, EqualsNull) {
std::string s("str");
scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
- EXPECT_FALSE(mem->Equals(NULL));
+ EXPECT_FALSE(mem->Equals(nullptr));
}
} // namespace base
diff --git a/chromium/base/memory/ref_counted_unittest.cc b/chromium/base/memory/ref_counted_unittest.cc
index 62ebd9de7c9..96f588e66cd 100644
--- a/chromium/base/memory/ref_counted_unittest.cc
+++ b/chromium/base/memory/ref_counted_unittest.cc
@@ -14,7 +14,7 @@ namespace {
class SelfAssign : public base::RefCounted<SelfAssign> {
protected:
- virtual ~SelfAssign() {}
+ virtual ~SelfAssign() = default;
private:
friend class base::RefCounted<SelfAssign>;
@@ -22,7 +22,7 @@ class SelfAssign : public base::RefCounted<SelfAssign> {
class Derived : public SelfAssign {
protected:
- ~Derived() override {}
+ ~Derived() override = default;
private:
friend class base::RefCounted<Derived>;
@@ -112,7 +112,7 @@ class Other : public base::RefCounted<Other> {
private:
friend class base::RefCounted<Other>;
- ~Other() {}
+ ~Other() = default;
};
class HasPrivateDestructorWithDeleter;
@@ -124,11 +124,11 @@ struct Deleter {
class HasPrivateDestructorWithDeleter
: public base::RefCounted<HasPrivateDestructorWithDeleter, Deleter> {
public:
- HasPrivateDestructorWithDeleter() {}
+ HasPrivateDestructorWithDeleter() = default;
private:
friend struct Deleter;
- ~HasPrivateDestructorWithDeleter() {}
+ ~HasPrivateDestructorWithDeleter() = default;
};
void Deleter::Destruct(const HasPrivateDestructorWithDeleter* x) {
@@ -147,11 +147,11 @@ class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
public:
REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
- InitialRefCountIsOne() {}
+ InitialRefCountIsOne() = default;
private:
friend class base::RefCounted<InitialRefCountIsOne>;
- ~InitialRefCountIsOne() {}
+ ~InitialRefCountIsOne() = default;
};
} // end namespace
@@ -581,6 +581,16 @@ TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
EXPECT_EQ(other2, Overloaded(std::move(other)));
}
+TEST(RefCountedUnitTest, TestMakeRefCounted) {
+ scoped_refptr<Derived> derived = new Derived;
+ EXPECT_TRUE(derived->HasOneRef());
+ derived = nullptr;
+
+ scoped_refptr<Derived> derived2 = base::MakeRefCounted<Derived>();
+ EXPECT_TRUE(derived2->HasOneRef());
+ derived2 = nullptr;
+}
+
TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
scoped_refptr<InitialRefCountIsOne> obj =
base::MakeRefCounted<InitialRefCountIsOne>();
diff --git a/chromium/base/memory/scoped_refptr.h b/chromium/base/memory/scoped_refptr.h
new file mode 100644
index 00000000000..d56d423ee78
--- /dev/null
+++ b/chromium/base/memory/scoped_refptr.h
@@ -0,0 +1,358 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
+#define BASE_MEMORY_SCOPED_REFPTR_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCounted;
+template <class, typename>
+class RefCountedThreadSafe;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(const T*,
+ const RefCounted<U, V>*) {
+ return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+ std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+ const T*,
+ const RefCountedThreadSafe<U, V>*) {
+ return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+ std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+ return false;
+}
+
+} // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+ using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+ static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ "Use AdoptRef only for the reference count starts from one.");
+
+ DCHECK(obj);
+ DCHECK(obj->HasOneRef());
+ obj->Adopted();
+ return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+ return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+ return AdoptRef(obj);
+}
+
+} // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+ T* obj = new T(std::forward<Args>(args)...);
+ return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+ return scoped_refptr<T>(t);
+}
+
+} // namespace base
+
+//
+// A smart pointer class for reference counted objects. Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference. Sample usage:
+//
+// class MyFoo : public RefCounted<MyFoo> {
+// ...
+// private:
+// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
+// ~MyFoo(); // Destructor must be private/protected.
+// };
+//
+// void some_function() {
+// scoped_refptr<MyFoo> foo = new MyFoo();
+// foo->Method(param);
+// // |foo| is released when this function returns
+// }
+//
+// void some_other_function() {
+// scoped_refptr<MyFoo> foo = new MyFoo();
+// ...
+// foo = nullptr; // explicitly releases |foo|
+// ...
+// if (foo)
+// foo->Method(param);
+// }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+// {
+// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> b;
+//
+// b.swap(a);
+// // now, |b| references the MyFoo object, and |a| references nullptr.
+// }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+// {
+// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> b;
+//
+// b = a;
+// // now, |a| and |b| each own a reference to the same MyFoo object.
+// }
+//
+template <class T>
+class scoped_refptr {
+ public:
+ typedef T element_type;
+
+ scoped_refptr() {}
+
+ scoped_refptr(T* p) : ptr_(p) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy constructor.
+ scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Move constructor. This is required in addition to the conversion
+ // constructor below in order for clang to warn about pessimizing moves.
+ scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
+
+ // Move conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
+ r.ptr_ = nullptr;
+ }
+
+ ~scoped_refptr() {
+ static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+ static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+ "It's unsafe to override the ref count preference."
+ " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+ " from subclasses.");
+ if (ptr_)
+ Release(ptr_);
+ }
+
+ T* get() const { return ptr_; }
+
+ T& operator*() const {
+ DCHECK(ptr_);
+ return *ptr_;
+ }
+
+ T* operator->() const {
+ DCHECK(ptr_);
+ return ptr_;
+ }
+
+ scoped_refptr<T>& operator=(T* p) {
+ // AddRef first so that self assignment should work
+ if (p)
+ AddRef(p);
+ T* old_ptr = ptr_;
+ ptr_ = p;
+ if (old_ptr)
+ Release(old_ptr);
+ return *this;
+ }
+
+ scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+ return *this = r.ptr_;
+ }
+
+ template <typename U>
+ scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+ return *this = r.get();
+ }
+
+ scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
+ scoped_refptr<T> tmp(std::move(r));
+ tmp.swap(*this);
+ return *this;
+ }
+
+ template <typename U>
+ scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
+ // We swap with a temporary variable to guarantee that |ptr_| is released
+ // immediately. A naive implementation which swaps |this| and |r| would
+ // unintentionally extend the lifetime of |ptr_| to at least the lifetime of
+ // |r|.
+ scoped_refptr<T> tmp(std::move(r));
+ tmp.swap(*this);
+ return *this;
+ }
+
+ void swap(scoped_refptr<T>& r) {
+ T* tmp = ptr_;
+ ptr_ = r.ptr_;
+ r.ptr_ = tmp;
+ }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ template <typename U>
+ bool operator==(const scoped_refptr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator!=(const scoped_refptr<U>& rhs) const {
+ return !operator==(rhs);
+ }
+
+ template <typename U>
+ bool operator<(const scoped_refptr<U>& rhs) const {
+ return ptr_ < rhs.get();
+ }
+
+ protected:
+ T* ptr_ = nullptr;
+
+ private:
+ template <typename U>
+ friend scoped_refptr<U> base::AdoptRef(U*);
+
+ scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+ // Friend required for move constructors that set r.ptr_ to null.
+ template <typename U>
+ friend class scoped_refptr;
+
+ // Non-inline helpers to allow:
+ // class Opaque;
+ // extern template class scoped_refptr<Opaque>;
+ // Otherwise the compiler will complain that Opaque is an incomplete type.
+ static void AddRef(T* ptr);
+ static void Release(T* ptr);
+};
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+ ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+ ptr->Release();
+}
+
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+ return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+ return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) {
+ lhs.swap(rhs);
+}
+
+#endif // BASE_MEMORY_SCOPED_REFPTR_H_
diff --git a/chromium/base/memory/shared_memory_android.cc b/chromium/base/memory/shared_memory_android.cc
index 4befe06310a..c426ac3bcce 100644
--- a/chromium/base/memory/shared_memory_android.cc
+++ b/chromium/base/memory/shared_memory_android.cc
@@ -25,7 +25,7 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// "name" is just a label in ashmem. It is visible in /proc/pid/maps.
int fd = ashmem_create_region(
- options.name_deprecated == NULL ? "" : options.name_deprecated->c_str(),
+ options.name_deprecated ? options.name_deprecated->c_str() : "",
options.size);
shm_ = SharedMemoryHandle::ImportHandle(fd, options.size);
if (!shm_.IsValid()) {
diff --git a/chromium/base/memory/shared_memory_handle.h b/chromium/base/memory/shared_memory_handle.h
index f719aeb9b24..c4e140e6126 100644
--- a/chromium/base/memory/shared_memory_handle.h
+++ b/chromium/base/memory/shared_memory_handle.h
@@ -24,6 +24,10 @@
#include "base/file_descriptor_posix.h"
#endif
+#if defined(OS_ANDROID)
+extern "C" typedef struct AHardwareBuffer AHardwareBuffer;
+#endif
+
namespace base {
// SharedMemoryHandle is the smallest possible IPC-transportable "reference" to
@@ -82,26 +86,6 @@ class BASE_EXPORT SharedMemoryHandle {
MACH,
};
- // Constructs a SharedMemoryHandle backed by the components of a
- // FileDescriptor. The newly created instance has the same ownership semantics
- // as base::FileDescriptor. This typically means that the SharedMemoryHandle
- // takes ownership of the |fd| if |auto_close| is true. Unfortunately, it's
- // common for existing code to make shallow copies of SharedMemoryHandle, and
- // the one that is finally passed into a base::SharedMemory is the one that
- // "consumes" the fd.
- // |guid| uniquely identifies the shared memory region pointed to by the
- // underlying OS resource. If |file_descriptor| is associated with another
- // SharedMemoryHandle, the caller must pass the |guid| of that
- // SharedMemoryHandle. Otherwise, the caller should generate a new
- // UnguessableToken.
- // |size| refers to the size of the memory region pointed to by
- // file_descriptor.fd. Passing the wrong |size| has no immediate consequence,
- // but may cause errors when trying to map the SharedMemoryHandle at a later
- // point in time.
- SharedMemoryHandle(const base::FileDescriptor& file_descriptor,
- size_t size,
- const base::UnguessableToken& guid);
-
// Makes a Mach-based SharedMemoryHandle of the given size. On error,
// subsequent calls to IsValid() return false.
// Passing the wrong |size| has no immediate consequence, but may cause errors
@@ -150,17 +134,6 @@ class BASE_EXPORT SharedMemoryHandle {
SharedMemoryHandle(HANDLE h, size_t size, const base::UnguessableToken& guid);
HANDLE GetHandle() const;
#else
- // |guid| uniquely identifies the shared memory region pointed to by the
- // underlying OS resource. If |file_descriptor| is associated with another
- // SharedMemoryHandle, the caller must pass the |guid| of that
- // SharedMemoryHandle. Otherwise, the caller should generate a new
- // UnguessableToken.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- SharedMemoryHandle(const base::FileDescriptor& file_descriptor,
- size_t size,
- const base::UnguessableToken& guid);
-
// Creates a SharedMemoryHandle from an |fd| supplied from an external
// service.
// Passing the wrong |size| has no immediate consequence, but may cause errors
@@ -175,6 +148,54 @@ class BASE_EXPORT SharedMemoryHandle {
int Release();
#endif
+#if defined(OS_ANDROID)
+ enum class Type {
+ // The SharedMemoryHandle is not backed by a handle. This is used for
+ // a default-constructed SharedMemoryHandle() or for a failed duplicate.
+ // The other types are assumed to be valid.
+ INVALID,
+ // The SharedMemoryHandle is backed by a valid fd for ashmem.
+ ASHMEM,
+ // The SharedMemoryHandle is backed by a valid AHardwareBuffer object.
+ ANDROID_HARDWARE_BUFFER,
+
+ LAST = ANDROID_HARDWARE_BUFFER
+ };
+ Type GetType() const { return type_; }
+ SharedMemoryHandle(AHardwareBuffer* buffer,
+ size_t size,
+ const base::UnguessableToken& guid);
+ // Constructs a handle from file descriptor and type. Both ASHMEM and
+ // AHardwareBuffer types are transported via file descriptor for IPC, so the
+ // type field is needed to distinguish them. The generic file descriptor
+ // constructor below assumes type ASHMEM.
+ SharedMemoryHandle(Type type,
+ const base::FileDescriptor& file_descriptor,
+ size_t size,
+ const base::UnguessableToken& guid);
+ AHardwareBuffer* GetMemoryObject() const;
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+ // Constructs a SharedMemoryHandle backed by a FileDescriptor. The newly
+ // created instance has the same ownership semantics as base::FileDescriptor.
+ // This typically means that the SharedMemoryHandle takes ownership of the
+ // |fd| if |auto_close| is true. Unfortunately, it's common for existing code
+ // to make shallow copies of SharedMemoryHandle, and the one that is finally
+ // passed into a base::SharedMemory is the one that "consumes" the fd.
+ //
+ // |guid| uniquely identifies the shared memory region pointed to by the
+ // underlying OS resource. If |file_descriptor| is associated with another
+ // SharedMemoryHandle, the caller must pass the |guid| of that
+ // SharedMemoryHandle. Otherwise, the caller should generate a new
+ // UnguessableToken.
+ // Passing the wrong |size| has no immediate consequence, but may cause errors
+ // when trying to map the SharedMemoryHandle at a later point in time.
+ SharedMemoryHandle(const base::FileDescriptor& file_descriptor,
+ size_t size,
+ const base::UnguessableToken& guid);
+#endif
+
private:
#if defined(OS_MACOSX) && !defined(OS_IOS)
friend class SharedMemory;
@@ -196,6 +217,14 @@ class BASE_EXPORT SharedMemoryHandle {
bool ownership_passes_to_ipc_ = false;
};
};
+#elif defined(OS_ANDROID)
+ // Each instance of a SharedMemoryHandle is either INVALID, or backed by an
+ // ashmem fd, or backed by an AHardwareBuffer. |type_| determines the backing
+ // member.
+ Type type_ = Type::INVALID;
+ FileDescriptor file_descriptor_;
+ AHardwareBuffer* memory_object_ = nullptr;
+ bool ownership_passes_to_ipc_ = false;
#elif defined(OS_FUCHSIA)
zx_handle_t handle_ = ZX_HANDLE_INVALID;
bool ownership_passes_to_ipc_ = false;
diff --git a/chromium/base/memory/shared_memory_handle_android.cc b/chromium/base/memory/shared_memory_handle_android.cc
new file mode 100644
index 00000000000..69d32032c6d
--- /dev/null
+++ b/chromium/base/memory/shared_memory_handle_android.cc
@@ -0,0 +1,198 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <unistd.h>
+
+#include "base/android/android_hardware_buffer_compat.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/unix_domain_socket.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(
+ const base::FileDescriptor& file_descriptor,
+ size_t size,
+ const base::UnguessableToken& guid)
+ : SharedMemoryHandle(Type::ASHMEM, file_descriptor, size, guid) {}
+
+SharedMemoryHandle::SharedMemoryHandle(
+ Type type,
+ const base::FileDescriptor& file_descriptor,
+ size_t size,
+ const base::UnguessableToken& guid)
+ : type_(type), guid_(guid), size_(size) {
+ switch (type) {
+ case Type::INVALID:
+ NOTREACHED() << "Can't create a Type::INVALID from a file descriptor";
+ break;
+ case Type::ASHMEM:
+ DCHECK_GE(file_descriptor.fd, 0);
+ file_descriptor_ = file_descriptor;
+ break;
+ case Type::ANDROID_HARDWARE_BUFFER:
+ // This may be the first use of AHardwareBuffers in this process, so we
+ // need to load symbols. This should not fail since we're supposedly
+ // receiving one from IPC, but better to be paranoid.
+ if (!AndroidHardwareBufferCompat::IsSupportAvailable()) {
+ NOTREACHED() << "AHardwareBuffer support not available";
+ type_ = Type::INVALID;
+ return;
+ }
+
+ AHardwareBuffer* ahb = nullptr;
+ // A successful receive increments refcount, we don't need to do so
+ // separately.
+ int ret =
+ AndroidHardwareBufferCompat::GetInstance().RecvHandleFromUnixSocket(
+ file_descriptor.fd, &ahb);
+
+ // We need to take ownership of the FD and close it if it came from IPC.
+ if (file_descriptor.auto_close) {
+ if (IGNORE_EINTR(close(file_descriptor.fd)) < 0)
+ PLOG(ERROR) << "close";
+ }
+
+ if (ret < 0) {
+ PLOG(ERROR) << "recv";
+ type_ = Type::INVALID;
+ return;
+ }
+
+ memory_object_ = ahb;
+ }
+}
+
+SharedMemoryHandle::SharedMemoryHandle(AHardwareBuffer* buffer,
+ size_t size,
+ const base::UnguessableToken& guid)
+ : type_(Type::ANDROID_HARDWARE_BUFFER),
+ memory_object_(buffer),
+ ownership_passes_to_ipc_(false),
+ guid_(guid),
+ size_(size) {
+ // Don't call Acquire on the AHardwareBuffer here. Getting a handle doesn't
+ // take ownership.
+}
+
+// static
+SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
+ SharedMemoryHandle handle;
+ handle.type_ = Type::ASHMEM;
+ handle.file_descriptor_.fd = fd;
+ handle.file_descriptor_.auto_close = false;
+ handle.guid_ = UnguessableToken::Create();
+ handle.size_ = size;
+ return handle;
+}
+
+int SharedMemoryHandle::GetHandle() const {
+ switch (type_) {
+ case Type::INVALID:
+ return -1;
+ case Type::ASHMEM:
+ DCHECK(IsValid());
+ return file_descriptor_.fd;
+ case Type::ANDROID_HARDWARE_BUFFER:
+ DCHECK(IsValid());
+ ScopedFD read_fd, write_fd;
+ if (!CreateSocketPair(&read_fd, &write_fd)) {
+ PLOG(ERROR) << "SocketPair";
+ return -1;
+ }
+
+ int ret =
+ AndroidHardwareBufferCompat::GetInstance().SendHandleToUnixSocket(
+ memory_object_, write_fd.get());
+ if (ret < 0) {
+ PLOG(ERROR) << "send";
+ return -1;
+ }
+
+ // Close write end now to avoid timeouts in case the receiver goes away.
+ write_fd.reset();
+
+ return read_fd.release();
+ }
+}
+
+bool SharedMemoryHandle::IsValid() const {
+ return type_ != Type::INVALID;
+}
+
+void SharedMemoryHandle::Close() const {
+ switch (type_) {
+ case Type::INVALID:
+ return;
+ case Type::ASHMEM:
+ DCHECK(IsValid());
+ if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+ PLOG(ERROR) << "close";
+ break;
+ case Type::ANDROID_HARDWARE_BUFFER:
+ DCHECK(IsValid());
+ AndroidHardwareBufferCompat::GetInstance().Release(memory_object_);
+ }
+}
+
+int SharedMemoryHandle::Release() {
+ DCHECK_EQ(type_, Type::ASHMEM);
+ int old_fd = file_descriptor_.fd;
+ file_descriptor_.fd = -1;
+ return old_fd;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+ switch (type_) {
+ case Type::INVALID:
+ return SharedMemoryHandle();
+ case Type::ASHMEM: {
+ DCHECK(IsValid());
+ int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
+ if (duped_handle < 0)
+ return SharedMemoryHandle();
+ return SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
+ GetGUID());
+ }
+ case Type::ANDROID_HARDWARE_BUFFER:
+ DCHECK(IsValid());
+ AndroidHardwareBufferCompat::GetInstance().Acquire(memory_object_);
+ SharedMemoryHandle handle(*this);
+ handle.SetOwnershipPassesToIPC(true);
+ return handle;
+ }
+}
+
+AHardwareBuffer* SharedMemoryHandle::GetMemoryObject() const {
+ DCHECK_EQ(type_, Type::ANDROID_HARDWARE_BUFFER);
+ return memory_object_;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+ switch (type_) {
+ case Type::ASHMEM:
+ file_descriptor_.auto_close = ownership_passes;
+ break;
+ case Type::INVALID:
+ case Type::ANDROID_HARDWARE_BUFFER:
+ ownership_passes_to_ipc_ = ownership_passes;
+ }
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+ switch (type_) {
+ case Type::ASHMEM:
+ return file_descriptor_.auto_close;
+ case Type::INVALID:
+ case Type::ANDROID_HARDWARE_BUFFER:
+ return ownership_passes_to_ipc_;
+ }
+}
+
+} // namespace base
diff --git a/chromium/base/memory/shared_memory_helper.cc b/chromium/base/memory/shared_memory_helper.cc
index 68f0c06bbba..91893d335e2 100644
--- a/chromium/base/memory/shared_memory_helper.cc
+++ b/chromium/base/memory/shared_memory_helper.cc
@@ -30,23 +30,24 @@ using ScopedPathUnlinker =
#if !defined(OS_ANDROID)
bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
- ScopedFILE* fp,
+ ScopedFD* fd,
ScopedFD* readonly_fd,
FilePath* path) {
-#if !(defined(OS_MACOSX) && !defined(OS_IOS)) && !defined(OS_FUCHSIA)
+#if defined(OS_LINUX)
// It doesn't make sense to have a open-existing private piece of shmem
DCHECK(!options.open_existing_deprecated);
-#endif // !(defined(OS_MACOSX) && !defined(OS_IOS)
+#endif // defined(OS_LINUX)
// Q: Why not use the shm_open() etc. APIs?
// A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
FilePath directory;
ScopedPathUnlinker path_unlinker;
+ ScopedFILE fp;
if (!GetShmemTempDir(options.executable, &directory))
return false;
- fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+ fp.reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
- if (!*fp)
+ if (!fp)
return false;
// Deleting the file prevents anyone else from mapping it in (making it
@@ -59,20 +60,20 @@ bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
if (!readonly_fd->is_valid()) {
DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
- fp->reset();
return false;
}
}
+ fd->reset(fileno(fp.release()));
return true;
}
-bool PrepareMapFile(ScopedFILE fp,
+bool PrepareMapFile(ScopedFD fd,
ScopedFD readonly_fd,
int* mapped_file,
int* readonly_mapped_file) {
DCHECK_EQ(-1, *mapped_file);
DCHECK_EQ(-1, *readonly_mapped_file);
- if (fp == NULL)
+ if (!fd.is_valid())
return false;
// This function theoretically can block on the disk, but realistically
@@ -82,7 +83,7 @@ bool PrepareMapFile(ScopedFILE fp,
if (readonly_fd.is_valid()) {
struct stat st = {};
- if (fstat(fileno(fp.get()), &st))
+ if (fstat(fd.get(), &st))
NOTREACHED();
struct stat readonly_st = {};
@@ -94,7 +95,7 @@ bool PrepareMapFile(ScopedFILE fp,
}
}
- *mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
+ *mapped_file = HANDLE_EINTR(dup(fd.get()));
if (*mapped_file == -1) {
NOTREACHED() << "Call to dup failed, errno=" << errno;
diff --git a/chromium/base/memory/shared_memory_helper.h b/chromium/base/memory/shared_memory_helper.h
index ca681e16cc6..2c24f869f5c 100644
--- a/chromium/base/memory/shared_memory_helper.h
+++ b/chromium/base/memory/shared_memory_helper.h
@@ -6,29 +6,30 @@
#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
#include <fcntl.h>
namespace base {
#if !defined(OS_ANDROID)
-// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
-// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// Makes a temporary file, fdopens it, and then unlinks it. |fd| is populated
+// with the opened fd. |readonly_fd| is populated with the opened fd if
// options.share_read_only is true. |path| is populated with the location of
// the file before it was unlinked.
// Returns false if there's an unhandled failure.
bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
- ScopedFILE* fp,
+ ScopedFD* fd,
ScopedFD* readonly_fd,
FilePath* path);
// Takes the outputs of CreateAnonymousSharedMemory and maps them properly to
// |mapped_file| or |readonly_mapped_file|, depending on which one is populated.
-bool PrepareMapFile(ScopedFILE fp,
+bool PrepareMapFile(ScopedFD fd,
ScopedFD readonly_fd,
int* mapped_file,
int* readonly_mapped_file);
-#endif
+#endif // !defined(OS_ANDROID)
} // namespace base
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index 00531a57ca2..d2bb5ec5b27 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -14,6 +14,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
+#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
#include "base/mac/scoped_mach_vm.h"
#include "base/memory/shared_memory_helper.h"
@@ -29,9 +30,9 @@
#include "base/unguessable_token.h"
#include "build/build_config.h"
-#if defined(OS_MACOSX)
-#include "base/mac/foundation_util.h"
-#endif // OS_MACOSX
+#if defined(OS_IOS)
+#error "MacOS only - iOS uses shared_memory_posix.cc"
+#endif
namespace base {
@@ -143,31 +144,32 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// This function theoretically can block on the disk. Both profiling of real
// users and local instrumentation shows that this is a real problem.
// https://code.google.com/p/chromium/issues/detail?id=466437
- base::ThreadRestrictions::ScopedAllowIO allow_io;
+ ThreadRestrictions::ScopedAllowIO allow_io;
- ScopedFILE fp;
+ ScopedFD fd;
ScopedFD readonly_fd;
FilePath path;
- bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+ bool result = CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
if (!result)
return false;
- DCHECK(fp); // Should be guaranteed by CreateAnonymousSharedMemory().
+ // Should be guaranteed by CreateAnonymousSharedMemory().
+ DCHECK(fd.is_valid());
// Get current size.
struct stat stat;
- if (fstat(fileno(fp.get()), &stat) != 0)
+ if (fstat(fd.get(), &stat) != 0)
return false;
const size_t current_size = stat.st_size;
if (current_size != options.size) {
- if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+ if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
return false;
}
requested_size_ = options.size;
int mapped_file = -1;
int readonly_mapped_file = -1;
- result = PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file,
+ result = PrepareMapFile(std::move(fd), std::move(readonly_fd), &mapped_file,
&readonly_mapped_file);
shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
UnguessableToken::Create());
@@ -194,14 +196,14 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
mapped_id_ = shm_.GetGUID();
SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
} else {
- memory_ = NULL;
+ memory_ = nullptr;
}
return success;
}
bool SharedMemory::Unmap() {
- if (memory_ == NULL)
+ if (!memory_)
return false;
SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
@@ -215,7 +217,7 @@ bool SharedMemory::Unmap() {
mapped_size_);
break;
}
- memory_ = NULL;
+ memory_ = nullptr;
mapped_size_ = 0;
mapped_id_ = UnguessableToken();
return true;
@@ -251,7 +253,7 @@ SharedMemoryHandle SharedMemory::GetReadOnlyHandle() {
}
DCHECK(shm_.IsValid());
- base::SharedMemoryHandle new_handle;
+ SharedMemoryHandle new_handle;
bool success = MakeMachSharedMemoryHandleReadOnly(&new_handle, shm_, memory_);
if (success)
new_handle.SetOwnershipPassesToIPC(true);
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index e9b50a5f56c..c148d34d4ed 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -14,6 +14,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
+#include "base/macros.h"
#include "base/memory/shared_memory_helper.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/posix/eintr_wrapper.h"
@@ -31,9 +32,13 @@
#include "third_party/ashmem/ashmem.h"
#endif
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#error "MacOS uses shared_memory_mac.cc"
+#endif
+
namespace base {
-SharedMemory::SharedMemory() {}
+SharedMemory::SharedMemory() = default;
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
: shm_(handle), read_only_(read_only) {}
@@ -56,7 +61,7 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
// static
size_t SharedMemory::GetHandleLimit() {
- return base::GetMaxFds();
+ return GetMaxFds();
}
// static
@@ -76,6 +81,7 @@ bool SharedMemory::CreateAndMapAnonymous(size_t size) {
}
#if !defined(OS_ANDROID)
+
// Chromium mostly only uses the unique/private shmem as specified by
// "name == L"". The exception is in the StatsTable.
// TODO(jrg): there is no way to "clean up" all unused named shmem if
@@ -92,16 +98,15 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// This function theoretically can block on the disk, but realistically
// the temporary files we create will just go into the buffer cache
// and be deleted before they ever make it out to disk.
- base::ThreadRestrictions::ScopedAllowIO allow_io;
+ ThreadRestrictions::ScopedAllowIO allow_io;
- ScopedFILE fp;
bool fix_size = true;
+ ScopedFD fd;
ScopedFD readonly_fd;
-
FilePath path;
- if (options.name_deprecated == NULL || options.name_deprecated->empty()) {
+ if (!options.name_deprecated || options.name_deprecated->empty()) {
bool result =
- CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+ CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
if (!result)
return false;
} else {
@@ -113,9 +118,9 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
// First, try to create the file.
- int fd = HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly));
- if (fd == -1 && options.open_existing_deprecated) {
+ fd.reset(HANDLE_EINTR(
+ open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly)));
+ if (!fd.is_valid() && options.open_existing_deprecated) {
// If this doesn't work, try and open an existing file in append mode.
// Opening an existing file in a world writable directory has two main
// security implications:
@@ -124,12 +129,12 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// - Attackers could plant a symbolic link so that an unexpected file
// is opened, so O_NOFOLLOW is passed to open().
#if !defined(OS_AIX)
- fd = HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW));
+ fd.reset(HANDLE_EINTR(
+ open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW)));
#else
// AIX has no 64-bit support for open flags such as -
// O_CLOEXEC, O_NOFOLLOW and O_TTY_INIT.
- fd = HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_APPEND));
+ fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_APPEND)));
#endif
// Check that the current user owns the file.
// If uid != euid, then a more complex permission model is used and this
@@ -137,12 +142,12 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
const uid_t real_uid = getuid();
const uid_t effective_uid = geteuid();
struct stat sb;
- if (fd >= 0 &&
- (fstat(fd, &sb) != 0 || sb.st_uid != real_uid ||
+ if (fd.is_valid() &&
+ (fstat(fd.get(), &sb) != 0 || sb.st_uid != real_uid ||
sb.st_uid != effective_uid)) {
LOG(ERROR) <<
"Invalid owner when opening existing shared memory file.";
- close(fd);
+ close(fd.get());
return false;
}
@@ -155,29 +160,31 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
if (!readonly_fd.is_valid()) {
DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
- close(fd);
- fd = -1;
+ close(fd.get());
return false;
}
}
- if (fd >= 0) {
+ if (fd.is_valid()) {
// "a+" is always appropriate: if it's a new file, a+ is similar to w+.
- fp.reset(fdopen(fd, "a+"));
+ if (!fdopen(fd.get(), "a+")) {
+ PLOG(ERROR) << "Creating file stream in " << path.value() << " failed";
+ return false;
+ }
}
}
- if (fp && fix_size) {
+ if (fd.is_valid() && fix_size) {
// Get current size.
struct stat stat;
- if (fstat(fileno(fp.get()), &stat) != 0)
+ if (fstat(fd.get(), &stat) != 0)
return false;
const size_t current_size = stat.st_size;
if (current_size != options.size) {
- if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+ if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
return false;
}
requested_size_ = options.size;
}
- if (fp == NULL) {
+ if (!fd.is_valid()) {
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
FilePath dir = path.DirName();
if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -192,12 +199,13 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
int mapped_file = -1;
int readonly_mapped_file = -1;
- bool result = PrepareMapFile(std::move(fp), std::move(readonly_fd),
+
+ bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
&mapped_file, &readonly_mapped_file);
- shm_ = SharedMemoryHandle(base::FileDescriptor(mapped_file, false),
- options.size, UnguessableToken::Create());
+ shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
+ UnguessableToken::Create());
readonly_shm_ =
- SharedMemoryHandle(base::FileDescriptor(readonly_mapped_file, false),
+ SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
options.size, shm_.GetGUID());
return result;
}
@@ -211,7 +219,7 @@ bool SharedMemory::Delete(const std::string& name) {
return false;
if (PathExists(path))
- return base::DeleteFile(path, false);
+ return DeleteFile(path, false);
// Doesn't exist, so success.
return true;
@@ -224,8 +232,8 @@ bool SharedMemory::Open(const std::string& name, bool read_only) {
read_only_ = read_only;
- const char *mode = read_only ? "r" : "r+";
- ScopedFILE fp(base::OpenFile(path, mode));
+ int mode = read_only ? O_RDONLY : O_RDWR;
+ ScopedFD fd(HANDLE_EINTR(open(path.value().c_str(), mode)));
ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
if (!readonly_fd.is_valid()) {
DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
@@ -233,7 +241,7 @@ bool SharedMemory::Open(const std::string& name, bool read_only) {
}
int mapped_file = -1;
int readonly_mapped_file = -1;
- bool result = PrepareMapFile(std::move(fp), std::move(readonly_fd),
+ bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
&mapped_file, &readonly_mapped_file);
// This form of sharing shared memory is deprecated. https://crbug.com/345734.
// However, we can't get rid of it without a significant refactor because its
@@ -245,10 +253,10 @@ bool SharedMemory::Open(const std::string& name, bool read_only) {
// single version of the service process.
// We pass the size |0|, which is a dummy size and wrong, but otherwise
// harmless.
- shm_ = SharedMemoryHandle(base::FileDescriptor(mapped_file, false), 0u,
+ shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), 0u,
UnguessableToken::Create());
readonly_shm_ = SharedMemoryHandle(
- base::FileDescriptor(readonly_mapped_file, false), 0, shm_.GetGUID());
+ FileDescriptor(readonly_mapped_file, false), 0, shm_.GetGUID());
return result;
}
#endif // !defined(OS_ANDROID)
@@ -275,10 +283,10 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
}
#endif
- memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+ memory_ = mmap(nullptr, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
MAP_SHARED, shm_.GetHandle(), offset);
- bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
+ bool mmap_succeeded = memory_ && memory_ != reinterpret_cast<void*>(-1);
if (mmap_succeeded) {
mapped_size_ = bytes;
mapped_id_ = shm_.GetGUID();
@@ -287,19 +295,19 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
(SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
} else {
- memory_ = NULL;
+ memory_ = nullptr;
}
return mmap_succeeded;
}
bool SharedMemory::Unmap() {
- if (memory_ == NULL)
+ if (!memory_)
return false;
SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
munmap(memory_, mapped_size_);
- memory_ = NULL;
+ memory_ = nullptr;
mapped_size_ = 0;
mapped_id_ = UnguessableToken();
return true;
@@ -345,11 +353,12 @@ bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
return false;
#if defined(GOOGLE_CHROME_BUILD)
- std::string name_base = std::string("com.google.Chrome");
+ static const char kShmem[] = "com.google.Chrome.shmem.";
#else
- std::string name_base = std::string("org.chromium.Chromium");
+ static const char kShmem[] = "org.chromium.Chromium.shmem.";
#endif
- *path = temp_dir.AppendASCII(name_base + ".shmem." + mem_name);
+ CR_DEFINE_STATIC_LOCAL(const std::string, name_base, (kShmem));
+ *path = temp_dir.AppendASCII(name_base + mem_name);
return true;
}
#endif // !defined(OS_ANDROID)
diff --git a/chromium/base/memory/shared_memory_tracker.cc b/chromium/base/memory/shared_memory_tracker.cc
index 18a4f044681..2b823381373 100644
--- a/chromium/base/memory/shared_memory_tracker.cc
+++ b/chromium/base/memory/shared_memory_tracker.cc
@@ -12,6 +12,8 @@
namespace base {
+const char SharedMemoryTracker::kDumpRootName[] = "shared_memory";
+
// static
SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
static SharedMemoryTracker* instance = new SharedMemoryTracker;
@@ -22,7 +24,7 @@ SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
std::string SharedMemoryTracker::GetDumpNameForTracing(
const UnguessableToken& id) {
DCHECK(!id.is_empty());
- return "shared_memory/" + id.ToString();
+ return std::string(kDumpRootName) + "/" + id.ToString();
}
// static
diff --git a/chromium/base/memory/shared_memory_tracker.h b/chromium/base/memory/shared_memory_tracker.h
index ee86e858a72..9b1e1212b9e 100644
--- a/chromium/base/memory/shared_memory_tracker.h
+++ b/chromium/base/memory/shared_memory_tracker.h
@@ -44,6 +44,9 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
// Records shared memory usage on unmapping.
void DecrementMemoryUsage(const SharedMemory& shared_memory);
+ // Root dump name for all shared memory dumps.
+ static const char kDumpRootName[];
+
private:
SharedMemoryTracker();
~SharedMemoryTracker() override;
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index 8480aeb9e5e..19a1245517d 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -34,6 +34,10 @@
#include <unistd.h>
#endif
+#if defined(OS_LINUX)
+#include <sys/syscall.h>
+#endif
+
#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
#endif
@@ -55,7 +59,7 @@ namespace {
class MultipleThreadMain : public PlatformThread::Delegate {
public:
explicit MultipleThreadMain(int16_t id) : id_(id) {}
- ~MultipleThreadMain() override {}
+ ~MultipleThreadMain() override = default;
static void CleanUp() {
SharedMemory memory;
@@ -126,8 +130,8 @@ TEST(SharedMemoryTest, OpenClose) {
EXPECT_NE(memory1.memory(), memory2.memory()); // Compare the pointers.
// Make sure we don't segfault. (it actually happened!)
- ASSERT_NE(memory1.memory(), static_cast<void*>(NULL));
- ASSERT_NE(memory2.memory(), static_cast<void*>(NULL));
+ ASSERT_NE(memory1.memory(), static_cast<void*>(nullptr));
+ ASSERT_NE(memory2.memory(), static_cast<void*>(nullptr));
// Write data to the first memory segment, verify contents of second.
memset(memory1.memory(), '1', kDataSize);
@@ -223,7 +227,7 @@ TEST(SharedMemoryTest, CloseNoUnmap) {
SharedMemory memory;
ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
char* ptr = static_cast<char*>(memory.memory());
- ASSERT_NE(ptr, static_cast<void*>(NULL));
+ ASSERT_NE(ptr, static_cast<void*>(nullptr));
memset(ptr, 'G', kDataSize);
memory.Close();
@@ -393,7 +397,7 @@ TEST(SharedMemoryTest, GetReadOnlyHandle) {
<< "The descriptor itself should be read-only.";
errno = 0;
- void* writable = mmap(NULL, contents.size(), PROT_READ | PROT_WRITE,
+ void* writable = mmap(nullptr, contents.size(), PROT_READ | PROT_WRITE,
MAP_SHARED, handle_fd, 0);
int mmap_errno = errno;
EXPECT_EQ(MAP_FAILED, writable)
@@ -509,7 +513,7 @@ TEST(SharedMemoryTest, MapAt) {
SharedMemory memory;
ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
- ASSERT_NE(ptr, static_cast<void*>(NULL));
+ ASSERT_NE(ptr, static_cast<void*>(nullptr));
for (size_t i = 0; i < kCount; ++i) {
ptr[i] = i;
@@ -521,7 +525,7 @@ TEST(SharedMemoryTest, MapAt) {
ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
offset /= sizeof(uint32_t);
ptr = static_cast<uint32_t*>(memory.memory());
- ASSERT_NE(ptr, static_cast<void*>(NULL));
+ ASSERT_NE(ptr, static_cast<void*>(nullptr));
for (size_t i = offset; i < kCount; ++i) {
EXPECT_EQ(ptr[i - offset], i);
}
@@ -568,6 +572,7 @@ TEST(SharedMemoryTest, AnonymousExecutable) {
// shared memory implementation. So the tests about file permissions are not
// included on Android. Fuchsia does not use a file-backed shared memory
// implementation.
+
#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
// Set a umask and restore the old mask on destruction.
diff --git a/chromium/base/memory/shared_memory_win.cc b/chromium/base/memory/shared_memory_win.cc
index 280bc4e10c5..b05be755e0f 100644
--- a/chromium/base/memory/shared_memory_win.cc
+++ b/chromium/base/memory/shared_memory_win.cc
@@ -177,8 +177,8 @@ bool SharedMemory::CreateAndMapAnonymous(size_t size) {
}
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // TODO(bsy,sehr): crbug.com/210609 NaCl forces us to round up 64k here,
- // wasting 32k per mapping on average.
+ // TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
+ // per mapping on average.
static const size_t kSectionMask = 65536 - 1;
DCHECK(!options.executable);
DCHECK(!shm_.IsValid());
@@ -197,7 +197,7 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
size_t rounded_size = (options.size + kSectionMask) & ~kSectionMask;
name_ = options.name_deprecated ?
ASCIIToUTF16(*options.name_deprecated) : L"";
- SECURITY_ATTRIBUTES sa = { sizeof(sa), NULL, FALSE };
+ SECURITY_ATTRIBUTES sa = {sizeof(sa), nullptr, FALSE};
SECURITY_DESCRIPTOR sd;
ACL dacl;
@@ -314,25 +314,26 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
shm_.GetHandle(),
read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
- if (memory_ != NULL) {
- DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
- mapped_size_ = GetMemorySectionSize(memory_);
- mapped_id_ = shm_.GetGUID();
- SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
- return true;
+ if (!memory_) {
+ DPLOG(ERROR) << "Failed executing MapViewOfFile";
+ return false;
}
- DPLOG(ERROR) << "Failed executing MapViewOfFile";
- return false;
+
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+ (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ mapped_size_ = GetMemorySectionSize(memory_);
+ mapped_id_ = shm_.GetGUID();
+ SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+ return true;
}
bool SharedMemory::Unmap() {
- if (memory_ == NULL)
+ if (!memory_)
return false;
SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
UnmapViewOfFile(memory_);
- memory_ = NULL;
+ memory_ = nullptr;
mapped_id_ = UnguessableToken();
return true;
}
diff --git a/chromium/base/memory/singleton_unittest.cc b/chromium/base/memory/singleton_unittest.cc
index e1292151fab..50b862d3eb0 100644
--- a/chromium/base/memory/singleton_unittest.cc
+++ b/chromium/base/memory/singleton_unittest.cc
@@ -19,8 +19,8 @@ typedef void (*CallbackFunc)();
template <size_t alignment>
class AlignedData {
public:
- AlignedData() {}
- ~AlignedData() {}
+ AlignedData() = default;
+ ~AlignedData() = default;
alignas(alignment) char data_[alignment];
};
@@ -71,7 +71,7 @@ struct CallbackTrait : public DefaultSingletonTraits<Type> {
class CallbackSingleton {
public:
- CallbackSingleton() : callback_(NULL) { }
+ CallbackSingleton() : callback_(nullptr) {}
CallbackFunc callback_;
};
@@ -123,8 +123,8 @@ struct CallbackSingletonWithStaticTrait::Trait
template <class Type>
class AlignedTestSingleton {
public:
- AlignedTestSingleton() {}
- ~AlignedTestSingleton() {}
+ AlignedTestSingleton() = default;
+ ~AlignedTestSingleton() = default;
static AlignedTestSingleton* GetInstance() {
return Singleton<AlignedTestSingleton,
StaticMemorySingletonTraits<AlignedTestSingleton>>::get();
@@ -162,7 +162,7 @@ CallbackFunc* GetStaticSingleton() {
class SingletonTest : public testing::Test {
public:
- SingletonTest() {}
+ SingletonTest() = default;
void SetUp() override {
non_leak_called_ = false;
@@ -249,7 +249,7 @@ TEST_F(SingletonTest, Basic) {
DeleteLeakySingleton();
// The static singleton can't be acquired post-atexit.
- EXPECT_EQ(NULL, GetStaticSingleton());
+ EXPECT_EQ(nullptr, GetStaticSingleton());
{
ShadowingAtExitManager sem;
diff --git a/chromium/base/memory/weak_ptr.cc b/chromium/base/memory/weak_ptr.cc
index 8879651e6da..1c3208f6433 100644
--- a/chromium/base/memory/weak_ptr.cc
+++ b/chromium/base/memory/weak_ptr.cc
@@ -28,17 +28,14 @@ bool WeakReference::Flag::IsValid() const {
return is_valid_;
}
-WeakReference::Flag::~Flag() {
-}
+WeakReference::Flag::~Flag() = default;
-WeakReference::WeakReference() {
-}
+WeakReference::WeakReference() = default;
WeakReference::WeakReference(const Flag* flag) : flag_(flag) {
}
-WeakReference::~WeakReference() {
-}
+WeakReference::~WeakReference() = default;
WeakReference::WeakReference(WeakReference&& other) = default;
@@ -46,8 +43,7 @@ WeakReference::WeakReference(const WeakReference& other) = default;
bool WeakReference::is_valid() const { return flag_.get() && flag_->IsValid(); }
-WeakReferenceOwner::WeakReferenceOwner() {
-}
+WeakReferenceOwner::WeakReferenceOwner() = default;
WeakReferenceOwner::~WeakReferenceOwner() {
Invalidate();
@@ -64,13 +60,13 @@ WeakReference WeakReferenceOwner::GetRef() const {
void WeakReferenceOwner::Invalidate() {
if (flag_.get()) {
flag_->Invalidate();
- flag_ = NULL;
+ flag_ = nullptr;
}
}
WeakPtrBase::WeakPtrBase() : ptr_(0) {}
-WeakPtrBase::~WeakPtrBase() {}
+WeakPtrBase::~WeakPtrBase() = default;
WeakPtrBase::WeakPtrBase(const WeakReference& ref, uintptr_t ptr)
: ref_(ref), ptr_(ptr) {}
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index 5c9ed545d72..1737a674d36 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -173,12 +173,14 @@ class SupportsWeakPtrBase {
// conversion will only compile if there is exists a Base which inherits
// from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
// function that makes calling this easier.
+ //
+ // Precondition: t != nullptr
template<typename Derived>
static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
static_assert(
std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
"AsWeakPtr argument must inherit from SupportsWeakPtr");
- return AsWeakPtrImpl<Derived>(t, *t);
+ return AsWeakPtrImpl<Derived>(t);
}
private:
@@ -186,9 +188,8 @@ class SupportsWeakPtrBase {
// which is an instance of SupportsWeakPtr<Base>. We can then safely
// static_cast the Base* to a Derived*.
template <typename Derived, typename Base>
- static WeakPtr<Derived> AsWeakPtrImpl(
- Derived* t, const SupportsWeakPtr<Base>&) {
- WeakPtr<Base> ptr = t->Base::AsWeakPtr();
+ static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
+ WeakPtr<Base> ptr = t->AsWeakPtr();
return WeakPtr<Derived>(
ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
}
diff --git a/chromium/base/memory/weak_ptr_unittest.cc b/chromium/base/memory/weak_ptr_unittest.cc
index d223bd2bf6d..f8dfb7c0f0d 100644
--- a/chromium/base/memory/weak_ptr_unittest.cc
+++ b/chromium/base/memory/weak_ptr_unittest.cc
@@ -51,9 +51,29 @@ struct Derived : public Base {};
struct TargetBase {};
struct Target : public TargetBase, public SupportsWeakPtr<Target> {
- virtual ~Target() {}
+ virtual ~Target() = default;
};
+
struct DerivedTarget : public Target {};
+
+// A class inheriting from Target and defining a nested type called 'Base'.
+// To guard against strange compilation errors.
+struct DerivedTargetWithNestedBase : public Target {
+ using Base = void;
+};
+
+// A struct with a virtual destructor.
+struct VirtualDestructor {
+ virtual ~VirtualDestructor() = default;
+};
+
+// A class inheriting from Target where Target is not the first base, and where
+// the first base has a virtual method table. This creates a structure where the
+// Target base is not positioned at the beginning of
+// DerivedTargetMultipleInheritance.
+struct DerivedTargetMultipleInheritance : public VirtualDestructor,
+ public Target {};
+
struct Arrow {
WeakPtr<Target> target;
};
@@ -290,6 +310,22 @@ TEST(WeakPtrTest, DerivedTarget) {
EXPECT_EQ(&target, ptr.get());
}
+TEST(WeakPtrTest, DerivedTargetWithNestedBase) {
+ DerivedTargetWithNestedBase target;
+ WeakPtr<DerivedTargetWithNestedBase> ptr = AsWeakPtr(&target);
+ EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrTest, DerivedTargetMultipleInheritance) {
+ DerivedTargetMultipleInheritance d;
+ Target& b = d;
+ EXPECT_NE(static_cast<void*>(&d), static_cast<void*>(&b));
+ const WeakPtr<Target> pb = AsWeakPtr(&b);
+ EXPECT_EQ(pb.get(), &b);
+ const WeakPtr<DerivedTargetMultipleInheritance> pd = AsWeakPtr(&d);
+ EXPECT_EQ(pd.get(), &d);
+}
+
TEST(WeakPtrFactoryTest, BooleanTesting) {
int data;
WeakPtrFactory<int> factory(&data);
diff --git a/chromium/base/message_loop/incoming_task_queue.cc b/chromium/base/message_loop/incoming_task_queue.cc
index 844a64f10e6..941cbd8dabb 100644
--- a/chromium/base/message_loop/incoming_task_queue.cc
+++ b/chromium/base/message_loop/incoming_task_queue.cc
@@ -21,8 +21,7 @@ namespace {
#if DCHECK_IS_ON()
// Delays larger than this are often bogus, and a warning should be emitted in
// debug builds to warn developers. http://crbug.com/450045
-const int kTaskDelayWarningThresholdInSeconds =
- 14 * 24 * 60 * 60; // 14 days.
+constexpr TimeDelta kTaskDelayWarningThreshold = TimeDelta::FromDays(14);
#endif
// Returns true if MessagePump::ScheduleWork() must be called one
@@ -67,8 +66,7 @@ bool IncomingTaskQueue::AddToIncomingQueue(const Location& from_here,
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
// for details.
CHECK(task);
- DLOG_IF(WARNING,
- delay.InSeconds() > kTaskDelayWarningThresholdInSeconds)
+ DLOG_IF(WARNING, delay > kTaskDelayWarningThreshold)
<< "Requesting super-long task delay period of " << delay.InSeconds()
<< " seconds from here: " << from_here.ToString();
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index f60fd180641..2c8899bf05e 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -45,7 +45,7 @@ base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
static auto* lazy_tls_ptr = new base::ThreadLocalPointer<MessageLoop>();
return lazy_tls_ptr;
}
-MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
+MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = nullptr;
#if defined(OS_IOS)
using MessagePumpForIO = MessagePumpIOSForIO;
@@ -71,14 +71,11 @@ std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
//------------------------------------------------------------------------------
-MessageLoop::TaskObserver::TaskObserver() {
-}
+MessageLoop::TaskObserver::TaskObserver() = default;
-MessageLoop::TaskObserver::~TaskObserver() {
-}
+MessageLoop::TaskObserver::~TaskObserver() = default;
-MessageLoop::DestructionObserver::~DestructionObserver() {
-}
+MessageLoop::DestructionObserver::~DestructionObserver() = default;
//------------------------------------------------------------------------------
@@ -136,9 +133,9 @@ MessageLoop::~MessageLoop() {
// Tell the incoming queue that we are dying.
incoming_task_queue_->WillDestroyCurrentMessageLoop();
- incoming_task_queue_ = NULL;
- unbound_task_runner_ = NULL;
- task_runner_ = NULL;
+ incoming_task_queue_ = nullptr;
+ unbound_task_runner_ = nullptr;
+ task_runner_ = nullptr;
// OK, now make it so that no one can find us.
if (current() == this)
@@ -525,14 +522,6 @@ void MessageLoopForUI::Start() {
static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
-void MessageLoopForUI::StartForTesting(
- base::android::JavaMessageHandlerFactory* factory,
- WaitableEvent* test_done_event) {
- // No Histogram support for UI message loop as it is managed by Java side
- static_cast<MessagePumpForUI*>(pump_.get())
- ->StartForUnitTest(this, factory, test_done_event);
-}
-
void MessageLoopForUI::Abort() {
static_cast<MessagePumpForUI*>(pump_.get())->Abort();
}
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index 46171f803a0..bff60ef1102 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -37,20 +37,9 @@
#include "base/message_loop/message_pump_libevent.h"
#endif
-#if defined(OS_ANDROID)
-namespace base {
-namespace android {
-
-class JavaMessageHandlerFactory;
-
-} // namespace android
-} // namespace base
-#endif // defined(OS_ANDROID)
-
namespace base {
class ThreadTaskRunnerHandle;
-class WaitableEvent;
// A MessageLoop is used to process events for a particular thread. There is
// at most one MessageLoop instance per thread.
@@ -443,7 +432,12 @@ class BASE_EXPORT MessageLoopForUI : public MessageLoop {
static MessageLoopForUI* current() {
MessageLoop* loop = MessageLoop::current();
DCHECK(loop);
+#if defined(OS_ANDROID)
+ DCHECK(loop->IsType(MessageLoop::TYPE_UI) ||
+ loop->IsType(MessageLoop::TYPE_JAVA));
+#else
DCHECK(loop->IsType(MessageLoop::TYPE_UI));
+#endif
return static_cast<MessageLoopForUI*>(loop);
}
@@ -464,8 +458,7 @@ class BASE_EXPORT MessageLoopForUI : public MessageLoop {
// never be called. Instead use Start(), which will forward all the native UI
// events to the Java message loop.
void Start();
- void StartForTesting(base::android::JavaMessageHandlerFactory* factory,
- WaitableEvent* test_done_event);
+
// In Android there are cases where we want to abort immediately without
// calling Quit(), in these cases we call Abort().
void Abort();
diff --git a/chromium/base/message_loop/message_loop_io_posix_unittest.cc b/chromium/base/message_loop/message_loop_io_posix_unittest.cc
index 5038bea7f8a..f98d4668d3a 100644
--- a/chromium/base/message_loop/message_loop_io_posix_unittest.cc
+++ b/chromium/base/message_loop/message_loop_io_posix_unittest.cc
@@ -25,7 +25,7 @@ namespace {
class MessageLoopForIoPosixTest : public testing::Test {
public:
- MessageLoopForIoPosixTest() {}
+ MessageLoopForIoPosixTest() = default;
// testing::Test interface.
void SetUp() override {
diff --git a/chromium/base/message_loop/message_loop_task_runner.cc b/chromium/base/message_loop/message_loop_task_runner.cc
index 5d41986d995..f251e3b8b24 100644
--- a/chromium/base/message_loop/message_loop_task_runner.cc
+++ b/chromium/base/message_loop/message_loop_task_runner.cc
@@ -46,8 +46,7 @@ bool MessageLoopTaskRunner::RunsTasksInCurrentSequence() const {
return valid_thread_id_ == PlatformThread::CurrentId();
}
-MessageLoopTaskRunner::~MessageLoopTaskRunner() {
-}
+MessageLoopTaskRunner::~MessageLoopTaskRunner() = default;
} // namespace internal
diff --git a/chromium/base/message_loop/message_loop_task_runner_unittest.cc b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
index 323a3e88b1a..f2e71c40e4e 100644
--- a/chromium/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
@@ -101,11 +101,11 @@ class MessageLoopTaskRunnerTest : public testing::Test {
AtomicSequenceNumber MessageLoopTaskRunnerTest::g_order;
TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
- MessageLoop* task_run_on = NULL;
- MessageLoop* task_deleted_on = NULL;
+ MessageLoop* task_run_on = nullptr;
+ MessageLoop* task_deleted_on = nullptr;
int task_delete_order = -1;
- MessageLoop* reply_run_on = NULL;
- MessageLoop* reply_deleted_on = NULL;
+ MessageLoop* reply_run_on = nullptr;
+ MessageLoop* reply_deleted_on = nullptr;
int reply_delete_order = -1;
scoped_refptr<LoopRecorder> task_recorder =
@@ -118,8 +118,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
BindOnce(&RecordLoopAndQuit, reply_recorder)));
// Die if base::Bind doesn't retain a reference to the recorders.
- task_recorder = NULL;
- reply_recorder = NULL;
+ task_recorder = nullptr;
+ reply_recorder = nullptr;
ASSERT_FALSE(task_deleted_on);
ASSERT_FALSE(reply_deleted_on);
@@ -134,11 +134,11 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
}
TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
- MessageLoop* task_run_on = NULL;
- MessageLoop* task_deleted_on = NULL;
+ MessageLoop* task_run_on = nullptr;
+ MessageLoop* task_deleted_on = nullptr;
int task_delete_order = -1;
- MessageLoop* reply_run_on = NULL;
- MessageLoop* reply_deleted_on = NULL;
+ MessageLoop* reply_run_on = nullptr;
+ MessageLoop* reply_deleted_on = nullptr;
int reply_delete_order = -1;
scoped_refptr<LoopRecorder> task_recorder =
@@ -168,11 +168,11 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
}
TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
- MessageLoop* task_run_on = NULL;
- MessageLoop* task_deleted_on = NULL;
+ MessageLoop* task_run_on = nullptr;
+ MessageLoop* task_deleted_on = nullptr;
int task_delete_order = -1;
- MessageLoop* reply_run_on = NULL;
- MessageLoop* reply_deleted_on = NULL;
+ MessageLoop* reply_run_on = nullptr;
+ MessageLoop* reply_deleted_on = nullptr;
int reply_delete_order = -1;
scoped_refptr<LoopRecorder> task_recorder =
@@ -186,8 +186,8 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
BindOnce(&RecordLoopAndQuit, reply_recorder)));
// Die if base::Bind doesn't retain a reference to the recorders.
- task_recorder = NULL;
- reply_recorder = NULL;
+ task_recorder = nullptr;
+ reply_recorder = nullptr;
ASSERT_FALSE(task_deleted_on);
ASSERT_FALSE(reply_deleted_on);
@@ -204,11 +204,11 @@ TEST_F(MessageLoopTaskRunnerTest,
PostTaskAndReply_DeadReplyTaskRunnerBehavior) {
// Annotate the scope as having memory leaks to suppress heapchecker reports.
ANNOTATE_SCOPED_MEMORY_LEAK;
- MessageLoop* task_run_on = NULL;
- MessageLoop* task_deleted_on = NULL;
+ MessageLoop* task_run_on = nullptr;
+ MessageLoop* task_deleted_on = nullptr;
int task_delete_order = -1;
- MessageLoop* reply_run_on = NULL;
- MessageLoop* reply_deleted_on = NULL;
+ MessageLoop* reply_run_on = nullptr;
+ MessageLoop* reply_deleted_on = nullptr;
int reply_delete_order = -1;
scoped_refptr<LoopRecorder> task_recorder =
@@ -222,8 +222,8 @@ TEST_F(MessageLoopTaskRunnerTest,
BindOnce(&RecordLoopAndQuit, reply_recorder));
// Die if base::Bind doesn't retain a reference to the recorders.
- task_recorder = NULL;
- reply_recorder = NULL;
+ task_recorder = nullptr;
+ reply_recorder = nullptr;
ASSERT_FALSE(task_deleted_on);
ASSERT_FALSE(reply_deleted_on);
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index 25718c62050..39953cc5f85 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -29,8 +29,9 @@
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_ANDROID)
+#include "base/android/java_handler_thread.h"
#include "base/android/jni_android.h"
-#include "base/test/android/java_handler_thread_for_testing.h"
+#include "base/test/android/java_handler_thread_helpers.h"
#endif
#if defined(OS_WIN)
@@ -85,7 +86,7 @@ class Foo : public RefCounted<Foo> {
private:
friend class RefCounted<Foo>;
- ~Foo() {}
+ ~Foo() = default;
int test_count_;
std::string result_;
@@ -201,7 +202,7 @@ class DummyTaskObserver : public MessageLoop::TaskObserver {
num_tasks_processed_(0),
num_tasks_(num_tasks) {}
- ~DummyTaskObserver() override {}
+ ~DummyTaskObserver() override = default;
void WillProcessTask(const PendingTask& pending_task) override {
num_tasks_started_++;
@@ -253,46 +254,41 @@ void PostNTasks(int posts_remaining) {
}
#if defined(OS_ANDROID)
-void AbortMessagePump() {
- JNIEnv* env = base::android::AttachCurrentThread();
- jclass exception = env->FindClass(
- "org/chromium/base/TestSystemMessageHandler$TestException");
-
- env->ThrowNew(exception,
- "This is a test exception that should be caught in "
- "TestSystemMessageHandler.handleMessage");
- static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
+void DoNotRun() {
+ ASSERT_TRUE(false);
}
void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
-
std::unique_ptr<android::JavaHandlerThread> java_thread;
if (init_java_first) {
- java_thread =
- android::JavaHandlerThreadForTesting::CreateJavaFirst(&test_done_event);
+ java_thread = android::JavaHandlerThreadHelpers::CreateJavaFirst();
} else {
- java_thread = android::JavaHandlerThreadForTesting::Create(
- "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
- &test_done_event);
+ java_thread = std::make_unique<android::JavaHandlerThread>(
+ "JavaHandlerThreadForTesting from AbortDontRunMoreTasks");
}
java_thread->Start();
+ java_thread->ListenForUncaughtExceptionsForTesting();
+ auto target =
+ BindOnce(&android::JavaHandlerThreadHelpers::ThrowExceptionAndAbort,
+ &test_done_event);
if (delayed) {
java_thread->message_loop()->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&AbortMessagePump),
- TimeDelta::FromMilliseconds(10));
+ FROM_HERE, std::move(target), TimeDelta::FromMilliseconds(10));
} else {
- java_thread->message_loop()->task_runner()->PostTask(
- FROM_HERE, BindOnce(&AbortMessagePump));
+ java_thread->message_loop()->task_runner()->PostTask(FROM_HERE,
+ std::move(target));
+ java_thread->message_loop()->task_runner()->PostTask(FROM_HERE,
+ BindOnce(&DoNotRun));
}
-
- // Wait to ensure we catch the correct exception (and don't crash)
test_done_event.Wait();
-
java_thread->Stop();
- java_thread.reset();
+ android::ScopedJavaLocalRef<jthrowable> exception =
+ java_thread->GetUncaughtExceptionIfAny();
+ ASSERT_TRUE(
+ android::JavaHandlerThreadHelpers::IsExceptionTestException(exception));
}
TEST(MessageLoopTest, JavaExceptionAbort) {
@@ -958,12 +954,12 @@ TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion) {
MessageLoop loop(GetParam());
loop.task_runner()->PostTask(
FROM_HERE, BindOnce(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &a_was_deleted)));
+ new RecordDeletionProbe(nullptr, &a_was_deleted)));
// TODO(ajwong): Do we really need 1000ms here?
loop.task_runner()->PostDelayedTask(
FROM_HERE,
BindOnce(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(NULL, &b_was_deleted)),
+ new RecordDeletionProbe(nullptr, &b_was_deleted)),
TimeDelta::FromMilliseconds(1000));
}
EXPECT_TRUE(a_was_deleted);
@@ -981,7 +977,7 @@ TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion_Chain) {
MessageLoop loop(GetParam());
// The scoped_refptr for each of the below is held either by the chained
// RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
- RecordDeletionProbe* a = new RecordDeletionProbe(NULL, &a_was_deleted);
+ RecordDeletionProbe* a = new RecordDeletionProbe(nullptr, &a_was_deleted);
RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
loop.task_runner()->PostTask(FROM_HERE,
diff --git a/chromium/base/message_loop/message_pump.cc b/chromium/base/message_loop/message_pump.cc
index 3d85b9b5643..907617624a2 100644
--- a/chromium/base/message_loop/message_pump.cc
+++ b/chromium/base/message_loop/message_pump.cc
@@ -6,11 +6,9 @@
namespace base {
-MessagePump::MessagePump() {
-}
+MessagePump::MessagePump() = default;
-MessagePump::~MessagePump() {
-}
+MessagePump::~MessagePump() = default;
void MessagePump::SetTimerSlack(TimerSlack) {
}
diff --git a/chromium/base/message_loop/message_pump_android.cc b/chromium/base/message_loop/message_pump_android.cc
index 532665886e1..1a11e55325b 100644
--- a/chromium/base/message_loop/message_pump_android.cc
+++ b/chromium/base/message_loop/message_pump_android.cc
@@ -6,35 +6,43 @@
#include <jni.h>
-#include "base/android/java_message_handler_factory.h"
#include "base/android/jni_android.h"
#include "base/android/scoped_java_ref.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
-#include "base/time/time.h"
#include "jni/SystemMessageHandler_jni.h"
using base::android::JavaParamRef;
using base::android::ScopedJavaLocalRef;
-// ----------------------------------------------------------------------------
-// Native JNI methods called by Java.
-// ----------------------------------------------------------------------------
-// This method can not move to anonymous namespace as it has been declared as
-// 'static' in system_message_handler_jni.h.
-static void DoRunLoopOnce(JNIEnv* env,
- const JavaParamRef<jobject>& obj,
- jlong native_delegate,
- jlong native_message_pump,
- jlong delayed_scheduled_time_ticks) {
- base::MessagePump::Delegate* delegate =
- reinterpret_cast<base::MessagePump::Delegate*>(native_delegate);
- DCHECK(delegate);
- base::MessagePumpForUI* pump =
- reinterpret_cast<base::MessagePumpForUI*>(native_message_pump);
- DCHECK(pump);
+namespace base {
+
+MessagePumpForUI::MessagePumpForUI() = default;
+MessagePumpForUI::~MessagePumpForUI() = default;
+
+// This is called by the java SystemMessageHandler whenever the message queue
+// detects an idle state (as in, control returns to the looper and there are no
+// tasks available to be run immediately).
+// See the comments in DoRunLoopOnce for how this differs from the
+// implementation on other platforms.
+void MessagePumpForUI::DoIdleWork(JNIEnv* env,
+ const JavaParamRef<jobject>& obj) {
+ delegate_->DoIdleWork();
+}
+
+void MessagePumpForUI::DoRunLoopOnce(JNIEnv* env,
+ const JavaParamRef<jobject>& obj,
+ jboolean delayed) {
+ if (delayed)
+ delayed_scheduled_time_ = base::TimeTicks();
+
+ // If the pump has been aborted, tasks may continue to be queued up, but
+ // shouldn't run.
+ if (ShouldAbort())
+ return;
+
// This is based on MessagePumpForUI::DoRunLoop() from desktop.
// Note however that our system queue is handled in the java side.
// In desktop we inspect and process a single system message and then
@@ -49,85 +57,34 @@ static void DoRunLoopOnce(JNIEnv* env,
// add an IdleHandler to the message queue in SystemMessageHandler.java, which
// calls DoIdleWork whenever control returns back to the looper and there are
// no tasks queued up to run immediately.
- delegate->DoWork();
- if (pump->ShouldAbort()) {
+ delegate_->DoWork();
+ if (ShouldAbort()) {
// There is a pending JNI exception, return to Java so that the exception is
// thrown correctly.
return;
}
- // In the java side, |SystemMessageHandler| keeps a single "delayed" message.
- // It's an expensive operation to |removeMessage| there, so this is optimized
- // to avoid those calls.
- //
- // At this stage, |next_delayed_work_time| can be:
- // 1) The same as previously scheduled: nothing to be done, move along. This
- // is the typical case, since this method is called for every single message.
- //
- // 2) Not previously scheduled: just post a new message in java.
- //
- // 3) Shorter than previously scheduled: far less common. In this case,
- // |removeMessage| and post a new one.
- //
- // 4) Longer than previously scheduled (or null): nothing to be done, move
- // along.
- //
- // Side note: base::TimeTicks is a C++ representation and can't be
- // compared in java. When calling |scheduleDelayedWork|, pass the
- // |InternalValue()| to java and then back to C++ so the comparisons can be
- // done here.
- // This roundtrip allows comparing TimeTicks directly (cheap) and
- // avoid comparisons with TimeDelta / Now() (expensive).
base::TimeTicks next_delayed_work_time;
- delegate->DoDelayedWork(&next_delayed_work_time);
- if (pump->ShouldAbort()) {
+ delegate_->DoDelayedWork(&next_delayed_work_time);
+ if (ShouldAbort()) {
// There is a pending JNI exception, return to Java so that the exception is
// thrown correctly
return;
}
- if (!next_delayed_work_time.is_null()) {
- // Schedule a new message if there's nothing already scheduled or there's a
- // shorter delay than previously scheduled (see (2) and (3) above).
- if (delayed_scheduled_time_ticks == 0 ||
- next_delayed_work_time < base::TimeTicks::FromInternalValue(
- delayed_scheduled_time_ticks)) {
- Java_SystemMessageHandler_scheduleDelayedWork(env, obj,
- next_delayed_work_time.ToInternalValue(),
- (next_delayed_work_time -
- base::TimeTicks::Now()).InMillisecondsRoundedUp());
- }
- }
+ if (!next_delayed_work_time.is_null())
+ ScheduleDelayedWork(next_delayed_work_time);
}
-// This is called by the java SystemMessageHandler whenever the message queue
-// detects an idle state (as in, control returns to the looper and there are no
-// tasks available to be run immediately).
-// See the comments in DoRunLoopOnce for how this differs from the
-// implementation on other platforms.
-static void DoIdleWork(JNIEnv* env,
- const JavaParamRef<jobject>& obj,
- jlong native_delegate,
- jlong native_message_pump) {
- base::MessagePump::Delegate* delegate =
- reinterpret_cast<base::MessagePump::Delegate*>(native_delegate);
- DCHECK(delegate);
- delegate->DoIdleWork();
-};
-
-namespace base {
-
-MessagePumpForUI::MessagePumpForUI() = default;
-MessagePumpForUI::~MessagePumpForUI() = default;
-
void MessagePumpForUI::Run(Delegate* delegate) {
NOTREACHED() << "UnitTests should rely on MessagePumpForUIStub in"
- " test_stub_android.h";
+ " test_stub_android.h";
}
-JNIEnv* MessagePumpForUI::StartInternal() {
+void MessagePumpForUI::Start(Delegate* delegate) {
DCHECK(!quit_);
- run_loop_ = new RunLoop();
+ delegate_ = delegate;
+ run_loop_ = std::make_unique<RunLoop>();
// Since the RunLoop was just created above, BeforeRun should be guaranteed to
// return true (it only returns false if the RunLoop has been Quit already).
if (!run_loop_->BeforeRun())
@@ -137,40 +94,24 @@ JNIEnv* MessagePumpForUI::StartInternal() {
JNIEnv* env = base::android::AttachCurrentThread();
DCHECK(env);
- return env;
-}
-
-void MessagePumpForUI::Start(Delegate* delegate) {
- JNIEnv* env = StartInternal();
- system_message_handler_obj_.Reset(Java_SystemMessageHandler_create(
- env, reinterpret_cast<intptr_t>(delegate),
- reinterpret_cast<intptr_t>(this)));
-}
-
-void MessagePumpForUI::StartForUnitTest(
- Delegate* delegate,
- base::android::JavaMessageHandlerFactory* factory,
- WaitableEvent* test_done_event) {
- JNIEnv* env = StartInternal();
system_message_handler_obj_.Reset(
- factory->CreateMessageHandler(env, delegate, this, test_done_event));
+ Java_SystemMessageHandler_create(env, reinterpret_cast<jlong>(this)));
}
void MessagePumpForUI::Quit() {
quit_ = true;
+
if (!system_message_handler_obj_.is_null()) {
JNIEnv* env = base::android::AttachCurrentThread();
DCHECK(env);
- Java_SystemMessageHandler_removeAllPendingMessages(
- env, system_message_handler_obj_);
+ Java_SystemMessageHandler_shutdown(env, system_message_handler_obj_);
system_message_handler_obj_.Reset();
}
if (run_loop_) {
run_loop_->AfterRun();
- delete run_loop_;
- run_loop_ = NULL;
+ run_loop_ = nullptr;
}
}
@@ -188,6 +129,26 @@ void MessagePumpForUI::ScheduleWork() {
void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
if (quit_)
return;
+ // In the java side, |SystemMessageHandler| keeps a single "delayed" message.
+ // It's an expensive operation to |removeMessage| there, so this is optimized
+ // to avoid those calls.
+ //
+ // At this stage, |delayed_work_time| can be:
+ // 1) The same as previously scheduled: nothing to be done, move along. This
+ // is the typical case, since this method is called for every single message.
+ //
+ // 2) Not previously scheduled: just post a new message in java.
+ //
+ // 3) Shorter than previously scheduled: far less common. In this case,
+ // |removeMessage| and post a new one.
+ //
+ // 4) Longer than previously scheduled (or null): nothing to be done, move
+ // along.
+ if (!delayed_scheduled_time_.is_null() &&
+ delayed_work_time >= delayed_scheduled_time_) {
+ return;
+ }
+ DCHECK(!delayed_work_time.is_null());
DCHECK(!system_message_handler_obj_.is_null());
JNIEnv* env = base::android::AttachCurrentThread();
@@ -195,11 +156,11 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
jlong millis =
(delayed_work_time - TimeTicks::Now()).InMillisecondsRoundedUp();
+ delayed_scheduled_time_ = delayed_work_time;
// Note that we're truncating to milliseconds as required by the java side,
// even though delayed_work_time is microseconds resolution.
Java_SystemMessageHandler_scheduleDelayedWork(
- env, system_message_handler_obj_, delayed_work_time.ToInternalValue(),
- millis);
+ env, system_message_handler_obj_, millis);
}
} // namespace base
diff --git a/chromium/base/message_loop/message_pump_android.h b/chromium/base/message_loop/message_pump_android.h
index 3c8ac654c46..d09fdde789f 100644
--- a/chromium/base/message_loop/message_pump_android.h
+++ b/chromium/base/message_loop/message_pump_android.h
@@ -6,22 +6,18 @@
#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_ANDROID_H_
#include <jni.h>
+#include <memory>
#include "base/android/scoped_java_ref.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/message_loop/message_pump.h"
+#include "base/time/time.h"
namespace base {
-namespace android {
-class JavaMessageHandlerFactory;
-}
-
class RunLoop;
-class TimeTicks;
-class WaitableEvent;
// This class implements a MessagePump needed for TYPE_UI MessageLoops on
// OS_ANDROID platform.
@@ -30,15 +26,17 @@ class BASE_EXPORT MessagePumpForUI : public MessagePump {
MessagePumpForUI();
~MessagePumpForUI() override;
+ void DoIdleWork(JNIEnv* env, const base::android::JavaParamRef<jobject>& obj);
+ void DoRunLoopOnce(JNIEnv* env,
+ const base::android::JavaParamRef<jobject>& obj,
+ jboolean delayed);
+
void Run(Delegate* delegate) override;
void Quit() override;
void ScheduleWork() override;
void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
virtual void Start(Delegate* delegate);
- void StartForUnitTest(Delegate* delegate,
- base::android::JavaMessageHandlerFactory* factory,
- WaitableEvent* test_done_event);
// We call Abort when there is a pending JNI exception, meaning that the
// current thread will crash when we return to Java.
@@ -48,12 +46,12 @@ class BASE_EXPORT MessagePumpForUI : public MessagePump {
bool ShouldAbort() const { return should_abort_; }
private:
- JNIEnv* StartInternal();
-
- RunLoop* run_loop_ = nullptr;
+ std::unique_ptr<RunLoop> run_loop_;
base::android::ScopedJavaGlobalRef<jobject> system_message_handler_obj_;
bool should_abort_ = false;
bool quit_ = false;
+ Delegate* delegate_ = nullptr;
+ base::TimeTicks delayed_scheduled_time_;
DISALLOW_COPY_AND_ASSIGN(MessagePumpForUI);
};
diff --git a/chromium/base/message_loop/message_pump_default.cc b/chromium/base/message_loop/message_pump_default.cc
index 50dbc6f7183..dba0f5ba4e2 100644
--- a/chromium/base/message_loop/message_pump_default.cc
+++ b/chromium/base/message_loop/message_pump_default.cc
@@ -24,7 +24,7 @@ MessagePumpDefault::MessagePumpDefault()
event_(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED) {}
-MessagePumpDefault::~MessagePumpDefault() {}
+MessagePumpDefault::~MessagePumpDefault() = default;
void MessagePumpDefault::Run(Delegate* delegate) {
AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
diff --git a/chromium/base/message_loop/message_pump_fuchsia.cc b/chromium/base/message_loop/message_pump_fuchsia.cc
index 22fb3a1eb0a..e1cda047404 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.cc
+++ b/chromium/base/message_loop/message_pump_fuchsia.cc
@@ -91,6 +91,8 @@ MessagePumpFuchsia::MessagePumpFuchsia() : weak_factory_(this) {
CHECK_EQ(ZX_OK, zx_port_create(0, port_.receive()));
}
+MessagePumpFuchsia::~MessagePumpFuchsia() {}
+
bool MessagePumpFuchsia::WatchFileDescriptor(int fd,
bool persistent,
int mode,
diff --git a/chromium/base/message_loop/message_pump_fuchsia.h b/chromium/base/message_loop/message_pump_fuchsia.h
index 80d41538110..8ffa76eb8f2 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.h
+++ b/chromium/base/message_loop/message_pump_fuchsia.h
@@ -133,6 +133,7 @@ class BASE_EXPORT MessagePumpFuchsia : public MessagePump {
};
MessagePumpFuchsia();
+ ~MessagePumpFuchsia() override;
bool WatchZxHandle(zx_handle_t handle,
bool persistent,
diff --git a/chromium/base/message_loop/message_pump_glib.cc b/chromium/base/message_loop/message_pump_glib.cc
index fd23745f4e1..2f1909b84b2 100644
--- a/chromium/base/message_loop/message_pump_glib.cc
+++ b/chromium/base/message_loop/message_pump_glib.cc
@@ -112,12 +112,8 @@ gboolean WorkSourceDispatch(GSource* source,
}
// I wish these could be const, but g_source_new wants non-const.
-GSourceFuncs WorkSourceFuncs = {
- WorkSourcePrepare,
- WorkSourceCheck,
- WorkSourceDispatch,
- NULL
-};
+GSourceFuncs WorkSourceFuncs = {WorkSourcePrepare, WorkSourceCheck,
+ WorkSourceDispatch, nullptr};
// The following is used to make sure we only run the MessagePumpGlib on one
// thread. X only has one message pump so we can only have one UI loop per
@@ -180,7 +176,7 @@ struct MessagePumpGlib::RunState {
};
MessagePumpGlib::MessagePumpGlib()
- : state_(NULL),
+ : state_(nullptr),
context_(g_main_context_default()),
wakeup_gpollfd_(new GPollFD) {
// Create our wakeup pipe, which is used to flag when work was scheduled.
diff --git a/chromium/base/message_loop/message_pump_glib_unittest.cc b/chromium/base/message_loop/message_pump_glib_unittest.cc
index 3ac58c85bc3..f7868c71476 100644
--- a/chromium/base/message_loop/message_pump_glib_unittest.cc
+++ b/chromium/base/message_loop/message_pump_glib_unittest.cc
@@ -33,7 +33,7 @@ class EventInjector {
EventInjector() : processed_events_(0) {
source_ = static_cast<Source*>(g_source_new(&SourceFuncs, sizeof(Source)));
source_->injector = this;
- g_source_attach(source_, NULL);
+ g_source_attach(source_, nullptr);
g_source_set_can_recurse(source_, TRUE);
}
@@ -135,12 +135,9 @@ class EventInjector {
DISALLOW_COPY_AND_ASSIGN(EventInjector);
};
-GSourceFuncs EventInjector::SourceFuncs = {
- EventInjector::Prepare,
- EventInjector::Check,
- EventInjector::Dispatch,
- NULL
-};
+GSourceFuncs EventInjector::SourceFuncs = {EventInjector::Prepare,
+ EventInjector::Check,
+ EventInjector::Dispatch, nullptr};
void IncrementInt(int *value) {
++*value;
@@ -159,7 +156,7 @@ void PostMessageLoopTask(const Location& from_here, OnceClosure task) {
// Test fixture.
class MessagePumpGLibTest : public testing::Test {
public:
- MessagePumpGLibTest() : loop_(NULL), injector_(NULL) { }
+ MessagePumpGLibTest() : loop_(nullptr), injector_(nullptr) {}
// Overridden from testing::Test:
void SetUp() override {
@@ -168,9 +165,9 @@ class MessagePumpGLibTest : public testing::Test {
}
void TearDown() override {
delete injector_;
- injector_ = NULL;
+ injector_ = nullptr;
delete loop_;
- loop_ = NULL;
+ loop_ = nullptr;
}
MessageLoop* loop() const { return loop_; }
@@ -383,8 +380,8 @@ void AddEventsAndDrainGLib(EventInjector* injector) {
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&DoNothing));
// Drain the events
- while (g_main_context_pending(NULL)) {
- g_main_context_iteration(NULL, FALSE);
+ while (g_main_context_pending(nullptr)) {
+ g_main_context_iteration(nullptr, FALSE);
}
}
@@ -408,13 +405,13 @@ class GLibLoopRunner : public RefCounted<GLibLoopRunner> {
void RunGLib() {
while (!quit_) {
- g_main_context_iteration(NULL, TRUE);
+ g_main_context_iteration(nullptr, TRUE);
}
}
void RunLoop() {
while (!quit_) {
- g_main_context_iteration(NULL, TRUE);
+ g_main_context_iteration(nullptr, TRUE);
}
}
diff --git a/chromium/base/message_loop/message_pump_libevent.cc b/chromium/base/message_loop/message_pump_libevent.cc
index 63d85c86221..dc5efc69215 100644
--- a/chromium/base/message_loop/message_pump_libevent.cc
+++ b/chromium/base/message_loop/message_pump_libevent.cc
@@ -45,10 +45,10 @@ namespace base {
MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher(
const Location& from_here)
- : event_(NULL),
- pump_(NULL),
- watcher_(NULL),
- was_destroyed_(NULL),
+ : event_(nullptr),
+ pump_(nullptr),
+ watcher_(nullptr),
+ was_destroyed_(nullptr),
created_from_location_(from_here) {}
MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
@@ -63,14 +63,14 @@ MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
event* e = ReleaseEvent();
- if (e == NULL)
+ if (e == nullptr)
return true;
// event_del() is a no-op if the event isn't active.
int rv = event_del(e);
delete e;
- pump_ = NULL;
- watcher_ = NULL;
+ pump_ = nullptr;
+ watcher_ = nullptr;
return (rv == 0);
}
@@ -83,7 +83,7 @@ void MessagePumpLibevent::FileDescriptorWatcher::Init(event* e) {
event* MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
struct event* e = event_;
- event_ = NULL;
+ event_ = nullptr;
return e;
}
@@ -184,7 +184,7 @@ bool MessagePumpLibevent::WatchFileDescriptor(int fd,
}
// Add this socket to the list of monitored sockets.
- if (event_add(evt.get(), NULL)) {
+ if (event_add(evt.get(), nullptr)) {
DPLOG(ERROR) << "event_add failed(fd=" << EVENT_FD(evt.get()) << ")";
return false;
}
@@ -305,7 +305,7 @@ bool MessagePumpLibevent::Init() {
OnWakeup, this);
event_base_set(event_base_, wakeup_event_);
- if (event_add(wakeup_event_, 0))
+ if (event_add(wakeup_event_, nullptr))
return false;
return true;
}
diff --git a/chromium/base/message_loop/message_pump_libevent_unittest.cc b/chromium/base/message_loop/message_pump_libevent_unittest.cc
index 4d51bbf9bb7..da7c06ef820 100644
--- a/chromium/base/message_loop/message_pump_libevent_unittest.cc
+++ b/chromium/base/message_loop/message_pump_libevent_unittest.cc
@@ -33,7 +33,7 @@ class MessagePumpLibeventTest : public testing::Test {
MessagePumpLibeventTest()
: ui_loop_(new MessageLoop(MessageLoop::TYPE_UI)),
io_thread_("MessagePumpLibeventTestIOThread") {}
- ~MessagePumpLibeventTest() override {}
+ ~MessagePumpLibeventTest() override = default;
void SetUp() override {
Thread::Options options(MessageLoop::TYPE_IO, 0);
@@ -77,7 +77,7 @@ namespace {
// nothing useful.
class StupidWatcher : public MessagePumpLibevent::Watcher {
public:
- ~StupidWatcher() override {}
+ ~StupidWatcher() override = default;
// base:MessagePumpLibevent::Watcher interface
void OnFileCanReadWithoutBlocking(int fd) override {}
@@ -111,7 +111,7 @@ class BaseWatcher : public MessagePumpLibevent::Watcher {
: controller_(controller) {
DCHECK(controller_);
}
- ~BaseWatcher() override {}
+ ~BaseWatcher() override = default;
// base:MessagePumpLibevent::Watcher interface
void OnFileCanReadWithoutBlocking(int /* fd */) override { NOTREACHED(); }
@@ -133,7 +133,7 @@ class DeleteWatcher : public BaseWatcher {
void OnFileCanWriteWithoutBlocking(int /* fd */) override {
DCHECK(controller_);
delete controller_;
- controller_ = NULL;
+ controller_ = nullptr;
}
};
@@ -155,7 +155,7 @@ class StopWatcher : public BaseWatcher {
MessagePumpLibevent::FileDescriptorWatcher* controller)
: BaseWatcher(controller) {}
- ~StopWatcher() override {}
+ ~StopWatcher() override = default;
void OnFileCanWriteWithoutBlocking(int /* fd */) override {
controller_->StopWatchingFileDescriptor();
@@ -176,16 +176,15 @@ TEST_F(MessagePumpLibeventTest, StopWatcher) {
void QuitMessageLoopAndStart(const Closure& quit_closure) {
quit_closure.Run();
- MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
- RunLoop runloop;
+ RunLoop runloop(RunLoop::Type::kNestableTasksAllowed);
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, runloop.QuitClosure());
runloop.Run();
}
class NestedPumpWatcher : public MessagePumpLibevent::Watcher {
public:
- NestedPumpWatcher() {}
- ~NestedPumpWatcher() override {}
+ NestedPumpWatcher() = default;
+ ~NestedPumpWatcher() override = default;
void OnFileCanReadWithoutBlocking(int /* fd */) override {
RunLoop runloop;
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index 4b45341d58d..90680f07041 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -240,8 +240,8 @@ TEST_F(ScheduleWorkTest, ThreadTimeToJavaFromFourThreads) {
class FakeMessagePump : public MessagePump {
public:
- FakeMessagePump() {}
- ~FakeMessagePump() override {}
+ FakeMessagePump() = default;
+ ~FakeMessagePump() override = default;
void Run(Delegate* delegate) override {}
diff --git a/chromium/base/metrics/OWNERS b/chromium/base/metrics/OWNERS
index 3b1e0686f43..2f98bde4363 100644
--- a/chromium/base/metrics/OWNERS
+++ b/chromium/base/metrics/OWNERS
@@ -1,6 +1,8 @@
asvitkine@chromium.org
+gayane@chromium.org
holte@chromium.org
isherman@chromium.org
+jwd@chromium.org
mpearson@chromium.org
rkaplow@chromium.org
diff --git a/chromium/base/metrics/bucket_ranges.cc b/chromium/base/metrics/bucket_ranges.cc
index f995d7c43eb..04043939962 100644
--- a/chromium/base/metrics/bucket_ranges.cc
+++ b/chromium/base/metrics/bucket_ranges.cc
@@ -107,7 +107,7 @@ BucketRanges::BucketRanges(size_t num_ranges)
: ranges_(num_ranges, 0),
checksum_(0) {}
-BucketRanges::~BucketRanges() {}
+BucketRanges::~BucketRanges() = default;
uint32_t BucketRanges::CalculateChecksum() const {
// Seed checksum.
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 972801200fa..72f25a9013e 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -50,7 +50,11 @@ const char kActivationMarker = '*';
// This is safe from race conditions because MakeIterable is a release operation
// and GetNextOfType is an acquire operation, so memory writes before
// MakeIterable happen before memory reads after GetNextOfType.
+#if defined(OS_FUCHSIA) // TODO(752368): Not yet supported on Fuchsia.
+const bool kUseSharedMemoryForFieldTrials = false;
+#else
const bool kUseSharedMemoryForFieldTrials = true;
+#endif
// Constants for the field trial allocator.
const char kAllocatorName[] = "FieldTrialAllocator";
@@ -239,14 +243,13 @@ int FieldTrialList::kNoExpirationYear = 0;
//------------------------------------------------------------------------------
// FieldTrial methods and members.
-FieldTrial::EntropyProvider::~EntropyProvider() {
-}
+FieldTrial::EntropyProvider::~EntropyProvider() = default;
-FieldTrial::State::State() {}
+FieldTrial::State::State() = default;
FieldTrial::State::State(const State& other) = default;
-FieldTrial::State::~State() {}
+FieldTrial::State::~State() = default;
bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
StringPiece* trial_name,
@@ -409,7 +412,7 @@ FieldTrial::FieldTrial(const std::string& trial_name,
<< "Trial " << trial_name << " is missing a default group name.";
}
-FieldTrial::~FieldTrial() {}
+FieldTrial::~FieldTrial() = default;
void FieldTrial::SetTrialRegistered() {
DCHECK_EQ(kNotFinalized, group_);
@@ -477,19 +480,18 @@ bool FieldTrial::GetStateWhileLocked(State* field_trial_state) {
// FieldTrialList methods and members.
// static
-FieldTrialList* FieldTrialList::global_ = NULL;
+FieldTrialList* FieldTrialList::global_ = nullptr;
// static
bool FieldTrialList::used_without_global_ = false;
-FieldTrialList::Observer::~Observer() {
-}
+FieldTrialList::Observer::~Observer() = default;
FieldTrialList::FieldTrialList(
std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider)
: entropy_provider_(std::move(entropy_provider)),
observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
- ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
+ ObserverListPolicy::EXISTING_ONLY)) {
DCHECK(!global_);
DCHECK(!used_without_global_);
global_ = this;
@@ -508,7 +510,7 @@ FieldTrialList::~FieldTrialList() {
registered_.erase(it->first);
}
DCHECK_EQ(this, global_);
- global_ = NULL;
+ global_ = nullptr;
}
// static
@@ -523,7 +525,7 @@ FieldTrial* FieldTrialList::FactoryGetFieldTrial(
int* default_group_number) {
return FactoryGetFieldTrialWithRandomizationSeed(
trial_name, total_probability, default_group_name, year, month,
- day_of_month, randomization_type, 0, default_group_number, NULL);
+ day_of_month, randomization_type, 0, default_group_number, nullptr);
}
// static
@@ -595,7 +597,7 @@ FieldTrial* FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
// static
FieldTrial* FieldTrialList::Find(const std::string& trial_name) {
if (!global_)
- return NULL;
+ return nullptr;
AutoLock auto_lock(global_->lock_);
return global_->PreLockedFind(trial_name);
}
@@ -618,7 +620,7 @@ std::string FieldTrialList::FindFullName(const std::string& trial_name) {
// static
bool FieldTrialList::TrialExists(const std::string& trial_name) {
- return Find(trial_name) != NULL;
+ return Find(trial_name) != nullptr;
}
// static
@@ -891,14 +893,14 @@ FieldTrial* FieldTrialList::CreateFieldTrial(
DCHECK_GE(name.size(), 0u);
DCHECK_GE(group_name.size(), 0u);
if (name.empty() || group_name.empty() || !global_)
- return NULL;
+ return nullptr;
FieldTrial* field_trial = FieldTrialList::Find(name);
if (field_trial) {
// In single process mode, or when we force them from the command line,
// we may have already created the field trial.
if (field_trial->group_name_internal() != group_name)
- return NULL;
+ return nullptr;
return field_trial;
}
const int kTotalProbability = 100;
@@ -1397,7 +1399,7 @@ const FieldTrial::EntropyProvider*
FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
if (!global_) {
used_without_global_ = true;
- return NULL;
+ return nullptr;
}
return global_->entropy_provider_.get();
@@ -1406,7 +1408,7 @@ const FieldTrial::EntropyProvider*
FieldTrial* FieldTrialList::PreLockedFind(const std::string& name) {
RegistrationMap::iterator it = registered_.find(name);
if (registered_.end() == it)
- return NULL;
+ return nullptr;
return it->second;
}
diff --git a/chromium/base/metrics/field_trial_param_associator.cc b/chromium/base/metrics/field_trial_param_associator.cc
index 9c93f344cfa..af76eafaca4 100644
--- a/chromium/base/metrics/field_trial_param_associator.cc
+++ b/chromium/base/metrics/field_trial_param_associator.cc
@@ -8,8 +8,8 @@
namespace base {
-FieldTrialParamAssociator::FieldTrialParamAssociator() {}
-FieldTrialParamAssociator::~FieldTrialParamAssociator() {}
+FieldTrialParamAssociator::FieldTrialParamAssociator() = default;
+FieldTrialParamAssociator::~FieldTrialParamAssociator() = default;
// static
FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index d324f50823d..42c3ba1e436 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -77,7 +77,7 @@ class TestFieldTrialObserver : public FieldTrialList::Observer {
class FieldTrialTest : public testing::Test {
public:
- FieldTrialTest() : trial_list_(NULL) {}
+ FieldTrialTest() : trial_list_(nullptr) {}
private:
MessageLoop message_loop_;
@@ -95,7 +95,7 @@ TEST_F(FieldTrialTest, Registration) {
EXPECT_FALSE(FieldTrialList::Find(name2));
scoped_refptr<FieldTrial> trial1 =
- CreateFieldTrial(name1, 10, "default name 1 test", NULL);
+ CreateFieldTrial(name1, 10, "default name 1 test", nullptr);
EXPECT_EQ(FieldTrial::kNotFinalized, trial1->group_);
EXPECT_EQ(name1, trial1->trial_name());
EXPECT_EQ("", trial1->group_name_internal());
@@ -106,7 +106,7 @@ TEST_F(FieldTrialTest, Registration) {
EXPECT_FALSE(FieldTrialList::Find(name2));
scoped_refptr<FieldTrial> trial2 =
- CreateFieldTrial(name2, 10, "default name 2 test", NULL);
+ CreateFieldTrial(name2, 10, "default name 2 test", nullptr);
EXPECT_EQ(FieldTrial::kNotFinalized, trial2->group_);
EXPECT_EQ(name2, trial2->trial_name());
EXPECT_EQ("", trial2->group_name_internal());
@@ -132,7 +132,7 @@ TEST_F(FieldTrialTest, AbsoluteProbabilities) {
default_always_false[0] = c;
scoped_refptr<FieldTrial> trial_true =
- CreateFieldTrial(always_true, 10, default_always_true, NULL);
+ CreateFieldTrial(always_true, 10, default_always_true, nullptr);
const std::string winner = "TheWinner";
int winner_group = trial_true->AppendGroup(winner, 10);
@@ -140,7 +140,7 @@ TEST_F(FieldTrialTest, AbsoluteProbabilities) {
EXPECT_EQ(winner, trial_true->group_name());
scoped_refptr<FieldTrial> trial_false =
- CreateFieldTrial(always_false, 10, default_always_false, NULL);
+ CreateFieldTrial(always_false, 10, default_always_false, nullptr);
int loser_group = trial_false->AppendGroup("ALoser", 0);
EXPECT_NE(loser_group, trial_false->group());
@@ -181,7 +181,7 @@ TEST_F(FieldTrialTest, FiftyFiftyProbability) {
std::string default_group_name = base::StringPrintf("Default FiftyFifty%d",
++counter);
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(name, 2, default_group_name, NULL);
+ CreateFieldTrial(name, 2, default_group_name, nullptr);
trial->AppendGroup("first", 1); // 50% chance of being chosen.
// If group_ is kNotFinalized, then a group assignement hasn't been done.
if (trial->group_ != FieldTrial::kNotFinalized) {
@@ -206,7 +206,7 @@ TEST_F(FieldTrialTest, MiddleProbabilities) {
name[0] = c;
default_group_name[0] = c;
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(name, 10, default_group_name, NULL);
+ CreateFieldTrial(name, 10, default_group_name, nullptr);
int might_win = trial->AppendGroup("MightWin", 5);
if (trial->group() == might_win) {
@@ -230,7 +230,7 @@ TEST_F(FieldTrialTest, OneWinner) {
int default_group_number = -1;
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(name, group_count, default_group_name, NULL);
+ CreateFieldTrial(name, group_count, default_group_name, nullptr);
int winner_index(-2);
std::string winner_name;
@@ -277,7 +277,7 @@ TEST_F(FieldTrialTest, DisableProbability) {
TEST_F(FieldTrialTest, ActiveGroups) {
std::string no_group("No Group");
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(no_group, 10, "Default", NULL);
+ CreateFieldTrial(no_group, 10, "Default", nullptr);
// There is no winner yet, so no NameGroupId should be returned.
FieldTrial::ActiveGroup active_group;
@@ -285,7 +285,7 @@ TEST_F(FieldTrialTest, ActiveGroups) {
// Create a single winning group.
std::string one_winner("One Winner");
- trial = CreateFieldTrial(one_winner, 10, "Default", NULL);
+ trial = CreateFieldTrial(one_winner, 10, "Default", nullptr);
std::string winner("Winner");
trial->AppendGroup(winner, 10);
EXPECT_FALSE(trial->GetActiveGroup(&active_group));
@@ -297,7 +297,7 @@ TEST_F(FieldTrialTest, ActiveGroups) {
std::string multi_group("MultiGroup");
scoped_refptr<FieldTrial> multi_group_trial =
- CreateFieldTrial(multi_group, 9, "Default", NULL);
+ CreateFieldTrial(multi_group, 9, "Default", nullptr);
multi_group_trial->AppendGroup("Me", 3);
multi_group_trial->AppendGroup("You", 3);
@@ -338,7 +338,7 @@ TEST_F(FieldTrialTest, AllGroups) {
FieldTrial::State field_trial_state;
std::string one_winner("One Winner");
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(one_winner, 10, "Default", NULL);
+ CreateFieldTrial(one_winner, 10, "Default", nullptr);
std::string winner("Winner");
trial->AppendGroup(winner, 10);
EXPECT_TRUE(trial->GetState(&field_trial_state));
@@ -351,7 +351,7 @@ TEST_F(FieldTrialTest, AllGroups) {
std::string multi_group("MultiGroup");
scoped_refptr<FieldTrial> multi_group_trial =
- CreateFieldTrial(multi_group, 9, "Default", NULL);
+ CreateFieldTrial(multi_group, 9, "Default", nullptr);
multi_group_trial->AppendGroup("Me", 3);
multi_group_trial->AppendGroup("You", 3);
@@ -425,7 +425,7 @@ TEST_F(FieldTrialTest, Save) {
std::string save_string;
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial("Some name", 10, "Default some name", NULL);
+ CreateFieldTrial("Some name", 10, "Default some name", nullptr);
// There is no winner yet, so no textual group name is associated with trial.
// In this case, the trial should not be included.
EXPECT_EQ("", trial->group_name_internal());
@@ -443,7 +443,7 @@ TEST_F(FieldTrialTest, Save) {
// Create a second trial and winning group.
scoped_refptr<FieldTrial> trial2 =
- CreateFieldTrial("xxx", 10, "Default xxx", NULL);
+ CreateFieldTrial("xxx", 10, "Default xxx", nullptr);
trial2->AppendGroup("yyyy", 10);
// Finalize the group selection by accessing the selected group.
trial2->group();
@@ -455,7 +455,7 @@ TEST_F(FieldTrialTest, Save) {
// Create a third trial with only the default group.
scoped_refptr<FieldTrial> trial3 =
- CreateFieldTrial("zzz", 10, "default", NULL);
+ CreateFieldTrial("zzz", 10, "default", nullptr);
// Finalize the group selection by accessing the selected group.
trial3->group();
@@ -499,7 +499,7 @@ TEST_F(FieldTrialTest, SaveAll) {
// Create a third trial with only the default group.
scoped_refptr<FieldTrial> trial3 =
- CreateFieldTrial("zzz", 10, "default", NULL);
+ CreateFieldTrial("zzz", 10, "default", nullptr);
FieldTrialList::AllStatesToString(&save_string);
EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
@@ -514,12 +514,12 @@ TEST_F(FieldTrialTest, Restore) {
std::set<std::string>());
FieldTrial* trial = FieldTrialList::Find("Some_name");
- ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
EXPECT_EQ("Winner", trial->group_name());
EXPECT_EQ("Some_name", trial->trial_name());
trial = FieldTrialList::Find("xxx");
- ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
EXPECT_EQ("yyyy", trial->group_name());
EXPECT_EQ("xxx", trial->trial_name());
}
@@ -529,7 +529,7 @@ TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
std::set<std::string>()));
FieldTrial* trial = FieldTrialList::Find("tname");
- ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
EXPECT_EQ("gname", trial->group_name());
EXPECT_EQ("tname", trial->trial_name());
}
@@ -549,7 +549,7 @@ TEST_F(FieldTrialTest, BogusRestore) {
TEST_F(FieldTrialTest, DuplicateRestore) {
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial("Some name", 10, "Default", NULL);
+ CreateFieldTrial("Some name", 10, "Default", nullptr);
trial->AppendGroup("Winner", 10);
// Finalize the group selection by accessing the selected group.
trial->group();
@@ -653,12 +653,12 @@ TEST_F(FieldTrialTest, CreateTrialsFromStringWithIgnoredFieldTrials) {
EXPECT_TRUE(active_groups.empty());
FieldTrial* trial = FieldTrialList::Find("Foo");
- ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
EXPECT_EQ("Foo", trial->trial_name());
EXPECT_EQ("Foo_name", trial->group_name());
trial = FieldTrialList::Find("Bar");
- ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
EXPECT_EQ("Bar", trial->trial_name());
EXPECT_EQ("Bar_name", trial->group_name());
}
@@ -669,7 +669,7 @@ TEST_F(FieldTrialTest, CreateFieldTrial) {
FieldTrialList::CreateFieldTrial("Some_name", "Winner");
FieldTrial* trial = FieldTrialList::Find("Some_name");
- ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
EXPECT_EQ("Winner", trial->group_name());
EXPECT_EQ("Some_name", trial->trial_name());
}
@@ -687,16 +687,16 @@ TEST_F(FieldTrialTest, CreateFieldTrialIsNotActive) {
TEST_F(FieldTrialTest, DuplicateFieldTrial) {
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial("Some_name", 10, "Default", NULL);
+ CreateFieldTrial("Some_name", 10, "Default", nullptr);
trial->AppendGroup("Winner", 10);
// It is OK if we redundantly specify a winner.
FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("Some_name", "Winner");
- EXPECT_TRUE(trial1 != NULL);
+ EXPECT_TRUE(trial1 != nullptr);
// But it is an error to try to change to a different winner.
FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("Some_name", "Loser");
- EXPECT_TRUE(trial2 == NULL);
+ EXPECT_TRUE(trial2 == nullptr);
}
TEST_F(FieldTrialTest, DisableImmediately) {
@@ -710,7 +710,7 @@ TEST_F(FieldTrialTest, DisableImmediately) {
TEST_F(FieldTrialTest, DisableAfterInitialization) {
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial("trial", 100, "default", NULL);
+ CreateFieldTrial("trial", 100, "default", nullptr);
trial->AppendGroup("non_default", 100);
trial->Disable();
ASSERT_EQ("default", trial->group_name());
@@ -800,7 +800,7 @@ TEST_F(FieldTrialTest, SetForcedDefaultOnly) {
CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
trial->SetForced();
- trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
EXPECT_EQ(default_group, trial->group());
EXPECT_EQ(kDefaultGroupName, trial->group_name());
}
@@ -814,7 +814,7 @@ TEST_F(FieldTrialTest, SetForcedDefaultWithExtraGroup) {
CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
trial->SetForced();
- trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
const int extra_group = trial->AppendGroup("Extra", 100);
EXPECT_EQ(default_group, trial->group());
EXPECT_NE(extra_group, trial->group());
@@ -829,7 +829,7 @@ TEST_F(FieldTrialTest, SetForcedTurnFeatureOn) {
// Simulate a server-side (forced) config that turns the feature on when the
// original hard-coded config had it disabled.
scoped_refptr<FieldTrial> forced_trial =
- CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
forced_trial->AppendGroup(kExtraGroupName, 100);
forced_trial->SetForced();
@@ -853,7 +853,7 @@ TEST_F(FieldTrialTest, SetForcedTurnFeatureOff) {
// Simulate a server-side (forced) config that turns the feature off when the
// original hard-coded config had it enabled.
scoped_refptr<FieldTrial> forced_trial =
- CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
forced_trial->AppendGroup(kExtraGroupName, 0);
forced_trial->SetForced();
@@ -878,7 +878,7 @@ TEST_F(FieldTrialTest, SetForcedChangeDefault_Default) {
// Simulate a server-side (forced) config that switches which group is default
// and ensures that the non-forced code receives the correct group numbers.
scoped_refptr<FieldTrial> forced_trial =
- CreateFieldTrial(kTrialName, 100, kGroupAName, NULL);
+ CreateFieldTrial(kTrialName, 100, kGroupAName, nullptr);
forced_trial->AppendGroup(kGroupBName, 100);
forced_trial->SetForced();
@@ -903,7 +903,7 @@ TEST_F(FieldTrialTest, SetForcedChangeDefault_NonDefault) {
// Simulate a server-side (forced) config that switches which group is default
// and ensures that the non-forced code receives the correct group numbers.
scoped_refptr<FieldTrial> forced_trial =
- CreateFieldTrial(kTrialName, 100, kGroupAName, NULL);
+ CreateFieldTrial(kTrialName, 100, kGroupAName, nullptr);
forced_trial->AppendGroup(kGroupBName, 0);
forced_trial->SetForced();
@@ -993,7 +993,7 @@ TEST_F(FieldTrialTest, DisabledTrialNotActive) {
ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
trial->AppendGroup("X", 50);
trial->Disable();
@@ -1015,7 +1015,7 @@ TEST_F(FieldTrialTest, ExpirationYearNotExpired) {
ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(kTrialName, kProbability, kDefaultGroupName, NULL);
+ CreateFieldTrial(kTrialName, kProbability, kDefaultGroupName, nullptr);
trial->AppendGroup(kGroupName, kProbability);
EXPECT_EQ(kGroupName, trial->group_name());
}
@@ -1097,23 +1097,23 @@ TEST(FieldTrialTestWithoutList, StatesStringFormat) {
// Scoping the first FieldTrialList, as we need another one to test the
// importing function.
{
- FieldTrialList field_trial_list(NULL);
+ FieldTrialList field_trial_list(nullptr);
scoped_refptr<FieldTrial> trial =
- CreateFieldTrial("Abc", 10, "Default some name", NULL);
+ CreateFieldTrial("Abc", 10, "Default some name", nullptr);
trial->AppendGroup("cba", 10);
trial->group();
scoped_refptr<FieldTrial> trial2 =
- CreateFieldTrial("Xyz", 10, "Default xxx", NULL);
+ CreateFieldTrial("Xyz", 10, "Default xxx", nullptr);
trial2->AppendGroup("zyx", 10);
trial2->group();
scoped_refptr<FieldTrial> trial3 =
- CreateFieldTrial("zzz", 10, "default", NULL);
+ CreateFieldTrial("zzz", 10, "default", nullptr);
FieldTrialList::AllStatesToString(&save_string);
}
// Starting with a new blank FieldTrialList.
- FieldTrialList field_trial_list(NULL);
+ FieldTrialList field_trial_list(nullptr);
ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
std::set<std::string>()));
@@ -1134,7 +1134,7 @@ TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
FieldTrialList::FactoryGetFieldTrial(
"OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
base::FieldTrialList::kNoExpirationYear, 1, 1,
- base::FieldTrial::ONE_TIME_RANDOMIZED, NULL),
+ base::FieldTrial::ONE_TIME_RANDOMIZED, nullptr),
"");
}
@@ -1383,8 +1383,11 @@ TEST(FieldTrialListTest, SerializeSharedMemoryHandleMetadata) {
SharedMemoryHandle deserialized =
FieldTrialList::DeserializeSharedMemoryHandleMetadata(serialized);
#else
+ // Use a valid-looking arbitrary number for the file descriptor. It's not
+ // being used in this unittest, but needs to pass sanity checks in the
+ // handle's constructor.
SharedMemoryHandle deserialized =
- FieldTrialList::DeserializeSharedMemoryHandleMetadata(-1, serialized);
+ FieldTrialList::DeserializeSharedMemoryHandleMetadata(42, serialized);
#endif
EXPECT_EQ(deserialized.GetGUID(), shm->handle().GetGUID());
EXPECT_FALSE(deserialized.GetGUID().is_empty());
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 7d0913158ed..40e7bcc860a 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -130,14 +130,14 @@ class Histogram::Factory {
virtual BucketRanges* CreateRanges() {
BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
Histogram::InitializeBucketRanges(minimum_, maximum_, ranges);
- base::debug::Alias(&ranges); // TODO(bcwhite): Remove after crbug/586622.
return ranges;
}
// Allocate the correct Histogram object off the heap (in case persistent
// memory is not available).
virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
- return WrapUnique(new Histogram(name_, minimum_, maximum_, ranges));
+ return WrapUnique(
+ new Histogram(GetPermanentName(name_), minimum_, maximum_, ranges));
}
// Perform any required datafill on the just-created histogram. If
@@ -224,12 +224,6 @@ HistogramBase* Histogram::Factory::Build() {
allocator->FinalizeHistogram(histogram_ref,
histogram == tentative_histogram_ptr);
}
-
- // Update report on created histograms.
- ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
- } else {
- // Update report on lookup histograms.
- ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
CHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
@@ -287,7 +281,7 @@ HistogramBase* Histogram::FactoryTimeGet(const char* name,
}
std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
- const std::string& name,
+ const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -398,7 +392,7 @@ uint32_t Histogram::bucket_count() const {
}
// static
-bool Histogram::InspectConstructionArguments(const std::string& name,
+bool Histogram::InspectConstructionArguments(StringPiece name,
Sample* minimum,
Sample* maximum,
uint32_t* bucket_count) {
@@ -569,10 +563,6 @@ bool Histogram::ValidateHistogramContents(bool crash_if_invalid,
bad_fields |= 1 << kLoggedBucketRangesField;
else if (logged_samples_->id() == 0)
bad_fields |= 1 << kIdField;
- else if (histogram_name().length() > 20 && histogram_name().at(20) == '\0')
- bad_fields |= 1 << kHistogramNameField;
- else if (histogram_name().length() > 40 && histogram_name().at(40) == '\0')
- bad_fields |= 1 << kHistogramNameField;
if (flags() == 0)
bad_fields |= 1 << kFlagsField;
if (dummy_ != kDummyValue)
@@ -584,7 +574,7 @@ bool Histogram::ValidateHistogramContents(bool crash_if_invalid,
// Abort if a problem is found (except "flags", which could legally be zero).
std::string debug_string = base::StringPrintf(
- "%s/%" PRIu32 "#%d", histogram_name().c_str(), bad_fields, identifier);
+ "%s/%" PRIu32 "#%d", histogram_name(), bad_fields, identifier);
#if !defined(OS_NACL)
base::debug::ScopedCrashKey crash_key("bad_histogram", debug_string);
#endif
@@ -604,7 +594,7 @@ void Histogram::SerializeInfoImpl(Pickle* pickle) const {
}
// TODO(bcwhite): Remove minimum/maximum parameters from here and call chain.
-Histogram::Histogram(const std::string& name,
+Histogram::Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges)
@@ -615,7 +605,7 @@ Histogram::Histogram(const std::string& name,
logged_samples_.reset(new SampleVector(unlogged_samples_->id(), ranges));
}
-Histogram::Histogram(const std::string& name,
+Histogram::Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -632,8 +622,7 @@ Histogram::Histogram(const std::string& name,
unlogged_samples_->id(), ranges, logged_meta, logged_counts));
}
-Histogram::~Histogram() {
-}
+Histogram::~Histogram() = default;
bool Histogram::PrintEmptyBucket(uint32_t index) const {
return true;
@@ -786,9 +775,7 @@ double Histogram::GetPeakBucketSize(const SampleVectorBase& samples) const {
void Histogram::WriteAsciiHeader(const SampleVectorBase& samples,
Count sample_count,
std::string* output) const {
- StringAppendF(output,
- "Histogram: %s recorded %d samples",
- histogram_name().c_str(),
+ StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
sample_count);
if (sample_count == 0) {
DCHECK_EQ(samples.sum(), 0);
@@ -863,13 +850,13 @@ class LinearHistogram::Factory : public Histogram::Factory {
BucketRanges* CreateRanges() override {
BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
LinearHistogram::InitializeBucketRanges(minimum_, maximum_, ranges);
- base::debug::Alias(&ranges); // TODO(bcwhite): Remove after crbug/586622.
return ranges;
}
std::unique_ptr<HistogramBase> HeapAlloc(
const BucketRanges* ranges) override {
- return WrapUnique(new LinearHistogram(name_, minimum_, maximum_, ranges));
+ return WrapUnique(new LinearHistogram(GetPermanentName(name_), minimum_,
+ maximum_, ranges));
}
void FillHistogram(HistogramBase* base_histogram) override {
@@ -890,7 +877,7 @@ class LinearHistogram::Factory : public Histogram::Factory {
DISALLOW_COPY_AND_ASSIGN(Factory);
};
-LinearHistogram::~LinearHistogram() {}
+LinearHistogram::~LinearHistogram() = default;
HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
Sample minimum,
@@ -898,7 +885,7 @@ HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
uint32_t bucket_count,
int32_t flags) {
return FactoryGetWithRangeDescription(name, minimum, maximum, bucket_count,
- flags, nullptr);
+ flags, NULL);
}
HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
@@ -929,7 +916,7 @@ HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
}
std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
- const std::string& name,
+ const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -960,15 +947,14 @@ HistogramType LinearHistogram::GetHistogramType() const {
return LINEAR_HISTOGRAM;
}
-LinearHistogram::LinearHistogram(const std::string& name,
+LinearHistogram::LinearHistogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges)
- : Histogram(name, minimum, maximum, ranges) {
-}
+ : Histogram(name, minimum, maximum, ranges) {}
LinearHistogram::LinearHistogram(
- const std::string& name,
+ const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -1016,8 +1002,6 @@ void LinearHistogram::InitializeBucketRanges(Sample minimum,
double linear_range =
(min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
ranges->set_range(i, static_cast<Sample>(linear_range + 0.5));
- // TODO(bcwhite): Remove once crbug/586622 is fixed.
- base::debug::Alias(&linear_range);
}
ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
ranges->ResetChecksum();
@@ -1062,13 +1046,12 @@ class BooleanHistogram::Factory : public Histogram::Factory {
BucketRanges* CreateRanges() override {
BucketRanges* ranges = new BucketRanges(3 + 1);
LinearHistogram::InitializeBucketRanges(1, 2, ranges);
- base::debug::Alias(&ranges); // TODO(bcwhite): Remove after crbug/586622.
return ranges;
}
std::unique_ptr<HistogramBase> HeapAlloc(
const BucketRanges* ranges) override {
- return WrapUnique(new BooleanHistogram(name_, ranges));
+ return WrapUnique(new BooleanHistogram(GetPermanentName(name_), ranges));
}
private:
@@ -1085,7 +1068,7 @@ HistogramBase* BooleanHistogram::FactoryGet(const char* name, int32_t flags) {
}
std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
- const std::string& name,
+ const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
@@ -1099,12 +1082,11 @@ HistogramType BooleanHistogram::GetHistogramType() const {
return BOOLEAN_HISTOGRAM;
}
-BooleanHistogram::BooleanHistogram(const std::string& name,
- const BucketRanges* ranges)
+BooleanHistogram::BooleanHistogram(const char* name, const BucketRanges* ranges)
: LinearHistogram(name, 1, 2, ranges) {}
BooleanHistogram::BooleanHistogram(
- const std::string& name,
+ const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
@@ -1176,7 +1158,7 @@ class CustomHistogram::Factory : public Histogram::Factory {
std::unique_ptr<HistogramBase> HeapAlloc(
const BucketRanges* ranges) override {
- return WrapUnique(new CustomHistogram(name_, ranges));
+ return WrapUnique(new CustomHistogram(GetPermanentName(name_), ranges));
}
private:
@@ -1202,7 +1184,7 @@ HistogramBase* CustomHistogram::FactoryGet(
}
std::unique_ptr<HistogramBase> CustomHistogram::PersistentCreate(
- const std::string& name,
+ const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
@@ -1231,15 +1213,14 @@ std::vector<Sample> CustomHistogram::ArrayToCustomRanges(
return all_values;
}
-CustomHistogram::CustomHistogram(const std::string& name,
- const BucketRanges* ranges)
+CustomHistogram::CustomHistogram(const char* name, const BucketRanges* ranges)
: Histogram(name,
ranges->range(1),
ranges->range(ranges->bucket_count() - 1),
ranges) {}
CustomHistogram::CustomHistogram(
- const std::string& name,
+ const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h
index a0942a5d198..373f3992481 100644
--- a/chromium/base/metrics/histogram.h
+++ b/chromium/base/metrics/histogram.h
@@ -80,6 +80,7 @@
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
+#include "base/strings/string_piece.h"
#include "base/time/time.h"
namespace base {
@@ -140,7 +141,7 @@ class BASE_EXPORT Histogram : public HistogramBase {
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
- const std::string& name,
+ const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -186,7 +187,7 @@ class BASE_EXPORT Histogram : public HistogramBase {
// converts it to good input: 1.
// TODO(bcwhite): Use false returns to create "sink" histograms so that bad
// data doesn't create confusion on the servers.
- static bool InspectConstructionArguments(const std::string& name,
+ static bool InspectConstructionArguments(StringPiece name,
Sample* minimum,
Sample* maximum,
uint32_t* bucket_count);
@@ -226,7 +227,7 @@ class BASE_EXPORT Histogram : public HistogramBase {
// |ranges| should contain the underflow and overflow buckets. See top
// comments for example.
- Histogram(const std::string& name,
+ Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges);
@@ -237,7 +238,7 @@ class BASE_EXPORT Histogram : public HistogramBase {
// the life of this memory is managed externally and exceeds the lifetime
// of this object. Practically, this memory is never released until the
// process exits and the OS cleans it up.
- Histogram(const std::string& name,
+ Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -311,6 +312,12 @@ class BASE_EXPORT Histogram : public HistogramBase {
int64_t* sum,
ListValue* buckets) const override;
+ // Samples that have not yet been logged with SnapshotDelta().
+ std::unique_ptr<SampleVectorBase> unlogged_samples_;
+
+ // Accumulation of all samples that have been logged with SnapshotDelta().
+ std::unique_ptr<SampleVectorBase> logged_samples_;
+
// This is a dummy field placed where corruption is frequently seen on
// current Android builds. The hope is that it will mitigate the problem
// sufficiently to continue with the M61 beta branch while investigation
@@ -318,12 +325,6 @@ class BASE_EXPORT Histogram : public HistogramBase {
// TODO(bcwhite): Remove this once crbug/736675 is fixed.
const uintptr_t dummy_;
- // Samples that have not yet been logged with SnapshotDelta().
- std::unique_ptr<SampleVectorBase> unlogged_samples_;
-
- // Accumulation of all samples that have been logged with SnapshotDelta().
- std::unique_ptr<SampleVectorBase> logged_samples_;
-
#if DCHECK_IS_ON() // Don't waste memory if it won't be used.
// Flag to indicate if PrepareFinalDelta has been previously called. It is
// used to DCHECK that a final delta is not created multiple times.
@@ -370,7 +371,7 @@ class BASE_EXPORT LinearHistogram : public Histogram {
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
- const std::string& name,
+ const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -407,12 +408,12 @@ class BASE_EXPORT LinearHistogram : public Histogram {
protected:
class Factory;
- LinearHistogram(const std::string& name,
+ LinearHistogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges);
- LinearHistogram(const std::string& name,
+ LinearHistogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
@@ -459,7 +460,7 @@ class BASE_EXPORT BooleanHistogram : public LinearHistogram {
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
- const std::string& name,
+ const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
@@ -472,8 +473,8 @@ class BASE_EXPORT BooleanHistogram : public LinearHistogram {
class Factory;
private:
- BooleanHistogram(const std::string& name, const BucketRanges* ranges);
- BooleanHistogram(const std::string& name,
+ BooleanHistogram(const char* name, const BucketRanges* ranges);
+ BooleanHistogram(const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
@@ -509,7 +510,7 @@ class BASE_EXPORT CustomHistogram : public Histogram {
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
- const std::string& name,
+ const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
@@ -530,10 +531,9 @@ class BASE_EXPORT CustomHistogram : public Histogram {
protected:
class Factory;
- CustomHistogram(const std::string& name,
- const BucketRanges* ranges);
+ CustomHistogram(const char* name, const BucketRanges* ranges);
- CustomHistogram(const std::string& name,
+ CustomHistogram(const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index 3171f374451..405a80f27a6 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -7,17 +7,21 @@
#include <limits.h>
#include <memory>
+#include <set>
#include <utility>
#include "base/json/json_string_value_serializer.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
#include "base/values.h"
namespace base {
@@ -42,7 +46,7 @@ std::string HistogramTypeToString(HistogramType type) {
HistogramBase* DeserializeHistogramInfo(PickleIterator* iter) {
int type;
if (!iter->ReadInt(&type))
- return NULL;
+ return nullptr;
switch (type) {
case HISTOGRAM:
@@ -56,21 +60,19 @@ HistogramBase* DeserializeHistogramInfo(PickleIterator* iter) {
case SPARSE_HISTOGRAM:
return SparseHistogram::DeserializeInfoImpl(iter);
default:
- return NULL;
+ return nullptr;
}
}
const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
-HistogramBase* HistogramBase::report_histogram_ = nullptr;
-HistogramBase::HistogramBase(const std::string& name)
- : histogram_name_(name),
- flags_(kNoFlags) {}
+HistogramBase::HistogramBase(const char* name)
+ : histogram_name_(name), flags_(kNoFlags) {}
-HistogramBase::~HistogramBase() {}
+HistogramBase::~HistogramBase() = default;
void HistogramBase::CheckName(const StringPiece& name) const {
- DCHECK_EQ(histogram_name(), name);
+ DCHECK_EQ(StringPiece(histogram_name()), name);
}
void HistogramBase::SetFlags(int32_t flags) {
@@ -106,7 +108,8 @@ bool HistogramBase::ValidateHistogramContents(bool crash_if_invalid,
return true;
}
-void HistogramBase::WriteJSON(std::string* output) const {
+void HistogramBase::WriteJSON(std::string* output,
+ JSONVerbosityLevel verbosity_level) const {
Count count;
int64_t sum;
std::unique_ptr<ListValue> buckets(new ListValue());
@@ -121,37 +124,12 @@ void HistogramBase::WriteJSON(std::string* output) const {
root.SetDouble("sum", static_cast<double>(sum));
root.SetInteger("flags", flags());
root.Set("params", std::move(parameters));
- root.Set("buckets", std::move(buckets));
+ if (verbosity_level != JSON_VERBOSITY_LEVEL_OMIT_BUCKETS)
+ root.Set("buckets", std::move(buckets));
root.SetInteger("pid", GetUniqueIdForProcess());
serializer.Serialize(root);
}
-// static
-void HistogramBase::EnableActivityReportHistogram(
- const std::string& process_type) {
- if (report_histogram_)
- return;
-
- size_t existing = StatisticsRecorder::GetHistogramCount();
- if (existing != 0) {
- DVLOG(1) << existing
- << " histograms were created before reporting was enabled.";
- }
-
- std::string name =
- "UMA.Histograms.Activity" +
- (process_type.empty() ? process_type : "." + process_type);
-
- // Calling FactoryGet() here rather than using a histogram-macro works
- // around some problems with tests that could end up seeing the results
- // histogram when not expected due to a bad interaction between
- // HistogramTester and StatisticsRecorder.
- report_histogram_ = LinearHistogram::FactoryGet(
- name, 1, HISTOGRAM_REPORT_MAX, HISTOGRAM_REPORT_MAX + 1,
- kUmaTargetedHistogramFlag);
- report_histogram_->Add(HISTOGRAM_REPORT_CREATED);
-}
-
void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
if ((flags() & kCallbackExists) == 0)
return;
@@ -189,46 +167,16 @@ void HistogramBase::WriteAsciiBucketValue(Count current,
}
// static
-void HistogramBase::ReportHistogramActivity(const HistogramBase& histogram,
- ReportActivity activity) {
- if (!report_histogram_)
- return;
-
- const int32_t flags = histogram.flags_;
- HistogramReport report_type = HISTOGRAM_REPORT_MAX;
- switch (activity) {
- case HISTOGRAM_CREATED:
- report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_CREATED);
- switch (histogram.GetHistogramType()) {
- case HISTOGRAM:
- report_type = HISTOGRAM_REPORT_TYPE_LOGARITHMIC;
- break;
- case LINEAR_HISTOGRAM:
- report_type = HISTOGRAM_REPORT_TYPE_LINEAR;
- break;
- case BOOLEAN_HISTOGRAM:
- report_type = HISTOGRAM_REPORT_TYPE_BOOLEAN;
- break;
- case CUSTOM_HISTOGRAM:
- report_type = HISTOGRAM_REPORT_TYPE_CUSTOM;
- break;
- case SPARSE_HISTOGRAM:
- report_type = HISTOGRAM_REPORT_TYPE_SPARSE;
- break;
- }
- report_histogram_->Add(report_type);
- if (flags & kIsPersistent)
- report_histogram_->Add(HISTOGRAM_REPORT_FLAG_PERSISTENT);
- if ((flags & kUmaStabilityHistogramFlag) == kUmaStabilityHistogramFlag)
- report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_STABILITY);
- else if (flags & kUmaTargetedHistogramFlag)
- report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_TARGETED);
- break;
-
- case HISTOGRAM_LOOKUP:
- report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP);
- break;
- }
+char const* HistogramBase::GetPermanentName(const std::string& name) {
+ // A set of histogram names that provides the "permanent" lifetime required
+ // by histogram objects for those strings that are not already code constants
+ // or held in persistent memory.
+ static LazyInstance<std::set<std::string>>::Leaky permanent_names;
+ static LazyInstance<Lock>::Leaky permanent_names_lock;
+
+ AutoLock lock(permanent_names_lock.Get());
+ auto result = permanent_names.Get().insert(name);
+ return result.first->c_str();
}
} // namespace base
diff --git a/chromium/base/metrics/histogram_base.h b/chromium/base/metrics/histogram_base.h
index f6c8de51621..b6928f1f529 100644
--- a/chromium/base/metrics/histogram_base.h
+++ b/chromium/base/metrics/histogram_base.h
@@ -41,6 +41,16 @@ enum HistogramType {
SPARSE_HISTOGRAM,
};
+// Controls the verbosity of the information when the histogram is serialized to
+// a JSON.
+// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base.metrics
+enum JSONVerbosityLevel {
+ // The histogram is completely serialized.
+ JSON_VERBOSITY_LEVEL_FULL,
+ // The bucket information is not serialized.
+ JSON_VERBOSITY_LEVEL_OMIT_BUCKETS,
+};
+
std::string HistogramTypeToString(HistogramType type);
// This enum is used for reporting how many histograms and of what types and
@@ -133,10 +143,12 @@ class BASE_EXPORT HistogramBase {
NEVER_EXCEEDED_VALUE = 0x10,
};
- explicit HistogramBase(const std::string& name);
+ // Construct the base histogram. The name is not copied; it's up to the
+ // caller to ensure that it lives at least as long as this object.
+ explicit HistogramBase(const char* name);
virtual ~HistogramBase();
- const std::string& histogram_name() const { return histogram_name_; }
+ const char* histogram_name() const { return histogram_name_; }
// Comapres |name| to the histogram name and triggers a DCHECK if they do not
// match. This is a helper function used by histogram macros, which results in
@@ -214,18 +226,11 @@ class BASE_EXPORT HistogramBase {
virtual bool ValidateHistogramContents(bool crash_if_invalid,
int corrupted_count) const;
- // Produce a JSON representation of the histogram. This is implemented with
- // the help of GetParameters and GetCountAndBucketData; overwrite them to
- // customize the output.
- void WriteJSON(std::string* output) const;
-
- // This enables a histogram that reports the what types of histograms are
- // created and their flags. It must be called while still single-threaded.
- //
- // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
- // with the following histogram:
- // UMA.Histograms.process_type.Creations
- static void EnableActivityReportHistogram(const std::string& process_type);
+ // Produce a JSON representation of the histogram with |verbosity_level| as
+ // the serialization verbosity. This is implemented with the help of
+ // GetParameters and GetCountAndBucketData; overwrite them to customize the
+ // output.
+ void WriteJSON(std::string* output, JSONVerbosityLevel verbosity_level) const;
protected:
enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
@@ -261,17 +266,24 @@ class BASE_EXPORT HistogramBase {
// passing |sample| as the parameter.
void FindAndRunCallback(Sample sample) const;
- // Update report with an |activity| that occurred for |histogram|.
- static void ReportHistogramActivity(const HistogramBase& histogram,
- ReportActivity activicty);
-
- // Retrieves the global histogram reporting what histograms are created.
- static HistogramBase* report_histogram_;
+ // Gets a permanent string that can be used for histogram objects when the
+ // original is not a code constant or held in persistent memory.
+ static const char* GetPermanentName(const std::string& name);
private:
friend class HistogramBaseTest;
- const std::string histogram_name_;
+ // A pointer to permanent storage where the histogram name is held. This can
+ // be code space or the output of GetPermanentName() or any other storage
+ // that is known to never change. This is not StringPiece because (a) char*
+ // is 1/2 the size and (b) StringPiece transparently casts from std::string
+ // which can easily lead to a pointer to non-permanent space.
+ // For persistent histograms, this will simply point into the persistent
+ // memory segment, thus avoiding duplication. For heap histograms, the
+ // GetPermanentName method will create the necessary copy.
+ const char* const histogram_name_;
+
+ // Additional information about the histogram.
AtomicCount flags_;
DISALLOW_COPY_AND_ASSIGN(HistogramBase);
diff --git a/chromium/base/metrics/histogram_base_unittest.cc b/chromium/base/metrics/histogram_base_unittest.cc
index abf4d2a5d20..5850c6776a8 100644
--- a/chromium/base/metrics/histogram_base_unittest.cc
+++ b/chromium/base/metrics/histogram_base_unittest.cc
@@ -21,9 +21,7 @@ class HistogramBaseTest : public testing::Test {
ResetStatisticsRecorder();
}
- ~HistogramBaseTest() override {
- HistogramBase::report_histogram_ = nullptr;
- }
+ ~HistogramBaseTest() override = default;
void ResetStatisticsRecorder() {
// It is necessary to fully destruct any existing StatisticsRecorder
@@ -32,11 +30,6 @@ class HistogramBaseTest : public testing::Test {
statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
- HistogramBase* GetCreationReportHistogram(const std::string& name) {
- HistogramBase::EnableActivityReportHistogram(name);
- return HistogramBase::report_histogram_;
- }
-
private:
std::unique_ptr<StatisticsRecorder> statistics_recorder_;
@@ -62,7 +55,7 @@ TEST_F(HistogramBaseTest, DeserializeHistogram) {
deserialized = DeserializeHistogramInfo(&iter2);
EXPECT_TRUE(deserialized);
EXPECT_NE(histogram, deserialized);
- EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+ EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
EXPECT_TRUE(deserialized->HasConstructionArguments(1, 1000, 10));
// kIPCSerializationSourceFlag will be cleared.
@@ -87,7 +80,7 @@ TEST_F(HistogramBaseTest, DeserializeLinearHistogram) {
deserialized = DeserializeHistogramInfo(&iter2);
EXPECT_TRUE(deserialized);
EXPECT_NE(histogram, deserialized);
- EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+ EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
EXPECT_TRUE(deserialized->HasConstructionArguments(1, 1000, 10));
EXPECT_EQ(0, deserialized->flags());
}
@@ -109,7 +102,7 @@ TEST_F(HistogramBaseTest, DeserializeBooleanHistogram) {
deserialized = DeserializeHistogramInfo(&iter2);
EXPECT_TRUE(deserialized);
EXPECT_NE(histogram, deserialized);
- EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+ EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
EXPECT_TRUE(deserialized->HasConstructionArguments(1, 2, 3));
EXPECT_EQ(0, deserialized->flags());
}
@@ -136,7 +129,7 @@ TEST_F(HistogramBaseTest, DeserializeCustomHistogram) {
deserialized = DeserializeHistogramInfo(&iter2);
EXPECT_TRUE(deserialized);
EXPECT_NE(histogram, deserialized);
- EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+ EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
EXPECT_TRUE(deserialized->HasConstructionArguments(5, 13, 4));
EXPECT_EQ(0, deserialized->flags());
}
@@ -158,65 +151,8 @@ TEST_F(HistogramBaseTest, DeserializeSparseHistogram) {
deserialized = DeserializeHistogramInfo(&iter2);
EXPECT_TRUE(deserialized);
EXPECT_NE(histogram, deserialized);
- EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+ EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
EXPECT_EQ(0, deserialized->flags());
}
-TEST_F(HistogramBaseTest, CreationReportHistogram) {
- // Enabled creation report. Itself is not included in the report.
- HistogramBase* report = GetCreationReportHistogram("CreationReportTest");
- ASSERT_TRUE(report);
-
- std::vector<HistogramBase::Sample> ranges;
- ranges.push_back(1);
- ranges.push_back(2);
- ranges.push_back(4);
- ranges.push_back(8);
- ranges.push_back(10);
-
- // Create all histogram types and verify counts.
- Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
- LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
- BooleanHistogram::FactoryGet("CRH-Boolean", 0);
- CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
- SparseHistogram::FactoryGet("CRH-Sparse", 0);
-
- std::unique_ptr<HistogramSamples> samples = report->SnapshotSamples();
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
- EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
- EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LOGARITHMIC));
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LINEAR));
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_BOOLEAN));
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_CUSTOM));
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_SPARSE));
-
- // Create all flag types and verify counts.
- Histogram::FactoryGet("CRH-Histogram-UMA-Targeted", 1, 10, 5,
- HistogramBase::kUmaTargetedHistogramFlag);
- Histogram::FactoryGet("CRH-Histogram-UMA-Stability", 1, 10, 5,
- HistogramBase::kUmaStabilityHistogramFlag);
- SparseHistogram::FactoryGet("CRH-Sparse-UMA-Targeted",
- HistogramBase::kUmaTargetedHistogramFlag);
- SparseHistogram::FactoryGet("CRH-Sparse-UMA-Stability",
- HistogramBase::kUmaStabilityHistogramFlag);
- samples = report->SnapshotSamples();
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
- EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
- EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
- EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_TARGETED));
- EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_STABILITY));
-
- // Do lookup of existing histograms and verify counts.
- Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
- LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
- BooleanHistogram::FactoryGet("CRH-Boolean", 0);
- CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
- SparseHistogram::FactoryGet("CRH-Sparse", 0);
- samples = report->SnapshotSamples();
- EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
- EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
- EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
-}
-
} // namespace base
diff --git a/chromium/base/metrics/histogram_delta_serialization.cc b/chromium/base/metrics/histogram_delta_serialization.cc
index cf1c8b3c18d..a74b87f0d0e 100644
--- a/chromium/base/metrics/histogram_delta_serialization.cc
+++ b/chromium/base/metrics/histogram_delta_serialization.cc
@@ -35,12 +35,9 @@ void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
HistogramDeltaSerialization::HistogramDeltaSerialization(
const std::string& caller_name)
- : histogram_snapshot_manager_(this),
- serialized_deltas_(NULL) {
-}
+ : histogram_snapshot_manager_(this), serialized_deltas_(nullptr) {}
-HistogramDeltaSerialization::~HistogramDeltaSerialization() {
-}
+HistogramDeltaSerialization::~HistogramDeltaSerialization() = default;
void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
std::vector<std::string>* serialized_deltas,
@@ -54,7 +51,7 @@ void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
StatisticsRecorder::PrepareDeltas(
include_persistent, Histogram::kIPCSerializationSourceFlag,
Histogram::kNoFlags, &histogram_snapshot_manager_);
- serialized_deltas_ = NULL;
+ serialized_deltas_ = nullptr;
}
// static
diff --git a/chromium/base/metrics/histogram_functions.cc b/chromium/base/metrics/histogram_functions.cc
index 67dd2f8c6d1..4c1a4b57ed2 100644
--- a/chromium/base/metrics/histogram_functions.cc
+++ b/chromium/base/metrics/histogram_functions.cc
@@ -92,6 +92,10 @@ void UmaHistogramMemoryKB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
}
+void UmaHistogramMemoryMB(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
+}
+
void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
}
diff --git a/chromium/base/metrics/histogram_functions.h b/chromium/base/metrics/histogram_functions.h
index cb93df4c43f..46986283880 100644
--- a/chromium/base/metrics/histogram_functions.h
+++ b/chromium/base/metrics/histogram_functions.h
@@ -105,6 +105,8 @@ BASE_EXPORT void UmaHistogramLongTimes(const std::string& name,
// For recording memory related histograms.
// Used to measure common KB-granularity memory stats. Range is up to 500M.
BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
+// Used to measure common MB-granularity memory stats. Range is up to ~1G.
+BASE_EXPORT void UmaHistogramMemoryMB(const std::string& name, int sample);
// Used to measure common MB-granularity memory stats. Range is up to ~64G.
BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
diff --git a/chromium/base/metrics/histogram_samples.cc b/chromium/base/metrics/histogram_samples.cc
index f12cdc97ab0..7703580538f 100644
--- a/chromium/base/metrics/histogram_samples.cc
+++ b/chromium/base/metrics/histogram_samples.cc
@@ -7,6 +7,7 @@
#include <limits>
#include "base/compiler_specific.h"
+#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/pickle.h"
@@ -185,7 +186,7 @@ HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
// This mustn't do anything with |meta_|. It was passed to the ctor and may
// be invalid by the time this dtor gets called.
-HistogramSamples::~HistogramSamples() {}
+HistogramSamples::~HistogramSamples() = default;
void HistogramSamples::Add(const HistogramSamples& other) {
IncreaseSumAndCount(other.sum(), other.redundant_count());
@@ -251,7 +252,17 @@ void HistogramSamples::IncreaseSumAndCount(int64_t sum,
subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, count);
}
-SampleCountIterator::~SampleCountIterator() {}
+void HistogramSamples::RecordNegativeSample(NegativeSampleReason reason,
+ HistogramBase::Count increment) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
+ MAX_NEGATIVE_SAMPLE_REASONS);
+ UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", increment, 1,
+ 1 << 30, 100);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.NegativeSamples.Histogram",
+ static_cast<int32_t>(id()));
+}
+
+SampleCountIterator::~SampleCountIterator() = default;
bool SampleCountIterator::GetBucketIndex(size_t* index) const {
DCHECK(!Done());
@@ -269,7 +280,7 @@ SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
size_t bucket_index)
: min_(min), max_(max), bucket_index_(bucket_index), count_(count) {}
-SingleSampleIterator::~SingleSampleIterator() {}
+SingleSampleIterator::~SingleSampleIterator() = default;
bool SingleSampleIterator::Done() const {
return count_ == 0;
diff --git a/chromium/base/metrics/histogram_samples.h b/chromium/base/metrics/histogram_samples.h
index d1f95a47ed7..23237b0fe40 100644
--- a/chromium/base/metrics/histogram_samples.h
+++ b/chromium/base/metrics/histogram_samples.h
@@ -160,6 +160,19 @@ class BASE_EXPORT HistogramSamples {
}
protected:
+ enum NegativeSampleReason {
+ SAMPLES_HAVE_LOGGED_BUT_NOT_SAMPLE,
+ SAMPLES_SAMPLE_LESS_THAN_LOGGED,
+ SAMPLES_ADDED_NEGATIVE_COUNT,
+ SAMPLES_ADD_WENT_NEGATIVE,
+ SAMPLES_ADD_OVERFLOW,
+ SAMPLES_ACCUMULATE_NEGATIVE_COUNT,
+ SAMPLES_ACCUMULATE_WENT_NEGATIVE,
+ DEPRECATED_SAMPLES_ACCUMULATE_OVERFLOW,
+ SAMPLES_ACCUMULATE_OVERFLOW,
+ MAX_NEGATIVE_SAMPLE_REASONS
+ };
+
// Based on |op| type, add or subtract sample counts data from the iterator.
enum Operator { ADD, SUBTRACT };
virtual bool AddSubtractImpl(SampleCountIterator* iter, Operator op) = 0;
@@ -174,6 +187,10 @@ class BASE_EXPORT HistogramSamples {
// Atomically adjust the sum and redundant-count.
void IncreaseSumAndCount(int64_t sum, HistogramBase::Count count);
+ // Record a negative-sample observation and the reason why.
+ void RecordNegativeSample(NegativeSampleReason reason,
+ HistogramBase::Count increment);
+
AtomicSingleSample& single_sample() { return meta_->single_sample; }
const AtomicSingleSample& single_sample() const {
return meta_->single_sample;
diff --git a/chromium/base/metrics/histogram_snapshot_manager.cc b/chromium/base/metrics/histogram_snapshot_manager.cc
index 260fde9c73d..7b3cff8b4d8 100644
--- a/chromium/base/metrics/histogram_snapshot_manager.cc
+++ b/chromium/base/metrics/histogram_snapshot_manager.cc
@@ -41,8 +41,7 @@ HistogramSnapshotManager::HistogramSnapshotManager(
is_active_.store(false, std::memory_order_relaxed);
}
-HistogramSnapshotManager::~HistogramSnapshotManager() {
-}
+HistogramSnapshotManager::~HistogramSnapshotManager() = default;
void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
if (!histogram->ValidateHistogramContents(true, 0))
@@ -79,23 +78,16 @@ void HistogramSnapshotManager::PrepareSamples(
// Extract fields useful during debug.
const BucketRanges* ranges =
static_cast<const Histogram*>(histogram)->bucket_ranges();
- std::vector<HistogramBase::Sample> ranges_copy;
- for (size_t i = 0; i < ranges->size(); ++i)
- ranges_copy.push_back(ranges->range(i));
- HistogramBase::Sample* ranges_ptr = &ranges_copy[0];
uint32_t ranges_checksum = ranges->checksum();
uint32_t ranges_calc_checksum = ranges->CalculateChecksum();
- const char* histogram_name = histogram->histogram_name().c_str();
int32_t flags = histogram->flags();
// The checksum should have caught this, so crash separately if it didn't.
CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
CHECK(false); // Crash for the bucket order corruption.
// Ensure that compiler keeps around pointers to |histogram| and its
// internal |bucket_ranges_| for any minidumps.
- base::debug::Alias(&ranges_ptr);
base::debug::Alias(&ranges_checksum);
base::debug::Alias(&ranges_calc_checksum);
- base::debug::Alias(&histogram_name);
base::debug::Alias(&flags);
}
// Checksum corruption might not have caused order corruption.
diff --git a/chromium/base/metrics/histogram_snapshot_manager_unittest.cc b/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
index ca95eaa684a..1e2c599ec6b 100644
--- a/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -19,7 +19,7 @@ namespace base {
class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
public:
- HistogramFlattenerDeltaRecorder() {}
+ HistogramFlattenerDeltaRecorder() = default;
void RecordDelta(const HistogramBase& histogram,
const HistogramSamples& snapshot) override {
@@ -59,7 +59,7 @@ class HistogramSnapshotManagerTest : public testing::Test {
: statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
- ~HistogramSnapshotManagerTest() override {}
+ ~HistogramSnapshotManagerTest() override = default;
std::unique_ptr<StatisticsRecorder> statistics_recorder_;
HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
diff --git a/chromium/base/metrics/histogram_unittest.cc b/chromium/base/metrics/histogram_unittest.cc
index 63b9e85475f..c824eb75fe3 100644
--- a/chromium/base/metrics/histogram_unittest.cc
+++ b/chromium/base/metrics/histogram_unittest.cc
@@ -642,10 +642,10 @@ TEST_P(HistogramTest, BadConstruction) {
// Try to get the same histogram name with different arguments.
HistogramBase* bad_histogram = Histogram::FactoryGet(
"BadConstruction", 0, 100, 7, HistogramBase::kNoFlags);
- EXPECT_EQ(NULL, bad_histogram);
+ EXPECT_EQ(nullptr, bad_histogram);
bad_histogram = Histogram::FactoryGet(
"BadConstruction", 0, 99, 8, HistogramBase::kNoFlags);
- EXPECT_EQ(NULL, bad_histogram);
+ EXPECT_EQ(nullptr, bad_histogram);
HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
"BadConstructionLinear", 0, 100, 8, HistogramBase::kNoFlags);
@@ -654,10 +654,10 @@ TEST_P(HistogramTest, BadConstruction) {
// Try to get the same histogram name with different arguments.
bad_histogram = LinearHistogram::FactoryGet(
"BadConstructionLinear", 0, 100, 7, HistogramBase::kNoFlags);
- EXPECT_EQ(NULL, bad_histogram);
+ EXPECT_EQ(nullptr, bad_histogram);
bad_histogram = LinearHistogram::FactoryGet(
"BadConstructionLinear", 10, 100, 8, HistogramBase::kNoFlags);
- EXPECT_EQ(NULL, bad_histogram);
+ EXPECT_EQ(nullptr, bad_histogram);
}
TEST_P(HistogramTest, FactoryTime) {
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index 8353bdf63a6..79a903eb183 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -23,6 +23,9 @@
#include "base/metrics/statistics_recorder.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
@@ -51,7 +54,7 @@ enum : uint32_t {
// managed elsewhere and which could be destructed first. An AtomicWord is
// used instead of std::atomic because the latter can create global ctors
// and dtors.
-subtle::AtomicWord g_allocator = 0;
+subtle::AtomicWord g_histogram_allocator = 0;
// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
@@ -103,7 +106,8 @@ PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
PersistentMemoryAllocator* allocator)
: allocator_(allocator), record_iterator_(allocator) {}
-PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() {}
+PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
+ default;
PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
@@ -186,7 +190,7 @@ PersistentSampleMapRecords::PersistentSampleMapRecords(
uint64_t sample_map_id)
: data_manager_(data_manager), sample_map_id_(sample_map_id) {}
-PersistentSampleMapRecords::~PersistentSampleMapRecords() {}
+PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;
PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
const void* user) {
@@ -273,7 +277,7 @@ PersistentHistogramAllocator::PersistentHistogramAllocator(
: memory_allocator_(std::move(memory)),
sparse_histogram_data_manager_(memory_allocator_.get()) {}
-PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
+PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;
std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
Reference ref) {
@@ -659,7 +663,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
/*make_iterable=*/false);
// Create the right type of histogram.
- std::string name(histogram_data_ptr->name);
+ const char* name = histogram_data_ptr->name;
std::unique_ptr<HistogramBase> histogram;
switch (histogram_type) {
case HISTOGRAM:
@@ -744,7 +748,7 @@ void PersistentHistogramAllocator::RecordCreateHistogramResult(
result_histogram->Add(result);
}
-GlobalHistogramAllocator::~GlobalHistogramAllocator() {}
+GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;
// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
@@ -784,7 +788,7 @@ bool GlobalHistogramAllocator::CreateWithFile(
size = saturated_cast<size_t>(file.GetLength());
mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
} else {
- mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
+ mmfile->Initialize(std::move(file), {0, size},
MemoryMappedFile::READ_WRITE_EXTEND);
}
if (!mmfile->IsValid() ||
@@ -834,22 +838,71 @@ bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
}
// static
+FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
+ StringPiece name) {
+ return dir.AppendASCII(name).AddExtension(
+ PersistentMemoryAllocator::kFileExtension);
+}
+
+// static
+FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
+ const FilePath& dir,
+ StringPiece name,
+ base::Time stamp,
+ ProcessId pid) {
+ return ConstructFilePath(
+ dir,
+ StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
+ static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
+}
+
+// static
+bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
+ std::string* out_name,
+ Time* out_stamp,
+ ProcessId* out_pid) {
+ std::string filename = path.BaseName().AsUTF8Unsafe();
+ std::vector<base::StringPiece> parts = base::SplitStringPiece(
+ filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ if (parts.size() != 4)
+ return false;
+
+ if (out_name)
+ *out_name = parts[0].as_string();
+
+ if (out_stamp) {
+ int64_t stamp;
+ if (!HexStringToInt64(parts[1], &stamp))
+ return false;
+ *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
+ }
+
+ if (out_pid) {
+ int64_t pid;
+ if (!HexStringToInt64(parts[2], &pid))
+ return false;
+ *out_pid = static_cast<ProcessId>(pid);
+ }
+
+ return true;
+}
+
+// static
void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
StringPiece name,
FilePath* out_base_path,
FilePath* out_active_path,
FilePath* out_spare_path) {
if (out_base_path)
- *out_base_path = MakeMetricsFilePath(dir, name);
+ *out_base_path = ConstructFilePath(dir, name);
if (out_active_path) {
*out_active_path =
- MakeMetricsFilePath(dir, name.as_string().append("-active"));
+ ConstructFilePath(dir, name.as_string().append("-active"));
}
if (out_spare_path) {
- *out_spare_path =
- MakeMetricsFilePath(dir, name.as_string().append("-spare"));
+ *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
}
}
@@ -862,20 +915,18 @@ void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
FilePath* out_active_path,
FilePath* out_spare_path) {
if (out_upload_path) {
- std::string name_stamp =
- StringPrintf("%s-%X", name.c_str(),
- static_cast<unsigned int>(Time::Now().ToTimeT()));
- *out_upload_path = MakeMetricsFilePath(upload_dir, name_stamp);
+ *out_upload_path = ConstructFilePathForUploadDir(
+ upload_dir, name, Time::Now(), GetCurrentProcId());
}
if (out_active_path) {
*out_active_path =
- MakeMetricsFilePath(active_dir, name + std::string("-active"));
+ ConstructFilePath(active_dir, name + std::string("-active"));
}
if (out_spare_path) {
*out_spare_path =
- MakeMetricsFilePath(active_dir, name + std::string("-spare"));
+ ConstructFilePath(active_dir, name + std::string("-spare"));
}
}
@@ -891,7 +942,7 @@ bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
return false;
MemoryMappedFile mmfile;
- mmfile.Initialize(std::move(spare_file), {0, static_cast<int64_t>(size)},
+ mmfile.Initialize(std::move(spare_file), {0, size},
MemoryMappedFile::READ_WRITE_EXTEND);
success = mmfile.IsValid();
}
@@ -938,8 +989,8 @@ void GlobalHistogramAllocator::Set(
// Releasing or changing an allocator is extremely dangerous because it
// likely has histograms stored within it. If the backing memory is also
// also released, future accesses to those histograms will seg-fault.
- CHECK(!subtle::NoBarrier_Load(&g_allocator));
- subtle::Release_Store(&g_allocator,
+ CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
+ subtle::Release_Store(&g_histogram_allocator,
reinterpret_cast<uintptr_t>(allocator.release()));
size_t existing = StatisticsRecorder::GetHistogramCount();
@@ -950,7 +1001,7 @@ void GlobalHistogramAllocator::Set(
// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
return reinterpret_cast<GlobalHistogramAllocator*>(
- subtle::Acquire_Load(&g_allocator));
+ subtle::Acquire_Load(&g_histogram_allocator));
}
// static
@@ -980,7 +1031,7 @@ GlobalHistogramAllocator::ReleaseForTesting() {
DCHECK_NE(kResultHistogram, data->name);
}
- subtle::Release_Store(&g_allocator, 0);
+ subtle::Release_Store(&g_histogram_allocator, 0);
return WrapUnique(histogram_allocator);
};
@@ -1065,11 +1116,4 @@ void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
}
}
-// static
-FilePath GlobalHistogramAllocator::MakeMetricsFilePath(const FilePath& dir,
- StringPiece name) {
- return dir.AppendASCII(name).AddExtension(
- PersistentMemoryAllocator::kFileExtension);
-}
-
} // namespace base
diff --git a/chromium/base/metrics/persistent_histogram_allocator.h b/chromium/base/metrics/persistent_histogram_allocator.h
index a6e2ff5d431..3766ab123f6 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.h
+++ b/chromium/base/metrics/persistent_histogram_allocator.h
@@ -423,6 +423,21 @@ class BASE_EXPORT GlobalHistogramAllocator
uint64_t id,
StringPiece name);
+ // Constructs a filename using a name.
+ static FilePath ConstructFilePath(const FilePath& dir, StringPiece name);
+
+ // Like above but with timestamp and pid for use in upload directories.
+ static FilePath ConstructFilePathForUploadDir(const FilePath& dir,
+ StringPiece name,
+ base::Time stamp,
+ ProcessId pid);
+
+ // Parses a filename to extract name, timestamp, and pid.
+ static bool ParseFilePath(const FilePath& path,
+ std::string* out_name,
+ Time* out_stamp,
+ ProcessId* out_pid);
+
// Constructs a set of names in |dir| based on name that can be used for a
// base + active persistent memory mapped location for CreateWithActiveFile().
// The spare path is a file that can be pre-created and moved to be active
diff --git a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
index 0f7ba2bf26a..c492a246d58 100644
--- a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -129,6 +129,34 @@ TEST_F(PersistentHistogramAllocatorTest, CreateAndIterate) {
EXPECT_FALSE(recovered);
}
+TEST_F(PersistentHistogramAllocatorTest, ConstructPaths) {
+ const FilePath dir_path(FILE_PATH_LITERAL("foo/"));
+ const std::string dir_string =
+ dir_path.NormalizePathSeparators().AsUTF8Unsafe();
+
+ FilePath path = GlobalHistogramAllocator::ConstructFilePath(dir_path, "bar");
+ EXPECT_EQ(dir_string + "bar.pma", path.AsUTF8Unsafe());
+
+ std::string name;
+ Time stamp;
+ ProcessId pid;
+ EXPECT_FALSE(
+ GlobalHistogramAllocator::ParseFilePath(path, &name, nullptr, nullptr));
+ EXPECT_FALSE(
+ GlobalHistogramAllocator::ParseFilePath(path, nullptr, &stamp, nullptr));
+ EXPECT_FALSE(
+ GlobalHistogramAllocator::ParseFilePath(path, nullptr, nullptr, &pid));
+
+ path = GlobalHistogramAllocator::ConstructFilePathForUploadDir(
+ dir_path, "bar", Time::FromTimeT(12345), 6789);
+ EXPECT_EQ(dir_string + "bar-3039-1A85.pma", path.AsUTF8Unsafe());
+ ASSERT_TRUE(
+ GlobalHistogramAllocator::ParseFilePath(path, &name, &stamp, &pid));
+ EXPECT_EQ(name, "bar");
+ EXPECT_EQ(Time::FromTimeT(12345), stamp);
+ EXPECT_EQ(static_cast<ProcessId>(6789), pid);
+}
+
TEST_F(PersistentHistogramAllocatorTest, CreateWithFile) {
const char temp_name[] = "CreateWithFileTest";
ScopedTempDir temp_dir;
diff --git a/chromium/base/metrics/persistent_memory_allocator.cc b/chromium/base/metrics/persistent_memory_allocator.cc
index f3ade51dd36..be107c39474 100644
--- a/chromium/base/metrics/persistent_memory_allocator.cc
+++ b/chromium/base/metrics/persistent_memory_allocator.cc
@@ -349,9 +349,9 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
// These atomics operate inter-process and so must be lock-free. The local
// casts are to make sure it can be evaluated at compile time to a constant.
- CHECK(((SharedMetadata*)0)->freeptr.is_lock_free());
- CHECK(((SharedMetadata*)0)->flags.is_lock_free());
- CHECK(((BlockHeader*)0)->next.is_lock_free());
+ CHECK(((SharedMetadata*)nullptr)->freeptr.is_lock_free());
+ CHECK(((SharedMetadata*)nullptr)->flags.is_lock_free());
+ CHECK(((BlockHeader*)nullptr)->next.is_lock_free());
CHECK(corrupt_.is_lock_free());
if (shared_meta()->cookie != kGlobalCookie) {
@@ -1029,7 +1029,7 @@ SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
read_only),
shared_memory_(std::move(memory)) {}
-SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
+SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() = default;
// static
bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
@@ -1054,14 +1054,9 @@ FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
id,
name,
read_only),
- mapped_file_(std::move(file)) {
- // Ensure the disk-copy of the data reflects the fully-initialized memory as
- // there is no guarantee as to what order the pages might be auto-flushed by
- // the OS in the future.
- Flush(true);
-}
+ mapped_file_(std::move(file)) {}
-FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
+FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
// static
bool FilePersistentMemoryAllocator::IsFileAcceptable(
@@ -1072,12 +1067,13 @@ bool FilePersistentMemoryAllocator::IsFileAcceptable(
void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
if (sync)
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (IsReadonly())
return;
#if defined(OS_WIN)
- // Windows doesn't support a synchronous flush.
+ // Windows doesn't support asynchronous flush.
+ AssertBlockingAllowed();
BOOL success = ::FlushViewOfFile(data(), length);
DPCHECK(success);
#elif defined(OS_MACOSX)
@@ -1163,7 +1159,7 @@ DelayedPersistentAllocation::DelayedPersistentAllocation(
DCHECK(reference_);
}
-DelayedPersistentAllocation::~DelayedPersistentAllocation() {}
+DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
void* DelayedPersistentAllocation::Get() const {
// Relaxed operations are acceptable here because it's not protecting the
diff --git a/chromium/base/metrics/persistent_memory_allocator.h b/chromium/base/metrics/persistent_memory_allocator.h
index ae50ecf5d76..978a362cd91 100644
--- a/chromium/base/metrics/persistent_memory_allocator.h
+++ b/chromium/base/metrics/persistent_memory_allocator.h
@@ -328,7 +328,8 @@ class BASE_EXPORT PersistentMemoryAllocator {
// The |sync| parameter indicates if this call should block until the flush
// is complete but is only advisory and may or may not have an effect
// depending on the capabilities of the OS. Synchronous flushes are allowed
- // only from theads that are allowed to do I/O.
+ // only from threads that are allowed to do I/O but since |sync| is only
+ // advisory, all flushes should be done on IO-capable threads.
void Flush(bool sync);
// Direct access to underlying memory segment. If the segment is shared
diff --git a/chromium/base/metrics/persistent_sample_map.cc b/chromium/base/metrics/persistent_sample_map.cc
index cbb7974c575..f38b9d1f601 100644
--- a/chromium/base/metrics/persistent_sample_map.cc
+++ b/chromium/base/metrics/persistent_sample_map.cc
@@ -18,18 +18,6 @@ typedef HistogramBase::Sample Sample;
namespace {
-enum NegativeSampleReason {
- PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE,
- PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED,
- PERSISTENT_SPARSE_ADDED_NEGATIVE_COUNT,
- PERSISTENT_SPARSE_ADD_WENT_NEGATIVE,
- PERSISTENT_SPARSE_ADD_OVERFLOW,
- PERSISTENT_SPARSE_ACCUMULATE_NEGATIVE_COUNT,
- PERSISTENT_SPARSE_ACCUMULATE_WENT_NEGATIVE,
- PERSISTENT_SPARSE_ACCUMULATE_OVERFLOW,
- MAX_NEGATIVE_SAMPLE_REASONS
-};
-
// An iterator for going through a PersistentSampleMap. The logic here is
// identical to that of SampleMapIterator but with different data structures.
// Changes here likely need to be duplicated there.
@@ -62,7 +50,7 @@ PersistentSampleMapIterator::PersistentSampleMapIterator(
SkipEmptyBuckets();
}
-PersistentSampleMapIterator::~PersistentSampleMapIterator() {}
+PersistentSampleMapIterator::~PersistentSampleMapIterator() = default;
bool PersistentSampleMapIterator::Done() const {
return iter_ == end_;
@@ -124,25 +112,19 @@ void PersistentSampleMap::Accumulate(Sample value, Count count) {
#if 0 // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
*GetOrCreateSampleCountStorage(value) += count;
#else
- NegativeSampleReason reason = MAX_NEGATIVE_SAMPLE_REASONS;
Count* local_count_ptr = GetOrCreateSampleCountStorage(value);
if (count < 0) {
- reason = PERSISTENT_SPARSE_ACCUMULATE_NEGATIVE_COUNT;
if (*local_count_ptr < -count)
- reason = PERSISTENT_SPARSE_ACCUMULATE_WENT_NEGATIVE;
+ RecordNegativeSample(SAMPLES_ACCUMULATE_WENT_NEGATIVE, -count);
+ else
+ RecordNegativeSample(SAMPLES_ACCUMULATE_NEGATIVE_COUNT, -count);
*local_count_ptr += count;
} else {
- *local_count_ptr += count;
- if (*local_count_ptr < 0)
- reason = PERSISTENT_SPARSE_ACCUMULATE_OVERFLOW;
- }
- if (reason != MAX_NEGATIVE_SAMPLE_REASONS) {
- UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
- MAX_NEGATIVE_SAMPLE_REASONS);
- UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", count, 1,
- 1 << 30, 100);
- UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.NegativeSamples.Histogram",
- static_cast<int32_t>(id()));
+ Sample old_value = *local_count_ptr;
+ Sample new_value = old_value + count;
+ *local_count_ptr = new_value;
+ if ((new_value >= 0) != (old_value >= 0))
+ RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
}
#endif
IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
diff --git a/chromium/base/metrics/persistent_sample_map_unittest.cc b/chromium/base/metrics/persistent_sample_map_unittest.cc
index 60bb976b289..b25f58203de 100644
--- a/chromium/base/metrics/persistent_sample_map_unittest.cc
+++ b/chromium/base/metrics/persistent_sample_map_unittest.cc
@@ -171,7 +171,7 @@ TEST(PersistentSampleMapIteratorTest, IterateTest) {
EXPECT_EQ(1, min);
EXPECT_EQ(2, max);
EXPECT_EQ(100, count);
- EXPECT_FALSE(it->GetBucketIndex(NULL));
+ EXPECT_FALSE(it->GetBucketIndex(nullptr));
it->Next();
it->Get(&min, &max, &count);
diff --git a/chromium/base/metrics/sample_map.cc b/chromium/base/metrics/sample_map.cc
index 08fe0320577..c6dce293212 100644
--- a/chromium/base/metrics/sample_map.cc
+++ b/chromium/base/metrics/sample_map.cc
@@ -47,7 +47,7 @@ SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
SkipEmptyBuckets();
}
-SampleMapIterator::~SampleMapIterator() {}
+SampleMapIterator::~SampleMapIterator() = default;
bool SampleMapIterator::Done() const {
return iter_ == end_;
diff --git a/chromium/base/metrics/sample_map_unittest.cc b/chromium/base/metrics/sample_map_unittest.cc
index 6e6da2b3d24..83db56f5037 100644
--- a/chromium/base/metrics/sample_map_unittest.cc
+++ b/chromium/base/metrics/sample_map_unittest.cc
@@ -88,7 +88,7 @@ TEST(SampleMapIteratorTest, IterateTest) {
EXPECT_EQ(1, min);
EXPECT_EQ(2, max);
EXPECT_EQ(100, count);
- EXPECT_FALSE(it->GetBucketIndex(NULL));
+ EXPECT_FALSE(it->GetBucketIndex(nullptr));
it->Next();
it->Get(&min, &max, &count);
diff --git a/chromium/base/metrics/sample_vector.cc b/chromium/base/metrics/sample_vector.cc
index c484ed74aaf..cf8634e8367 100644
--- a/chromium/base/metrics/sample_vector.cc
+++ b/chromium/base/metrics/sample_vector.cc
@@ -30,7 +30,7 @@ SampleVectorBase::SampleVectorBase(uint64_t id,
CHECK_GE(bucket_ranges_->bucket_count(), 1u);
}
-SampleVectorBase::~SampleVectorBase() {}
+SampleVectorBase::~SampleVectorBase() = default;
void SampleVectorBase::Accumulate(Sample value, Count count) {
const size_t bucket_index = GetBucketIndex(value);
@@ -55,8 +55,14 @@ void SampleVectorBase::Accumulate(Sample value, Count count) {
}
// Handle the multi-sample case.
- subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
+ Count new_value =
+ subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
+
+ // TODO(bcwhite) Remove after crbug.com/682680.
+ Count old_value = new_value - count;
+ if ((new_value >= 0) != (old_value >= 0) && count > 0)
+ RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
}
Count SampleVectorBase::GetCount(Sample value) const {
@@ -321,7 +327,7 @@ PersistentSampleVector::PersistentSampleVector(
}
}
-PersistentSampleVector::~PersistentSampleVector() {}
+PersistentSampleVector::~PersistentSampleVector() = default;
bool PersistentSampleVector::MountExistingCountsStorage() const {
// There is no early exit if counts is not yet mounted because, given that
@@ -378,7 +384,7 @@ SampleVectorIterator::SampleVectorIterator(
SkipEmptyBuckets();
}
-SampleVectorIterator::~SampleVectorIterator() {}
+SampleVectorIterator::~SampleVectorIterator() = default;
bool SampleVectorIterator::Done() const {
return index_ >= counts_size_;
@@ -394,17 +400,17 @@ void SampleVectorIterator::Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const {
DCHECK(!Done());
- if (min != NULL)
+ if (min != nullptr)
*min = bucket_ranges_->range(index_);
- if (max != NULL)
+ if (max != nullptr)
*max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
- if (count != NULL)
+ if (count != nullptr)
*count = subtle::NoBarrier_Load(&counts_[index_]);
}
bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
DCHECK(!Done());
- if (index != NULL)
+ if (index != nullptr)
*index = index_;
return true;
}
diff --git a/chromium/base/metrics/single_sample_metrics_unittest.cc b/chromium/base/metrics/single_sample_metrics_unittest.cc
index e3c1cf0e2be..c1a8b96b5c5 100644
--- a/chromium/base/metrics/single_sample_metrics_unittest.cc
+++ b/chromium/base/metrics/single_sample_metrics_unittest.cc
@@ -20,7 +20,7 @@ const char kMetricName[] = "Single.Sample.Metric";
class SingleSampleMetricsTest : public testing::Test {
public:
- SingleSampleMetricsTest() {}
+ SingleSampleMetricsTest() = default;
~SingleSampleMetricsTest() override {
// Ensure we cleanup after ourselves.
diff --git a/chromium/base/metrics/sparse_histogram.cc b/chromium/base/metrics/sparse_histogram.cc
index ef9945add6f..e33fd3c88f2 100644
--- a/chromium/base/metrics/sparse_histogram.cc
+++ b/chromium/base/metrics/sparse_histogram.cc
@@ -45,7 +45,7 @@ HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
DCHECK(!histogram_ref); // Should never have been set.
DCHECK(!allocator); // Shouldn't have failed.
flags &= ~HistogramBase::kIsPersistent;
- tentative_histogram.reset(new SparseHistogram(name));
+ tentative_histogram.reset(new SparseHistogram(GetPermanentName(name)));
tentative_histogram->SetFlags(flags);
}
@@ -62,10 +62,6 @@ HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
allocator->FinalizeHistogram(histogram_ref,
histogram == tentative_histogram_ptr);
}
-
- ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
- } else {
- ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
CHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
@@ -75,14 +71,14 @@ HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
// static
std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
PersistentHistogramAllocator* allocator,
- const std::string& name,
+ const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta) {
return WrapUnique(
new SparseHistogram(allocator, name, meta, logged_meta));
}
-SparseHistogram::~SparseHistogram() {}
+SparseHistogram::~SparseHistogram() = default;
uint64_t SparseHistogram::name_hash() const {
return unlogged_samples_->id();
@@ -174,13 +170,13 @@ void SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
pickle->WriteInt(flags());
}
-SparseHistogram::SparseHistogram(const std::string& name)
+SparseHistogram::SparseHistogram(const char* name)
: HistogramBase(name),
unlogged_samples_(new SampleMap(HashMetricName(name))),
logged_samples_(new SampleMap(unlogged_samples_->id())) {}
SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
- const std::string& name,
+ const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta)
: HistogramBase(name),
@@ -205,7 +201,7 @@ HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
int flags;
if (!iter->ReadString(&histogram_name) || !iter->ReadInt(&flags)) {
DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
- return NULL;
+ return nullptr;
}
flags &= ~HistogramBase::kIPCSerializationSourceFlag;
@@ -278,9 +274,7 @@ void SparseHistogram::WriteAsciiImpl(bool graph_it,
void SparseHistogram::WriteAsciiHeader(const Count total_count,
std::string* output) const {
- StringAppendF(output,
- "Histogram: %s recorded %d samples",
- histogram_name().c_str(),
+ StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
total_count);
if (flags())
StringAppendF(output, " (flags = 0x%x)", flags());
diff --git a/chromium/base/metrics/sparse_histogram.h b/chromium/base/metrics/sparse_histogram.h
index 9733e57ed5c..913762c95df 100644
--- a/chromium/base/metrics/sparse_histogram.h
+++ b/chromium/base/metrics/sparse_histogram.h
@@ -35,7 +35,7 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
// live longer than the created sparse histogram.
static std::unique_ptr<HistogramBase> PersistentCreate(
PersistentHistogramAllocator* allocator,
- const std::string& name,
+ const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
@@ -63,10 +63,10 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
private:
// Clients should always use FactoryGet to create SparseHistogram.
- explicit SparseHistogram(const std::string& name);
+ explicit SparseHistogram(const char* name);
SparseHistogram(PersistentHistogramAllocator* allocator,
- const std::string& name,
+ const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index 3ad2bc86801..eeba150bf14 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -73,7 +73,9 @@ class SparseHistogramTest : public testing::TestWithParam<bool> {
GlobalHistogramAllocator::ReleaseForTesting();
}
- std::unique_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
+ std::unique_ptr<SparseHistogram> NewSparseHistogram(const char* name) {
+ // std::make_unique can't access protected ctor so do it manually. This
+ // test class is a friend so can access it.
return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
}
@@ -181,7 +183,7 @@ TEST_P(SparseHistogramTest, MacroBasicTest) {
HistogramBase* sparse_histogram = histograms[0];
EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
- EXPECT_EQ("Sparse", sparse_histogram->histogram_name());
+ EXPECT_EQ("Sparse", StringPiece(sparse_histogram->histogram_name()));
EXPECT_EQ(
HistogramBase::kUmaTargetedHistogramFlag |
(use_persistent_histogram_allocator_ ? HistogramBase::kIsPersistent
diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc
index bbfc9b94ca1..a21adc09a0c 100644
--- a/chromium/base/metrics/statistics_recorder.cc
+++ b/chromium/base/metrics/statistics_recorder.cc
@@ -95,12 +95,13 @@ HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
// twice |if (!histograms_)|.
ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
} else {
- const std::string& name = histogram->histogram_name();
- HistogramMap::iterator it = histograms_->find(name);
+ const char* name = histogram->histogram_name();
+ StringPiece name_piece(name);
+ HistogramMap::iterator it = histograms_->find(name_piece);
if (histograms_->end() == it) {
- // The StringKey references the name within |histogram| rather than
- // making a copy.
- (*histograms_)[name] = histogram;
+ // |name_piece| is guaranteed to never change or be deallocated so long
+ // as the histogram is alive (which is forever).
+ (*histograms_)[name_piece] = histogram;
ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
// If there are callbacks for this histogram, we set the kCallbackExists
// flag.
@@ -117,8 +118,9 @@ HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
histogram_to_return = histogram;
} else {
// We already have one histogram with this name.
- DCHECK_EQ(histogram->histogram_name(),
- it->second->histogram_name()) << "hash collision";
+ DCHECK_EQ(StringPiece(histogram->histogram_name()),
+ StringPiece(it->second->histogram_name()))
+ << "hash collision";
histogram_to_return = it->second;
histogram_to_delete = histogram;
}
@@ -202,19 +204,13 @@ void StatisticsRecorder::WriteGraph(const std::string& query,
}
// static
-std::string StatisticsRecorder::ToJSON(const std::string& query) {
+std::string StatisticsRecorder::ToJSON(JSONVerbosityLevel verbosity_level) {
if (!IsActive())
return std::string();
std::string output("{");
- if (!query.empty()) {
- output += "\"query\":";
- EscapeJSONString(query, true, &output);
- output += ",";
- }
-
Histograms snapshot;
- GetSnapshot(query, &snapshot);
+ GetSnapshot(std::string(), &snapshot);
output += "\"histograms\":[";
bool first_histogram = true;
for (const HistogramBase* histogram : snapshot) {
@@ -223,7 +219,7 @@ std::string StatisticsRecorder::ToJSON(const std::string& query) {
else
output += ",";
std::string json;
- histogram->WriteJSON(&json);
+ histogram->WriteJSON(&json, verbosity_level);
output += json;
}
output += "]}";
@@ -321,8 +317,11 @@ void StatisticsRecorder::GetSnapshot(const std::string& query,
if (!histograms_)
return;
+ // Need a c-string query for comparisons against c-string histogram name.
+ const char* query_string = query.c_str();
+
for (const auto& entry : *histograms_) {
- if (entry.second->histogram_name().find(query) != std::string::npos)
+ if (strstr(entry.second->histogram_name(), query_string) != nullptr)
snapshot->push_back(entry.second);
}
}
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index cdec67ae1dd..49ccaf51917 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -110,9 +110,9 @@ class BASE_EXPORT StatisticsRecorder {
static void WriteHTMLGraph(const std::string& query, std::string* output);
static void WriteGraph(const std::string& query, std::string* output);
- // Returns the histograms with |query| as a substring as JSON text (an empty
- // |query| will process all registered histograms).
- static std::string ToJSON(const std::string& query);
+ // Returns the histograms with |verbosity_level| as the serialization
+ // verbosity.
+ static std::string ToJSON(JSONVerbosityLevel verbosity_level);
// Method for extracting histograms which were marked for use by UMA.
static void GetHistograms(Histograms* output);
diff --git a/chromium/base/metrics/statistics_recorder_unittest.cc b/chromium/base/metrics/statistics_recorder_unittest.cc
index 39b1f4dbf75..92d1bba602e 100644
--- a/chromium/base/metrics/statistics_recorder_unittest.cc
+++ b/chromium/base/metrics/statistics_recorder_unittest.cc
@@ -14,6 +14,7 @@
#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
+#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/record_histogram_checker.h"
@@ -42,7 +43,7 @@ class LogStateSaver {
// Test implementation of RecordHistogramChecker interface.
class OddRecordHistogramChecker : public base::RecordHistogramChecker {
public:
- ~OddRecordHistogramChecker() override {}
+ ~OddRecordHistogramChecker() override = default;
// base::RecordHistogramChecker:
bool ShouldRecord(uint64_t histogram_hash) const override {
@@ -90,7 +91,7 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
StatisticsRecorder::UninitializeForTesting();
}
- Histogram* CreateHistogram(const std::string& name,
+ Histogram* CreateHistogram(const char* name,
HistogramBase::Sample min,
HistogramBase::Sample max,
size_t bucket_count) {
@@ -371,57 +372,51 @@ TEST_P(StatisticsRecorderTest, ToJSON) {
Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
->Add(40);
- std::string json(StatisticsRecorder::ToJSON(std::string()));
+ std::string json(StatisticsRecorder::ToJSON(JSON_VERBOSITY_LEVEL_FULL));
// Check for valid JSON.
std::unique_ptr<Value> root = JSONReader::Read(json);
ASSERT_TRUE(root.get());
- DictionaryValue* root_dict = NULL;
+ DictionaryValue* root_dict = nullptr;
ASSERT_TRUE(root->GetAsDictionary(&root_dict));
// No query should be set.
ASSERT_FALSE(root_dict->HasKey("query"));
- ListValue* histogram_list = NULL;
+ ListValue* histogram_list = nullptr;
ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
ASSERT_EQ(2u, histogram_list->GetSize());
// Examine the first histogram.
- DictionaryValue* histogram_dict = NULL;
+ DictionaryValue* histogram_dict = nullptr;
ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
int sample_count;
ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
EXPECT_EQ(2, sample_count);
- // Test the query filter.
- std::string query("TestHistogram2");
- json = StatisticsRecorder::ToJSON(query);
+ ListValue* buckets_list = nullptr;
+ ASSERT_TRUE(histogram_dict->GetList("buckets", &buckets_list));
+ EXPECT_EQ(2u, buckets_list->GetList().size());
+ // Check the serialized JSON with a different verbosity level.
+ json = StatisticsRecorder::ToJSON(JSON_VERBOSITY_LEVEL_OMIT_BUCKETS);
root = JSONReader::Read(json);
ASSERT_TRUE(root.get());
+ root_dict = nullptr;
ASSERT_TRUE(root->GetAsDictionary(&root_dict));
-
- std::string query_value;
- ASSERT_TRUE(root_dict->GetString("query", &query_value));
- EXPECT_EQ(query, query_value);
-
+ histogram_list = nullptr;
ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
- ASSERT_EQ(1u, histogram_list->GetSize());
-
+ ASSERT_EQ(2u, histogram_list->GetSize());
+ histogram_dict = nullptr;
ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
-
- std::string histogram_name;
- ASSERT_TRUE(histogram_dict->GetString("name", &histogram_name));
- EXPECT_EQ("TestHistogram2", histogram_name);
-
- json.clear();
- UninitializeStatisticsRecorder();
-
- // No data should be returned.
- json = StatisticsRecorder::ToJSON(query);
- EXPECT_TRUE(json.empty());
+ sample_count = 0;
+ ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
+ EXPECT_EQ(2, sample_count);
+ buckets_list = nullptr;
+ // Bucket information should be omitted.
+ ASSERT_FALSE(histogram_dict->GetList("buckets", &buckets_list));
}
TEST_P(StatisticsRecorderTest, IterationTest) {
diff --git a/chromium/base/native_library_posix.cc b/chromium/base/native_library_posix.cc
index 3459716af1c..49925375238 100644
--- a/chromium/base/native_library_posix.cc
+++ b/chromium/base/native_library_posix.cc
@@ -23,7 +23,7 @@ NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
// dlopen() opens the file off disk.
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// We deliberately do not use RTLD_DEEPBIND by default. For the history why,
// please refer to the bug tracker. Some useful bug reports to read include:
diff --git a/chromium/base/native_library_win.cc b/chromium/base/native_library_win.cc
index c281fbff7b9..7a5c9f62875 100644
--- a/chromium/base/native_library_win.cc
+++ b/chromium/base/native_library_win.cc
@@ -82,7 +82,7 @@ LoadLibraryResult GetLoadLibraryResult(bool are_search_flags_available,
NativeLibrary LoadNativeLibraryHelper(const FilePath& library_path,
NativeLibraryLoadError* error) {
// LoadLibrary() opens the file off disk.
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
HMODULE module = nullptr;
diff --git a/chromium/base/nix/mime_util_xdg.cc b/chromium/base/nix/mime_util_xdg.cc
index f6e755eede3..6b5b11d6397 100644
--- a/chromium/base/nix/mime_util_xdg.cc
+++ b/chromium/base/nix/mime_util_xdg.cc
@@ -24,7 +24,7 @@ LazyInstance<Lock>::Leaky g_mime_util_xdg_lock = LAZY_INSTANCE_INITIALIZER;
std::string GetFileMimeType(const FilePath& filepath) {
if (filepath.empty())
return std::string();
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
AutoLock scoped_lock(g_mime_util_xdg_lock.Get());
return xdg_mime_get_mime_type_from_file_name(filepath.value().c_str());
}
diff --git a/chromium/base/nix/xdg_util.cc b/chromium/base/nix/xdg_util.cc
index 91135c15f3c..7d1a8795ebb 100644
--- a/chromium/base/nix/xdg_util.cc
+++ b/chromium/base/nix/xdg_util.cc
@@ -119,7 +119,7 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
const char* GetDesktopEnvironmentName(DesktopEnvironment env) {
switch (env) {
case DESKTOP_ENVIRONMENT_OTHER:
- return NULL;
+ return nullptr;
case DESKTOP_ENVIRONMENT_GNOME:
return "GNOME";
case DESKTOP_ENVIRONMENT_KDE3:
@@ -135,7 +135,7 @@ const char* GetDesktopEnvironmentName(DesktopEnvironment env) {
case DESKTOP_ENVIRONMENT_XFCE:
return "XFCE";
}
- return NULL;
+ return nullptr;
}
const char* GetDesktopEnvironmentName(Environment* env) {
diff --git a/chromium/base/numerics/README.md b/chromium/base/numerics/README.md
index 5d7df8949bc..896b1242135 100644
--- a/chromium/base/numerics/README.md
+++ b/chromium/base/numerics/README.md
@@ -64,7 +64,7 @@ represent the full range of the source type:
```cpp
// Throw a compiler error if byte_value is changed to an out-of-range-type.
-int int_value = saturated_cast<int>(byte_value);
+int int_value = strict_cast<int>(byte_value);
```
You can also enforce these compile-time restrictions on function parameters by
diff --git a/chromium/base/numerics/ranges.h b/chromium/base/numerics/ranges.h
index 8640731ca57..f19320cedda 100644
--- a/chromium/base/numerics/ranges.h
+++ b/chromium/base/numerics/ranges.h
@@ -10,8 +10,9 @@
namespace base {
-template <typename T>
-T ClampToRange(T value, T min, T max) {
+// To be replaced with std::clamp() from C++17, someday.
+template <class T>
+constexpr const T& ClampToRange(const T& value, const T& min, const T& max) {
return std::min(std::max(value, min), max);
}
diff --git a/chromium/base/observer_list.h b/chromium/base/observer_list.h
index 1a07b95e245..ec1abbdd216 100644
--- a/chromium/base/observer_list.h
+++ b/chromium/base/observer_list.h
@@ -21,10 +21,20 @@
//
// OVERVIEW:
//
-// A container for a list of observers. Unlike a normal STL vector or list,
-// this container can be modified during iteration without invalidating the
-// iterator. So, it safely handles the case of an observer removing itself
-// or other observers from the list while observers are being notified.
+// A list of observers. Unlike a standard vector or list, this container can
+// be modified during iteration without invalidating the iterator. So, it
+// safely handles the case of an observer removing itself or other observers
+// from the list while observers are being notified.
+//
+//
+// WARNING:
+//
+// ObserverList is not thread-compatible. Iterating on the same ObserverList
+// simultaneously in different threads is not safe, even when the ObserverList
+// itself is not modified.
+//
+// For a thread-safe observer list, see ObserverListThreadSafe.
+//
//
// TYPICAL USAGE:
//
@@ -39,26 +49,25 @@
// };
//
// void AddObserver(Observer* obs) {
-// observer_list_.AddObserver(obs);
+// observers_.AddObserver(obs);
// }
//
-// void RemoveObserver(Observer* obs) {
-// observer_list_.RemoveObserver(obs);
+// void RemoveObserver(const Observer* obs) {
+// observers_.RemoveObserver(obs);
// }
//
// void NotifyFoo() {
-// for (auto& observer : observer_list_)
-// observer.OnFoo(this);
+// for (Observer& obs : observers_)
+// obs.OnFoo(this);
// }
//
// void NotifyBar(int x, int y) {
-// for (FooList::iterator i = observer_list.begin(),
-// e = observer_list.end(); i != e; ++i)
-// i->OnBar(this, x, y);
+// for (Observer& obs : observers_)
+// obs.OnBar(this, x, y);
// }
//
// private:
-// base::ObserverList<Observer> observer_list_;
+// base::ObserverList<Observer> observers_;
// };
//
//
@@ -66,49 +75,106 @@
namespace base {
-template <typename ObserverType>
-class ObserverListThreadSafe;
+// Enumeration of which observers are notified by ObserverList.
+enum class ObserverListPolicy {
+ // Specifies that any observers added during notification are notified.
+ // This is the default policy if no policy is provided to the constructor.
+ ALL,
-template <class ObserverType>
-class ObserverListBase
- : public SupportsWeakPtr<ObserverListBase<ObserverType>> {
- public:
- // Enumeration of which observers are notified.
- enum NotificationType {
- // Specifies that any observers added during notification are notified.
- // This is the default type if no type is provided to the constructor.
- NOTIFY_ALL,
-
- // Specifies that observers added while sending out notification are not
- // notified.
- NOTIFY_EXISTING_ONLY
- };
+ // Specifies that observers added while sending out notification are not
+ // notified.
+ EXISTING_ONLY,
+};
+// When check_empty is true, assert that the list is empty on destruction.
+template <class ObserverType, bool check_empty = false>
+class ObserverList
+ : public SupportsWeakPtr<ObserverList<ObserverType, check_empty>> {
+ public:
// An iterator class that can be used to access the list of observers.
- template <class ContainerType>
class Iter {
public:
- Iter();
- explicit Iter(ContainerType* list);
- ~Iter();
+ Iter() : index_(0), max_index_(0) {}
+
+ explicit Iter(const ObserverList* list)
+ : list_(const_cast<ObserverList*>(list)->AsWeakPtr()),
+ index_(0),
+ max_index_(list->policy_ == ObserverListPolicy::ALL
+ ? std::numeric_limits<size_t>::max()
+ : list->observers_.size()) {
+ DCHECK(list_);
+ EnsureValidIndex();
+ ++list_->live_iterator_count_;
+ }
+
+ ~Iter() {
+ if (!list_)
+ return;
+
+ DCHECK_GT(list_->live_iterator_count_, 0);
+ if (--list_->live_iterator_count_ == 0)
+ list_->Compact();
+ }
+
+ Iter(const Iter& other)
+ : list_(other.list_),
+ index_(other.index_),
+ max_index_(other.max_index_) {
+ if (list_)
+ ++list_->live_iterator_count_;
+ }
+
+ Iter& operator=(Iter other) {
+ using std::swap;
+ swap(list_, other.list_);
+ swap(index_, other.index_);
+ swap(max_index_, other.max_index_);
+ return *this;
+ }
- // A workaround for C2244. MSVC requires fully qualified type name for
- // return type on a function definition to match a function declaration.
- using ThisType =
- typename ObserverListBase<ObserverType>::template Iter<ContainerType>;
+ bool operator==(const Iter& other) const {
+ return (is_end() && other.is_end()) ||
+ (list_.get() == other.list_.get() && index_ == other.index_);
+ }
- bool operator==(const Iter& other) const;
- bool operator!=(const Iter& other) const;
- ThisType& operator++();
- ObserverType* operator->() const;
- ObserverType& operator*() const;
+ bool operator!=(const Iter& other) const { return !(*this == other); }
+
+ Iter& operator++() {
+ if (list_) {
+ ++index_;
+ EnsureValidIndex();
+ }
+ return *this;
+ }
+
+ ObserverType* operator->() const {
+ ObserverType* const current = GetCurrent();
+ DCHECK(current);
+ return current;
+ }
+
+ ObserverType& operator*() const {
+ ObserverType* const current = GetCurrent();
+ DCHECK(current);
+ return *current;
+ }
private:
FRIEND_TEST_ALL_PREFIXES(ObserverListTest, BasicStdIterator);
FRIEND_TEST_ALL_PREFIXES(ObserverListTest, StdIteratorRemoveFront);
- ObserverType* GetCurrent() const;
- void EnsureValidIndex();
+ ObserverType* GetCurrent() const {
+ DCHECK(list_);
+ DCHECK_LT(index_, clamped_max_index());
+ return list_->observers_[index_];
+ }
+
+ void EnsureValidIndex() {
+ DCHECK(list_);
+ const size_t max_index = clamped_max_index();
+ while (index_ < max_index && !list_->observers_[index_])
+ ++index_;
+ }
size_t clamped_max_index() const {
return std::min(max_index_, list_->observers_.size());
@@ -116,7 +182,8 @@ class ObserverListBase
bool is_end() const { return !list_ || index_ == clamped_max_index(); }
- WeakPtr<ObserverListBase<ObserverType>> list_;
+ WeakPtr<ObserverList> list_;
+
// When initially constructed and each time the iterator is incremented,
// |index_| is guaranteed to point to a non-null index if the iterator
// has not reached the end of the ObserverList.
@@ -124,224 +191,91 @@ class ObserverListBase
size_t max_index_;
};
- using Iterator = Iter<ObserverListBase<ObserverType>>;
+ using iterator = Iter;
+ using const_iterator = Iter;
- using iterator = Iter<ObserverListBase<ObserverType>>;
- iterator begin() {
+ const_iterator begin() const {
// An optimization: do not involve weak pointers for empty list.
- // Note: can't use ?: operator here due to some MSVC bug (unit tests fail)
- if (observers_.empty())
- return iterator();
- return iterator(this);
+ return observers_.empty() ? const_iterator() : const_iterator(this);
}
- iterator end() { return iterator(); }
- using const_iterator = Iter<const ObserverListBase<ObserverType>>;
- const_iterator begin() const {
- if (observers_.empty())
- return const_iterator();
- return const_iterator(this);
- }
const_iterator end() const { return const_iterator(); }
- ObserverListBase() : notify_depth_(0), type_(NOTIFY_ALL) {}
- explicit ObserverListBase(NotificationType type)
- : notify_depth_(0), type_(type) {}
-
- // Add an observer to the list. An observer should not be added to
- // the same list more than once.
- void AddObserver(ObserverType* obs);
-
- // Remove an observer from the list if it is in the list.
- void RemoveObserver(ObserverType* obs);
-
- // Determine whether a particular observer is in the list.
- bool HasObserver(const ObserverType* observer) const;
-
- void Clear();
-
- protected:
- size_t size() const { return observers_.size(); }
-
- void Compact();
-
- private:
- friend class ObserverListThreadSafe<ObserverType>;
-
- typedef std::vector<ObserverType*> ListType;
+ ObserverList() {}
+ explicit ObserverList(ObserverListPolicy policy) : policy_(policy) {}
- ListType observers_;
- int notify_depth_;
- NotificationType type_;
+ ~ObserverList() {
+ if (check_empty) {
+ Compact();
+ DCHECK(observers_.empty());
+ }
+ }
- template <class ContainerType>
- friend class Iter;
+ // Add an observer to this list. An observer should not be added to the same
+ // list more than once.
+ //
+ // Precondition: obs != nullptr
+ // Precondition: !HasObserver(obs)
+ void AddObserver(ObserverType* obs) {
+ DCHECK(obs);
+ if (HasObserver(obs)) {
+ NOTREACHED() << "Observers can only be added once!";
+ return;
+ }
+ observers_.push_back(obs);
+ }
- DISALLOW_COPY_AND_ASSIGN(ObserverListBase);
-};
+ // Removes the given observer from this list. Does nothing if this observer is
+ // not in this list.
+ void RemoveObserver(const ObserverType* obs) {
+ DCHECK(obs);
+ const auto it = std::find(observers_.begin(), observers_.end(), obs);
+ if (it == observers_.end())
+ return;
-template <class ObserverType>
-template <class ContainerType>
-ObserverListBase<ObserverType>::Iter<ContainerType>::Iter()
- : index_(0), max_index_(0) {}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverListBase<ObserverType>::Iter<ContainerType>::Iter(ContainerType* list)
- : list_(const_cast<ObserverListBase<ObserverType>*>(list)->AsWeakPtr()),
- index_(0),
- max_index_(list->type_ == NOTIFY_ALL ? std::numeric_limits<size_t>::max()
- : list->observers_.size()) {
- EnsureValidIndex();
- DCHECK(list_);
- ++list_->notify_depth_;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverListBase<ObserverType>::Iter<ContainerType>::~Iter() {
- if (list_ && --list_->notify_depth_ == 0)
- list_->Compact();
-}
-
-template <class ObserverType>
-template <class ContainerType>
-bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator==(
- const Iter& other) const {
- if (is_end() && other.is_end())
- return true;
- return list_.get() == other.list_.get() && index_ == other.index_;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator!=(
- const Iter& other) const {
- return !operator==(other);
-}
-
-template <class ObserverType>
-template <class ContainerType>
-typename ObserverListBase<ObserverType>::template Iter<ContainerType>&
- ObserverListBase<ObserverType>::Iter<ContainerType>::operator++() {
- if (list_) {
- ++index_;
- EnsureValidIndex();
- }
- return *this;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::operator->()
- const {
- ObserverType* current = GetCurrent();
- DCHECK(current);
- return current;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverType& ObserverListBase<ObserverType>::Iter<ContainerType>::operator*()
- const {
- ObserverType* current = GetCurrent();
- DCHECK(current);
- return *current;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::GetCurrent()
- const {
- if (!list_)
- return nullptr;
- return index_ < clamped_max_index() ? list_->observers_[index_] : nullptr;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-void ObserverListBase<ObserverType>::Iter<ContainerType>::EnsureValidIndex() {
- if (!list_)
- return;
-
- size_t max_index = clamped_max_index();
- while (index_ < max_index && !list_->observers_[index_])
- ++index_;
-}
-
-template <class ObserverType>
-void ObserverListBase<ObserverType>::AddObserver(ObserverType* obs) {
- DCHECK(obs);
- if (ContainsValue(observers_, obs)) {
- NOTREACHED() << "Observers can only be added once!";
- return;
- }
- observers_.push_back(obs);
-}
-
-template <class ObserverType>
-void ObserverListBase<ObserverType>::RemoveObserver(ObserverType* obs) {
- DCHECK(obs);
- typename ListType::iterator it =
- std::find(observers_.begin(), observers_.end(), obs);
- if (it != observers_.end()) {
- if (notify_depth_) {
+ DCHECK_GE(live_iterator_count_, 0);
+ if (live_iterator_count_) {
*it = nullptr;
} else {
observers_.erase(it);
}
}
-}
-
-template <class ObserverType>
-bool ObserverListBase<ObserverType>::HasObserver(
- const ObserverType* observer) const {
- for (size_t i = 0; i < observers_.size(); ++i) {
- if (observers_[i] == observer)
- return true;
+
+ // Determine whether a particular observer is in the list.
+ bool HasObserver(const ObserverType* obs) const {
+ return ContainsValue(observers_, obs);
}
- return false;
-}
-
-template <class ObserverType>
-void ObserverListBase<ObserverType>::Clear() {
- if (notify_depth_) {
- for (typename ListType::iterator it = observers_.begin();
- it != observers_.end(); ++it) {
- *it = nullptr;
+
+ // Removes all the observers from this list.
+ void Clear() {
+ DCHECK_GE(live_iterator_count_, 0);
+ if (live_iterator_count_) {
+ std::fill(observers_.begin(), observers_.end(), nullptr);
+ } else {
+ observers_.clear();
}
- } else {
- observers_.clear();
}
-}
-template <class ObserverType>
-void ObserverListBase<ObserverType>::Compact() {
- observers_.erase(std::remove(observers_.begin(), observers_.end(), nullptr),
- observers_.end());
-}
+ bool might_have_observers() const { return !observers_.empty(); }
-template <class ObserverType, bool check_empty = false>
-class ObserverList : public ObserverListBase<ObserverType> {
- public:
- typedef typename ObserverListBase<ObserverType>::NotificationType
- NotificationType;
+ private:
+ // Compacts list of observers by removing null pointers.
+ void Compact() {
+ observers_.erase(std::remove(observers_.begin(), observers_.end(), nullptr),
+ observers_.end());
+ }
- ObserverList() {}
- explicit ObserverList(NotificationType type)
- : ObserverListBase<ObserverType>(type) {}
+ std::vector<ObserverType*> observers_;
- ~ObserverList() {
- // When check_empty is true, assert that the list is empty on destruction.
- if (check_empty) {
- ObserverListBase<ObserverType>::Compact();
- DCHECK_EQ(ObserverListBase<ObserverType>::size(), 0U);
- }
- }
+ // Number of active iterators referencing this ObserverList.
+ //
+ // This counter is not synchronized although it is modified by const
+ // iterators.
+ int live_iterator_count_ = 0;
- bool might_have_observers() const {
- return ObserverListBase<ObserverType>::size() != 0;
- }
+ const ObserverListPolicy policy_ = ObserverListPolicy::ALL;
+
+ DISALLOW_COPY_AND_ASSIGN(ObserverList);
};
} // namespace base
diff --git a/chromium/base/observer_list_threadsafe.cc b/chromium/base/observer_list_threadsafe.cc
new file mode 100644
index 00000000000..95c852f6be3
--- /dev/null
+++ b/chromium/base/observer_list_threadsafe.cc
@@ -0,0 +1,16 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/observer_list_threadsafe.h"
+
+namespace base {
+namespace internal {
+
+LazyInstance<ThreadLocalPointer<
+ const ObserverListThreadSafeBase::NotificationDataBase>>::Leaky
+ ObserverListThreadSafeBase::tls_current_notification_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index fb78676f437..a47a40743ee 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -7,7 +7,9 @@
#include <unordered_map>
+#include "base/base_export.h"
#include "base/bind.h"
+#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -54,28 +56,51 @@
namespace base {
namespace internal {
-template <typename ObserverType, typename Method>
-struct Dispatcher;
+class BASE_EXPORT ObserverListThreadSafeBase
+ : public RefCountedThreadSafe<ObserverListThreadSafeBase> {
+ public:
+ ObserverListThreadSafeBase() = default;
+
+ protected:
+ template <typename ObserverType, typename Method>
+ struct Dispatcher;
+
+ template <typename ObserverType, typename ReceiverType, typename... Params>
+ struct Dispatcher<ObserverType, void (ReceiverType::*)(Params...)> {
+ static void Run(void (ReceiverType::*m)(Params...),
+ Params... params,
+ ObserverType* obj) {
+ (obj->*m)(std::forward<Params>(params)...);
+ }
+ };
-template <typename ObserverType, typename ReceiverType, typename... Params>
-struct Dispatcher<ObserverType, void(ReceiverType::*)(Params...)> {
- static void Run(void(ReceiverType::* m)(Params...),
- Params... params, ObserverType* obj) {
- (obj->*m)(std::forward<Params>(params)...);
- }
+ struct NotificationDataBase {
+ NotificationDataBase(void* observer_list_in, const Location& from_here_in)
+ : observer_list(observer_list_in), from_here(from_here_in) {}
+
+ void* observer_list;
+ Location from_here;
+ };
+
+ virtual ~ObserverListThreadSafeBase() = default;
+
+ static LazyInstance<ThreadLocalPointer<const NotificationDataBase>>::Leaky
+ tls_current_notification_;
+
+ private:
+ friend class RefCountedThreadSafe<ObserverListThreadSafeBase>;
+
+ DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafeBase);
};
} // namespace internal
template <class ObserverType>
-class ObserverListThreadSafe
- : public RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>> {
+class ObserverListThreadSafe : public internal::ObserverListThreadSafeBase {
public:
- using NotificationType =
- typename ObserverList<ObserverType>::NotificationType;
-
ObserverListThreadSafe() = default;
- explicit ObserverListThreadSafe(NotificationType type) : type_(type) {}
+ explicit ObserverListThreadSafe(ObserverListPolicy policy)
+ : policy_(policy) {}
// Adds |observer| to the list. |observer| must not already be in the list.
void AddObserver(ObserverType* observer) {
@@ -93,18 +118,20 @@ class ObserverListThreadSafe
observers_[observer] = task_runner;
// If this is called while a notification is being dispatched on this thread
- // and |type_| is NOTIFY_ALL, |observer| must be notified (if a notification
- // is being dispatched on another thread in parallel, the notification may
- // or may not make it to |observer| depending on the outcome of the race to
+ // and |policy_| is ALL, |observer| must be notified (if a notification is
+ // being dispatched on another thread in parallel, the notification may or
+ // may not make it to |observer| depending on the outcome of the race to
// |lock_|).
- if (type_ == NotificationType::NOTIFY_ALL) {
- const NotificationData* current_notification =
- tls_current_notification_.Get();
- if (current_notification) {
+ if (policy_ == ObserverListPolicy::ALL) {
+ const NotificationDataBase* current_notification =
+ tls_current_notification_.Get().Get();
+ if (current_notification && current_notification->observer_list == this) {
task_runner->PostTask(
current_notification->from_here,
- BindOnce(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
- observer, *current_notification));
+ BindOnce(
+ &ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+ observer,
+ *static_cast<const NotificationData*>(current_notification)));
}
}
}
@@ -134,27 +161,28 @@ class ObserverListThreadSafe
template <typename Method, typename... Params>
void Notify(const Location& from_here, Method m, Params&&... params) {
Callback<void(ObserverType*)> method =
- Bind(&internal::Dispatcher<ObserverType, Method>::Run,
- m, std::forward<Params>(params)...);
+ Bind(&Dispatcher<ObserverType, Method>::Run, m,
+ std::forward<Params>(params)...);
AutoLock lock(lock_);
for (const auto& observer : observers_) {
observer.second->PostTask(
from_here,
BindOnce(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
- observer.first, NotificationData(from_here, method)));
+ observer.first, NotificationData(this, from_here, method)));
}
}
private:
- friend class RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
+ friend class RefCountedThreadSafe<ObserverListThreadSafeBase>;
- struct NotificationData {
- NotificationData(const Location& from_here_in,
+ struct NotificationData : public NotificationDataBase {
+ NotificationData(ObserverListThreadSafe* observer_list_in,
+ const Location& from_here_in,
const Callback<void(ObserverType*)>& method_in)
- : from_here(from_here_in), method(method_in) {}
+ : NotificationDataBase(observer_list_in, from_here_in),
+ method(method_in) {}
- Location from_here;
Callback<void(ObserverType*)> method;
};
@@ -178,19 +206,20 @@ class ObserverListThreadSafe
// Note: |tls_current_notification_| may not be nullptr if this runs in a
// nested loop started by a notification callback. In that case, it is
// important to save the previous value to restore it later.
- const NotificationData* const previous_notification =
- tls_current_notification_.Get();
- tls_current_notification_.Set(&notification);
+ auto& tls_current_notification = tls_current_notification_.Get();
+ const NotificationDataBase* const previous_notification =
+ tls_current_notification.Get();
+ tls_current_notification.Set(&notification);
// Invoke the callback.
notification.method.Run(observer);
// Reset the notification being dispatched on the current thread to its
// previous value.
- tls_current_notification_.Set(previous_notification);
+ tls_current_notification.Set(previous_notification);
}
- const NotificationType type_ = NotificationType::NOTIFY_ALL;
+ const ObserverListPolicy policy_ = ObserverListPolicy::ALL;
// Synchronizes access to |observers_|.
mutable Lock lock_;
@@ -200,9 +229,6 @@ class ObserverListThreadSafe
std::unordered_map<ObserverType*, scoped_refptr<SequencedTaskRunner>>
observers_;
- // Notification being dispatched on the current thread.
- ThreadLocalPointer<const NotificationData> tls_current_notification_;
-
DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
};
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index 0ece89cb30d..fd88a2d5e8a 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -13,6 +13,7 @@
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
@@ -30,14 +31,14 @@ namespace {
class Foo {
public:
virtual void Observe(int x) = 0;
- virtual ~Foo() {}
+ virtual ~Foo() = default;
virtual int GetValue() const { return 0; }
};
class Adder : public Foo {
public:
explicit Adder(int scaler) : total(0), scaler_(scaler) {}
- ~Adder() override {}
+ ~Adder() override = default;
void Observe(int x) override { total += x * scaler_; }
int GetValue() const override { return total; }
@@ -57,7 +58,7 @@ class Disrupter : public Foo {
Disrupter(ObserverList<Foo>* list, bool remove_self)
: Disrupter(list, nullptr, remove_self) {}
- ~Disrupter() override {}
+ ~Disrupter() override = default;
void Observe(int x) override {
if (remove_self_)
@@ -115,7 +116,7 @@ class AddRemoveThread : public PlatformThread::Delegate,
ready_(ready),
weak_factory_(this) {}
- ~AddRemoveThread() override {}
+ ~AddRemoveThread() override = default;
void ThreadMain() override {
loop_ = new MessageLoop(); // Fire up a message loop.
@@ -195,14 +196,86 @@ class AddRemoveThread : public PlatformThread::Delegate,
TEST(ObserverListTest, BasicTest) {
ObserverList<Foo> observer_list;
+ const ObserverList<Foo>& const_observer_list = observer_list;
+
+ {
+ const ObserverList<Foo>::const_iterator it1 = const_observer_list.begin();
+ EXPECT_EQ(it1, const_observer_list.end());
+ // Iterator copy.
+ const ObserverList<Foo>::const_iterator it2 = it1;
+ EXPECT_EQ(it2, it1);
+ // Iterator assignment.
+ ObserverList<Foo>::const_iterator it3;
+ it3 = it2;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ // Self assignment.
+ it3 = it3;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ }
+
+ {
+ const ObserverList<Foo>::iterator it1 = observer_list.begin();
+ EXPECT_EQ(it1, observer_list.end());
+ // Iterator copy.
+ const ObserverList<Foo>::iterator it2 = it1;
+ EXPECT_EQ(it2, it1);
+ // Iterator assignment.
+ ObserverList<Foo>::iterator it3;
+ it3 = it2;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ // Self assignment.
+ it3 = it3;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ }
+
Adder a(1), b(-1), c(1), d(-1), e(-1);
Disrupter evil(&observer_list, &c);
observer_list.AddObserver(&a);
observer_list.AddObserver(&b);
- EXPECT_TRUE(observer_list.HasObserver(&a));
- EXPECT_FALSE(observer_list.HasObserver(&c));
+ EXPECT_TRUE(const_observer_list.HasObserver(&a));
+ EXPECT_FALSE(const_observer_list.HasObserver(&c));
+
+ {
+ const ObserverList<Foo>::const_iterator it1 = const_observer_list.begin();
+ EXPECT_NE(it1, const_observer_list.end());
+ // Iterator copy.
+ const ObserverList<Foo>::const_iterator it2 = it1;
+ EXPECT_EQ(it2, it1);
+ EXPECT_NE(it2, const_observer_list.end());
+ // Iterator assignment.
+ ObserverList<Foo>::const_iterator it3;
+ it3 = it2;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ // Self assignment.
+ it3 = it3;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ }
+
+ {
+ const ObserverList<Foo>::iterator it1 = observer_list.begin();
+ EXPECT_NE(it1, observer_list.end());
+ // Iterator copy.
+ const ObserverList<Foo>::iterator it2 = it1;
+ EXPECT_EQ(it2, it1);
+ EXPECT_NE(it2, observer_list.end());
+ // Iterator assignment.
+ ObserverList<Foo>::iterator it3;
+ it3 = it2;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ // Self assignment.
+ it3 = it3;
+ EXPECT_EQ(it3, it1);
+ EXPECT_EQ(it3, it2);
+ }
for (auto& observer : observer_list)
observer.Observe(10);
@@ -224,6 +297,76 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(0, e.total);
}
+TEST(ObserverListTest, CompactsWhenNoActiveIterator) {
+ ObserverList<const Foo> ol;
+ const ObserverList<const Foo>& col = ol;
+
+ const Adder a(1);
+ const Adder b(2);
+ const Adder c(3);
+
+ ol.AddObserver(&a);
+ ol.AddObserver(&b);
+
+ EXPECT_TRUE(col.HasObserver(&a));
+ EXPECT_FALSE(col.HasObserver(&c));
+
+ EXPECT_TRUE(col.might_have_observers());
+
+ using It = ObserverList<const Foo>::const_iterator;
+
+ {
+ It it = col.begin();
+ EXPECT_NE(it, col.end());
+ It ita = it;
+ EXPECT_EQ(ita, it);
+ EXPECT_NE(++it, col.end());
+ EXPECT_NE(ita, it);
+ It itb = it;
+ EXPECT_EQ(itb, it);
+ EXPECT_EQ(++it, col.end());
+
+ EXPECT_TRUE(col.might_have_observers());
+ EXPECT_EQ(&*ita, &a);
+ EXPECT_EQ(&*itb, &b);
+
+ ol.RemoveObserver(&a);
+ EXPECT_TRUE(col.might_have_observers());
+ EXPECT_FALSE(col.HasObserver(&a));
+ EXPECT_EQ(&*itb, &b);
+
+ ol.RemoveObserver(&b);
+ EXPECT_TRUE(col.might_have_observers());
+ EXPECT_FALSE(col.HasObserver(&a));
+ EXPECT_FALSE(col.HasObserver(&b));
+
+ it = It();
+ ita = It();
+ EXPECT_TRUE(col.might_have_observers());
+ ita = itb;
+ itb = It();
+ EXPECT_TRUE(col.might_have_observers());
+ ita = It();
+ EXPECT_FALSE(col.might_have_observers());
+ }
+
+ ol.AddObserver(&a);
+ ol.AddObserver(&b);
+ EXPECT_TRUE(col.might_have_observers());
+ ol.Clear();
+ EXPECT_FALSE(col.might_have_observers());
+
+ ol.AddObserver(&a);
+ ol.AddObserver(&b);
+ EXPECT_TRUE(col.might_have_observers());
+ {
+ const It it = col.begin();
+ ol.Clear();
+ EXPECT_TRUE(col.might_have_observers());
+ }
+ EXPECT_FALSE(col.might_have_observers());
+}
+
TEST(ObserverListTest, DisruptSelf) {
ObserverList<Foo> observer_list;
Adder a(1), b(-1), c(1), d(-1);
@@ -385,7 +528,7 @@ TEST(ObserverListThreadSafeTest, WithoutSequence) {
class FooRemover : public Foo {
public:
explicit FooRemover(ObserverListThreadSafe<Foo>* list) : list_(list) {}
- ~FooRemover() override {}
+ ~FooRemover() override = default;
void AddFooToRemove(Foo* foo) {
foos_.push_back(foo);
@@ -632,7 +775,7 @@ TEST(ObserverListThreadSafeTest, RemoveWhileNotificationIsRunning) {
}
TEST(ObserverListTest, Existing) {
- ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
+ ObserverList<Foo> observer_list(ObserverListPolicy::EXISTING_ONLY);
Adder a(1);
AddInObserve<ObserverList<Foo> > b(&observer_list);
Adder c(1);
@@ -658,8 +801,8 @@ TEST(ObserverListTest, Existing) {
// Same as above, but for ObserverListThreadSafe
TEST(ObserverListThreadSafeTest, Existing) {
MessageLoop loop;
- scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
- new ObserverListThreadSafe<Foo>(ObserverList<Foo>::NOTIFY_EXISTING_ONLY));
+ scoped_refptr<ObserverListThreadSafe<Foo>> observer_list(
+ new ObserverListThreadSafe<Foo>(ObserverListPolicy::EXISTING_ONLY));
Adder a(1);
AddInObserve<ObserverListThreadSafe<Foo> > b(observer_list.get());
Adder c(1);
@@ -717,7 +860,7 @@ TEST(ObserverListTest, ClearNotifyAll) {
}
TEST(ObserverListTest, ClearNotifyExistingOnly) {
- ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
+ ObserverList<Foo> observer_list(ObserverListPolicy::EXISTING_ONLY);
AddInClearObserve a(&observer_list);
observer_list.AddObserver(&a);
@@ -732,7 +875,7 @@ TEST(ObserverListTest, ClearNotifyExistingOnly) {
class ListDestructor : public Foo {
public:
explicit ListDestructor(ObserverList<Foo>* list) : list_(list) {}
- ~ListDestructor() override {}
+ ~ListDestructor() override = default;
void Observe(int x) override { delete list_; }
diff --git a/chromium/base/path_service.cc b/chromium/base/path_service.cc
index 2caa3eaf0db..6ac501eafee 100644
--- a/chromium/base/path_service.cc
+++ b/chromium/base/path_service.cc
@@ -52,15 +52,11 @@ struct Provider {
bool is_static;
};
-Provider base_provider = {
- PathProvider,
- NULL,
+Provider base_provider = {PathProvider, nullptr,
#ifndef NDEBUG
- PATH_START,
- PATH_END,
+ PATH_START, PATH_END,
#endif
- true
-};
+ true};
#if defined(OS_WIN)
Provider base_provider_win = {
@@ -190,7 +186,7 @@ bool PathService::Get(int key, FilePath* result) {
if (key == DIR_CURRENT)
return GetCurrentDirectory(result);
- Provider* provider = NULL;
+ Provider* provider = nullptr;
{
AutoLock scoped_lock(path_data->lock);
if (LockedGetFromCache(key, path_data, result))
diff --git a/chromium/base/pending_task.cc b/chromium/base/pending_task.cc
index 64dc1da56f3..31f2d2d9beb 100644
--- a/chromium/base/pending_task.cc
+++ b/chromium/base/pending_task.cc
@@ -33,8 +33,7 @@ PendingTask::PendingTask(const Location& posted_from,
PendingTask::PendingTask(PendingTask&& other) = default;
-PendingTask::~PendingTask() {
-}
+PendingTask::~PendingTask() = default;
PendingTask& PendingTask::operator=(PendingTask&& other) = default;
diff --git a/chromium/base/pickle.cc b/chromium/base/pickle.cc
index 160d838ffb9..c2189c8fb57 100644
--- a/chromium/base/pickle.cc
+++ b/chromium/base/pickle.cc
@@ -53,7 +53,7 @@ template<typename Type>
inline const char* PickleIterator::GetReadPointerAndAdvance() {
if (sizeof(Type) > end_index_ - read_index_) {
read_index_ = end_index_;
- return NULL;
+ return nullptr;
}
const char* current_read_ptr = payload_ + read_index_;
Advance(sizeof(Type));
@@ -64,7 +64,7 @@ const char* PickleIterator::GetReadPointerAndAdvance(int num_bytes) {
if (num_bytes < 0 ||
end_index_ - read_index_ < static_cast<size_t>(num_bytes)) {
read_index_ = end_index_;
- return NULL;
+ return nullptr;
}
const char* current_read_ptr = payload_ + read_index_;
Advance(num_bytes);
@@ -77,7 +77,7 @@ inline const char* PickleIterator::GetReadPointerAndAdvance(
// Check for int32_t overflow.
int num_bytes;
if (!CheckMul(num_elements, size_element).AssignIfValid(&num_bytes))
- return NULL;
+ return nullptr;
return GetReadPointerAndAdvance(num_bytes);
}
@@ -191,7 +191,7 @@ bool PickleIterator::ReadStringPiece16(StringPiece16* result) {
bool PickleIterator::ReadData(const char** data, int* length) {
*length = 0;
- *data = 0;
+ *data = nullptr;
if (!ReadInt(length))
return false;
@@ -207,14 +207,14 @@ bool PickleIterator::ReadBytes(const char** data, int length) {
return true;
}
-Pickle::Attachment::Attachment() {}
+Pickle::Attachment::Attachment() = default;
-Pickle::Attachment::~Attachment() {}
+Pickle::Attachment::~Attachment() = default;
// Payload is uint32_t aligned.
Pickle::Pickle()
- : header_(NULL),
+ : header_(nullptr),
header_size_(sizeof(Header)),
capacity_after_header_(0),
write_offset_(0) {
@@ -225,7 +225,7 @@ Pickle::Pickle()
}
Pickle::Pickle(int header_size)
- : header_(NULL),
+ : header_(nullptr),
header_size_(bits::Align(header_size, sizeof(uint32_t))),
capacity_after_header_(0),
write_offset_(0) {
@@ -251,11 +251,11 @@ Pickle::Pickle(const char* data, int data_len)
// If there is anything wrong with the data, we're not going to use it.
if (!header_size_)
- header_ = NULL;
+ header_ = nullptr;
}
Pickle::Pickle(const Pickle& other)
- : header_(NULL),
+ : header_(nullptr),
header_size_(other.header_size_),
capacity_after_header_(0),
write_offset_(other.write_offset_) {
@@ -273,12 +273,12 @@ Pickle& Pickle::operator=(const Pickle& other) {
return *this;
}
if (capacity_after_header_ == kCapacityReadOnly) {
- header_ = NULL;
+ header_ = nullptr;
capacity_after_header_ = 0;
}
if (header_size_ != other.header_size_) {
free(header_);
- header_ = NULL;
+ header_ = nullptr;
header_size_ = other.header_size_;
}
Resize(other.header_->payload_size);
@@ -360,10 +360,10 @@ const char* Pickle::FindNext(size_t header_size,
const char* end) {
size_t pickle_size = 0;
if (!PeekNext(header_size, start, end, &pickle_size))
- return NULL;
+ return nullptr;
if (pickle_size > static_cast<size_t>(end - start))
- return NULL;
+ return nullptr;
return start + pickle_size;
}
diff --git a/chromium/base/pickle_unittest.cc b/chromium/base/pickle_unittest.cc
index dedb819fe64..6c302572e4d 100644
--- a/chromium/base/pickle_unittest.cc
+++ b/chromium/base/pickle_unittest.cc
@@ -323,9 +323,9 @@ TEST(PickleTest, FindNext) {
const char* start = reinterpret_cast<const char*>(pickle.data());
const char* end = start + pickle.size();
- EXPECT_TRUE(end == Pickle::FindNext(pickle.header_size_, start, end));
- EXPECT_TRUE(NULL == Pickle::FindNext(pickle.header_size_, start, end - 1));
- EXPECT_TRUE(end == Pickle::FindNext(pickle.header_size_, start, end + 1));
+ EXPECT_EQ(end, Pickle::FindNext(pickle.header_size_, start, end));
+ EXPECT_EQ(nullptr, Pickle::FindNext(pickle.header_size_, start, end - 1));
+ EXPECT_EQ(end, Pickle::FindNext(pickle.header_size_, start, end + 1));
}
TEST(PickleTest, FindNextWithIncompleteHeader) {
@@ -336,7 +336,7 @@ TEST(PickleTest, FindNextWithIncompleteHeader) {
const char* start = buffer.get();
const char* end = start + header_size - 1;
- EXPECT_TRUE(NULL == Pickle::FindNext(header_size, start, end));
+ EXPECT_EQ(nullptr, Pickle::FindNext(header_size, start, end));
}
#if defined(COMPILER_MSVC)
@@ -357,14 +357,14 @@ TEST(PickleTest, FindNextOverflow) {
return;
header->payload_size = -(reinterpret_cast<uintptr_t>(start) + header_size2);
- EXPECT_TRUE(NULL == Pickle::FindNext(header_size2, start, end));
+ EXPECT_EQ(nullptr, Pickle::FindNext(header_size2, start, end));
header->payload_size = -header_size2;
- EXPECT_TRUE(NULL == Pickle::FindNext(header_size2, start, end));
+ EXPECT_EQ(nullptr, Pickle::FindNext(header_size2, start, end));
header->payload_size = 0;
end = start + header_size;
- EXPECT_TRUE(NULL == Pickle::FindNext(header_size2, start, end));
+ EXPECT_EQ(nullptr, Pickle::FindNext(header_size2, start, end));
}
#if defined(COMPILER_MSVC)
#pragma warning(pop)
@@ -485,7 +485,7 @@ TEST(PickleTest, EvilLengths) {
// Check we can write zero bytes of data and 'data' can be NULL.
TEST(PickleTest, ZeroLength) {
Pickle pickle;
- pickle.WriteData(NULL, 0);
+ pickle.WriteData(nullptr, 0);
PickleIterator iter(pickle);
const char* outdata;
@@ -502,7 +502,7 @@ TEST(PickleTest, ReadBytes) {
pickle.WriteBytes(&data, sizeof(data));
PickleIterator iter(pickle);
- const char* outdata_char = NULL;
+ const char* outdata_char = nullptr;
EXPECT_TRUE(iter.ReadBytes(&outdata_char, sizeof(data)));
int outdata;
@@ -529,7 +529,7 @@ namespace {
// Publicly exposes the ClaimBytes interface for testing.
class TestingPickle : public Pickle {
public:
- TestingPickle() {}
+ TestingPickle() = default;
void* ClaimBytes(size_t num_bytes) { return Pickle::ClaimBytes(num_bytes); }
};
diff --git a/chromium/base/posix/global_descriptors.cc b/chromium/base/posix/global_descriptors.cc
index 9cc75531bc7..738d14e3ad5 100644
--- a/chromium/base/posix/global_descriptors.cc
+++ b/chromium/base/posix/global_descriptors.cc
@@ -94,8 +94,8 @@ void GlobalDescriptors::Reset(const Mapping& mapping) {
descriptors_ = mapping;
}
-GlobalDescriptors::GlobalDescriptors() {}
+GlobalDescriptors::GlobalDescriptors() = default;
-GlobalDescriptors::~GlobalDescriptors() {}
+GlobalDescriptors::~GlobalDescriptors() = default;
} // namespace base
diff --git a/chromium/base/posix/safe_strerror.cc b/chromium/base/posix/safe_strerror.cc
index 798658e9620..aef5742d331 100644
--- a/chromium/base/posix/safe_strerror.cc
+++ b/chromium/base/posix/safe_strerror.cc
@@ -108,7 +108,7 @@ static void POSSIBLY_UNUSED wrap_posix_strerror_r(
}
void safe_strerror_r(int err, char *buf, size_t len) {
- if (buf == NULL || len <= 0) {
+ if (buf == nullptr || len <= 0) {
return;
}
// If using glibc (i.e., Linux), the compiler will automatically select the
diff --git a/chromium/base/posix/unix_domain_socket.cc b/chromium/base/posix/unix_domain_socket.cc
index 4e288f01264..578a53cb349 100644
--- a/chromium/base/posix/unix_domain_socket.cc
+++ b/chromium/base/posix/unix_domain_socket.cc
@@ -76,7 +76,7 @@ bool UnixDomainSocket::SendMsg(int fd,
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
- char* control_buffer = NULL;
+ char* control_buffer = nullptr;
if (fds.size()) {
const unsigned control_len = CMSG_SPACE(sizeof(int) * fds.size());
control_buffer = new char[control_len];
@@ -119,7 +119,7 @@ ssize_t UnixDomainSocket::RecvMsg(int fd,
void* buf,
size_t length,
std::vector<ScopedFD>* fds) {
- return UnixDomainSocket::RecvMsgWithPid(fd, buf, length, fds, NULL);
+ return UnixDomainSocket::RecvMsgWithPid(fd, buf, length, fds, nullptr);
}
// static
@@ -161,7 +161,7 @@ ssize_t UnixDomainSocket::RecvMsgWithFlags(int fd,
if (r == -1)
return -1;
- int* wire_fds = NULL;
+ int* wire_fds = nullptr;
unsigned wire_fds_len = 0;
ProcessId pid = -1;
@@ -261,14 +261,14 @@ ssize_t UnixDomainSocket::SendRecvMsgWithFlags(int fd,
// When porting to OSX keep in mind it doesn't support MSG_NOSIGNAL, so the
// sender might get a SIGPIPE.
const ssize_t reply_len = RecvMsgWithFlags(
- recv_sock.get(), reply, max_reply_len, recvmsg_flags, &recv_fds, NULL);
+ recv_sock.get(), reply, max_reply_len, recvmsg_flags, &recv_fds, nullptr);
recv_sock.reset();
if (reply_len == -1)
return -1;
// If we received more file descriptors than caller expected, then we treat
// that as an error.
- if (recv_fds.size() > (result_fd != NULL ? 1 : 0)) {
+ if (recv_fds.size() > (result_fd != nullptr ? 1 : 0)) {
NOTREACHED();
return -1;
}
diff --git a/chromium/base/posix/unix_domain_socket_unittest.cc b/chromium/base/posix/unix_domain_socket_unittest.cc
index eed7180febb..453064f5353 100644
--- a/chromium/base/posix/unix_domain_socket_unittest.cc
+++ b/chromium/base/posix/unix_domain_socket_unittest.cc
@@ -59,8 +59,7 @@ TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
Pickle request;
message_thread.task_runner()->PostTask(
FROM_HERE, BindOnce(IgnoreResult(&UnixDomainSocket::SendRecvMsg), fds[1],
- static_cast<uint8_t*>(NULL), 0U,
- static_cast<int*>(NULL), request));
+ nullptr, 0U, nullptr, request));
// Receive the message.
std::vector<ScopedFD> message_fds;
@@ -95,11 +94,10 @@ TEST(UnixDomainSocketTest, SendRecvMsgAvoidsSIGPIPE) {
// message is sent with MSG_NOSIGNAL, this shall result in SIGPIPE.
Pickle request;
ASSERT_EQ(
- -1, UnixDomainSocket::SendRecvMsg(fds[1], static_cast<uint8_t*>(NULL), 0U,
- static_cast<int*>(NULL), request));
+ -1, UnixDomainSocket::SendRecvMsg(fds[1], nullptr, 0U, nullptr, request));
ASSERT_EQ(EPIPE, errno);
// Restore the SIGPIPE handler.
- ASSERT_EQ(0, sigaction(SIGPIPE, &oldact, NULL));
+ ASSERT_EQ(0, sigaction(SIGPIPE, &oldact, nullptr));
}
// Simple sanity check within a single process that receiving PIDs works.
diff --git a/chromium/base/power_monitor/power_monitor.cc b/chromium/base/power_monitor/power_monitor.cc
index 11082df6957..30e06a2a833 100644
--- a/chromium/base/power_monitor/power_monitor.cc
+++ b/chromium/base/power_monitor/power_monitor.cc
@@ -10,7 +10,7 @@
namespace base {
-static PowerMonitor* g_power_monitor = NULL;
+static PowerMonitor* g_power_monitor = nullptr;
PowerMonitor::PowerMonitor(std::unique_ptr<PowerMonitorSource> source)
: observers_(new ObserverListThreadSafe<PowerObserver>()),
@@ -21,7 +21,7 @@ PowerMonitor::PowerMonitor(std::unique_ptr<PowerMonitorSource> source)
PowerMonitor::~PowerMonitor() {
DCHECK_EQ(this, g_power_monitor);
- g_power_monitor = NULL;
+ g_power_monitor = nullptr;
}
// static
diff --git a/chromium/base/power_monitor/power_monitor_device_source_android.cc b/chromium/base/power_monitor/power_monitor_device_source_android.cc
index 5a764759b13..7688513501d 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_android.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source_android.cc
@@ -19,7 +19,9 @@ namespace android {
// Native implementation of PowerMonitor.java. Note: This will be invoked by
// PowerMonitor.java shortly after startup to set the correct initial value for
// "is on battery power."
-void OnBatteryChargingChanged(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
+void JNI_PowerMonitor_OnBatteryChargingChanged(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz) {
ProcessPowerEventHelper(PowerMonitorSource::POWER_STATE_EVENT);
}
diff --git a/chromium/base/power_monitor/power_monitor_source.cc b/chromium/base/power_monitor/power_monitor_source.cc
index 5d27a0e5a07..d4757b0629f 100644
--- a/chromium/base/power_monitor/power_monitor_source.cc
+++ b/chromium/base/power_monitor/power_monitor_source.cc
@@ -9,8 +9,8 @@
namespace base {
-PowerMonitorSource::PowerMonitorSource() {}
-PowerMonitorSource::~PowerMonitorSource() {}
+PowerMonitorSource::PowerMonitorSource() = default;
+PowerMonitorSource::~PowerMonitorSource() = default;
bool PowerMonitorSource::IsOnBatteryPower() {
AutoLock auto_lock(battery_lock_);
diff --git a/chromium/base/power_monitor/power_monitor_unittest.cc b/chromium/base/power_monitor/power_monitor_unittest.cc
index f1c24598740..7f2a84b774b 100644
--- a/chromium/base/power_monitor/power_monitor_unittest.cc
+++ b/chromium/base/power_monitor/power_monitor_unittest.cc
@@ -17,7 +17,7 @@ class PowerMonitorTest : public testing::Test {
power_monitor_.reset(new PowerMonitor(
std::unique_ptr<PowerMonitorSource>(power_monitor_source_)));
}
- ~PowerMonitorTest() override {}
+ ~PowerMonitorTest() override = default;
PowerMonitorTestSource* source() { return power_monitor_source_; }
PowerMonitor* monitor() { return power_monitor_.get(); }
diff --git a/chromium/base/process/kill.cc b/chromium/base/process/kill.cc
index 5d8ba6a2d75..9fa0a0e1039 100644
--- a/chromium/base/process/kill.cc
+++ b/chromium/base/process/kill.cc
@@ -15,6 +15,14 @@ bool KillProcesses(const FilePath::StringType& executable_name,
NamedProcessIterator iter(executable_name, filter);
while (const ProcessEntry* entry = iter.NextProcessEntry()) {
Process process = Process::Open(entry->pid());
+ // Sometimes process open fails. This would cause a DCHECK in
+ // process.Terminate(). Maybe the process has killed itself between the
+ // time the process list was enumerated and the time we try to open the
+ // process?
+ if (!process.IsValid()) {
+ result = false;
+ continue;
+ }
result &= process.Terminate(exit_code, true);
}
return result;
diff --git a/chromium/base/process/kill.h b/chromium/base/process/kill.h
index 61db31c434a..524ed040d9b 100644
--- a/chromium/base/process/kill.h
+++ b/chromium/base/process/kill.h
@@ -24,6 +24,20 @@ namespace win {
// See definition in sandbox/win/src/sandbox_types.h
const DWORD kSandboxFatalMemoryExceeded = 7012;
+// Exit codes with special meanings on Windows.
+const DWORD kNormalTerminationExitCode = 0;
+const DWORD kDebuggerInactiveExitCode = 0xC0000354;
+const DWORD kKeyboardInterruptExitCode = 0xC000013A;
+const DWORD kDebuggerTerminatedExitCode = 0x40010004;
+
+// This exit code is used by the Windows task manager when it kills a
+// process. It's value is obviously not that unique, and it's
+// surprising to me that the task manager uses this value, but it
+// seems to be common practice on Windows to test for it as an
+// indication that the task manager has killed something if the
+// process goes away.
+const DWORD kProcessKilledExitCode = 1;
+
} // namespace win
#endif // OS_WIN
diff --git a/chromium/base/process/kill_posix.cc b/chromium/base/process/kill_posix.cc
index 0b390d56471..2427fde28ef 100644
--- a/chromium/base/process/kill_posix.cc
+++ b/chromium/base/process/kill_posix.cc
@@ -10,6 +10,7 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "base/debug/activity_tracker.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
@@ -138,12 +139,14 @@ namespace {
// Return true if the given child is dead. This will also reap the process.
// Doesn't block.
static bool IsChildDead(pid_t child) {
- const pid_t result = HANDLE_EINTR(waitpid(child, NULL, WNOHANG));
+ int status;
+ const pid_t result = HANDLE_EINTR(waitpid(child, &status, WNOHANG));
if (result == -1) {
DPLOG(ERROR) << "waitpid(" << child << ")";
NOTREACHED();
} else if (result > 0) {
// The child has died.
+ Process(child).Exited(WIFEXITED(status) ? WEXITSTATUS(status) : -1);
return true;
}
@@ -168,7 +171,7 @@ class BackgroundReaper : public PlatformThread::Delegate {
void WaitForChildToDie() {
// Wait forever case.
if (timeout_ == 0) {
- pid_t r = HANDLE_EINTR(waitpid(child_, NULL, 0));
+ pid_t r = HANDLE_EINTR(waitpid(child_, nullptr, 0));
if (r != child_) {
DPLOG(ERROR) << "While waiting for " << child_
<< " to terminate, we got the following result: " << r;
@@ -189,7 +192,7 @@ class BackgroundReaper : public PlatformThread::Delegate {
if (kill(child_, SIGKILL) == 0) {
// SIGKILL is uncatchable. Since the signal was delivered, we can
// just wait for the process to die now in a blocking manner.
- if (HANDLE_EINTR(waitpid(child_, NULL, 0)) < 0)
+ if (HANDLE_EINTR(waitpid(child_, nullptr, 0)) < 0)
DPLOG(WARNING) << "waitpid";
} else {
DLOG(ERROR) << "While waiting for " << child_ << " to terminate we"
diff --git a/chromium/base/process/kill_win.cc b/chromium/base/process/kill_win.cc
index 6a0038e2c00..d7b6db6d91b 100644
--- a/chromium/base/process/kill_win.cc
+++ b/chromium/base/process/kill_win.cc
@@ -13,6 +13,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/debug/activity_tracker.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/process/memory.h"
@@ -23,19 +24,17 @@ namespace base {
namespace {
-// Exit codes with special meanings on Windows.
-const DWORD kNormalTerminationExitCode = 0;
-const DWORD kDebuggerInactiveExitCode = 0xC0000354;
-const DWORD kKeyboardInterruptExitCode = 0xC000013A;
-const DWORD kDebuggerTerminatedExitCode = 0x40010004;
-
-// This exit code is used by the Windows task manager when it kills a
-// process. It's value is obviously not that unique, and it's
-// surprising to me that the task manager uses this value, but it
-// seems to be common practice on Windows to test for it as an
-// indication that the task manager has killed something if the
-// process goes away.
-const DWORD kProcessKilledExitCode = 1;
+bool CheckForProcessExitAndReport(const Process& process) {
+ if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0) {
+ int exit_code;
+ TerminationStatus status =
+ GetTerminationStatus(process.Handle(), &exit_code);
+ DCHECK_NE(TERMINATION_STATUS_STILL_RUNNING, status);
+ process.Exited(exit_code);
+ return true;
+ }
+ return false;
+}
} // namespace
@@ -56,7 +55,7 @@ TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
// to leave exit_code uninitialized, since that could cause
// random interpretations of the exit code. So we assume it
// terminated "normally" in this case.
- *exit_code = kNormalTerminationExitCode;
+ *exit_code = win::kNormalTerminationExitCode;
// Assume the child has exited normally if we can't get the exit
// code.
@@ -84,17 +83,17 @@ TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
*exit_code = tmp_exit_code;
switch (tmp_exit_code) {
- case kNormalTerminationExitCode:
+ case win::kNormalTerminationExitCode:
return TERMINATION_STATUS_NORMAL_TERMINATION;
- case kDebuggerInactiveExitCode: // STATUS_DEBUGGER_INACTIVE.
- case kKeyboardInterruptExitCode: // Control-C/end session.
- case kDebuggerTerminatedExitCode: // Debugger terminated process.
- case kProcessKilledExitCode: // Task manager kill.
+ case win::kDebuggerInactiveExitCode: // STATUS_DEBUGGER_INACTIVE.
+ case win::kKeyboardInterruptExitCode: // Control-C/end session.
+ case win::kDebuggerTerminatedExitCode: // Debugger terminated process.
+ case win::kProcessKilledExitCode: // Task manager kill.
return TERMINATION_STATUS_PROCESS_WAS_KILLED;
- case base::win::kSandboxFatalMemoryExceeded: // Terminated process due to
- // exceeding the sandbox job
- // object memory limits.
- case base::win::kOomExceptionCode: // Ran out of memory.
+ case win::kSandboxFatalMemoryExceeded: // Terminated process due to
+ // exceeding the sandbox job
+ // object memory limits.
+ case win::kOomExceptionCode: // Ran out of memory.
return TERMINATION_STATUS_OOM;
default:
// All other exit codes indicate crashes.
@@ -139,7 +138,7 @@ void EnsureProcessTerminated(Process process) {
DCHECK(!process.is_current());
// If already signaled, then we are done!
- if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0)
+ if (CheckForProcessExitAndReport(process))
return;
PostDelayedTaskWithTraits(
@@ -147,9 +146,9 @@ void EnsureProcessTerminated(Process process) {
{TaskPriority::BACKGROUND, TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
Bind(
[](Process process) {
- if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0)
+ if (CheckForProcessExitAndReport(process))
return;
- process.Terminate(kProcessKilledExitCode, false);
+ process.Terminate(win::kProcessKilledExitCode, false);
},
Passed(&process)),
TimeDelta::FromSeconds(2));
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index 0c425a7a95c..2e06c4c386a 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -206,8 +206,8 @@ struct BASE_EXPORT LaunchOptions {
// no capabilities.
// By default the child will inherit the same capabilities, job, and CWD
// from the parent process.
- uint32_t clone_flags = LP_CLONE_FDIO_NAMESPACE | LP_CLONE_DEFAULT_JOB |
- LP_CLONE_FDIO_CWD | LP_CLONE_FDIO_STDIO;
+ uint32_t clone_flags =
+ LP_CLONE_FDIO_NAMESPACE | LP_CLONE_DEFAULT_JOB | LP_CLONE_FDIO_STDIO;
#endif // defined(OS_FUCHSIA)
#if defined(OS_POSIX)
diff --git a/chromium/base/process/launch_fuchsia.cc b/chromium/base/process/launch_fuchsia.cc
index 16ac513b570..b79210f842b 100644
--- a/chromium/base/process/launch_fuchsia.cc
+++ b/chromium/base/process/launch_fuchsia.cc
@@ -11,6 +11,7 @@
#include <zircon/processargs.h>
#include "base/command_line.h"
+#include "base/files/file_util.h"
#include "base/fuchsia/default_job.h"
#include "base/logging.h"
@@ -96,9 +97,10 @@ Process LaunchProcess(const std::vector<std::string>& argv,
EnvironmentMap environ_modifications = options.environ;
if (!options.current_directory.empty()) {
environ_modifications["PWD"] = options.current_directory.value();
-
- // Don't clone the parent's CWD if we are overriding the child's PWD.
- to_clone = to_clone & ~LP_CLONE_FDIO_CWD;
+ } else {
+ FilePath cwd;
+ base::GetCurrentDirectory(&cwd);
+ environ_modifications["PWD"] = cwd.value();
}
if (to_clone & LP_CLONE_DEFAULT_JOB) {
diff --git a/chromium/base/process/launch_mac.cc b/chromium/base/process/launch_mac.cc
index dfa9257a51e..06dbb99c245 100644
--- a/chromium/base/process/launch_mac.cc
+++ b/chromium/base/process/launch_mac.cc
@@ -169,7 +169,7 @@ Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
if (options.wait) {
// While this isn't strictly disk IO, waiting for another process to
// finish is the sort of thing ThreadRestrictions is trying to prevent.
- base::ThreadRestrictions::AssertIOAllowed();
+ base::AssertBlockingAllowed();
pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
DPCHECK(ret > 0);
}
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index 9c769891062..148ad4f54c7 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -68,6 +68,13 @@ extern char** environ;
namespace base {
+// Friend and derived class of ScopedAllowBaseSyncPrimitives which allows
+// GetAppOutputInternal() to join a process. GetAppOutputInternal() can't itself
+// be a friend of ScopedAllowBaseSyncPrimitives because it is in the anonymous
+// namespace.
+class GetAppOutputScopedAllowBaseSyncPrimitives
+ : public base::ScopedAllowBaseSyncPrimitives {};
+
#if !defined(OS_NACL_NONSFI)
namespace {
@@ -163,7 +170,7 @@ int sys_rt_sigaction(int sig, const struct kernel_sigaction* act,
// See crbug.com/177956.
void ResetChildSignalHandlersToDefaults(void) {
for (int signum = 1; ; ++signum) {
- struct kernel_sigaction act = {0};
+ struct kernel_sigaction act = {nullptr};
int sigaction_get_ret = sys_rt_sigaction(signum, nullptr, &act);
if (sigaction_get_ret && errno == EINVAL) {
#if !defined(NDEBUG)
@@ -509,8 +516,8 @@ Process LaunchProcess(const std::vector<std::string>& argv,
if (options.wait) {
// While this isn't strictly disk IO, waiting for another process to
// finish is the sort of thing ThreadRestrictions is trying to prevent.
- base::ThreadRestrictions::AssertIOAllowed();
- pid_t ret = HANDLE_EINTR(waitpid(pid, 0, 0));
+ base::AssertBlockingAllowed();
+ pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
DPCHECK(ret > 0);
}
}
@@ -540,8 +547,7 @@ static bool GetAppOutputInternal(
std::string* output,
bool do_search_path,
int* exit_code) {
- // Doing a blocking wait for another command to finish counts as IO.
- base::ThreadRestrictions::AssertIOAllowed();
+ base::AssertBlockingAllowed();
// exit_code must be supplied so calling function can determine success.
DCHECK(exit_code);
*exit_code = EXIT_FAILURE;
@@ -638,6 +644,9 @@ static bool GetAppOutputInternal(
// Always wait for exit code (even if we know we'll declare
// GOT_MAX_OUTPUT).
Process process(pid);
+ // A process launched with GetAppOutput*() usually doesn't wait on the
+ // process that launched it and thus chances of deadlock are low.
+ GetAppOutputScopedAllowBaseSyncPrimitives allow_base_sync_primitives;
return process.WaitForExit(exit_code);
}
}
@@ -684,10 +693,6 @@ bool GetAppOutputWithExitCode(const CommandLine& cl,
#if defined(OS_LINUX) || defined(OS_NACL_NONSFI) || defined(OS_AIX)
namespace {
-bool IsRunningOnValgrind() {
- return RUNNING_ON_VALGRIND;
-}
-
// This function runs on the stack specified on the clone call. It uses longjmp
// to switch back to the original stack so the child can return from sys_clone.
int CloneHelper(void* arg) {
@@ -720,8 +725,9 @@ NOINLINE pid_t CloneAndLongjmpInChild(unsigned long flags,
// specifying a new stack, so we use setjmp/longjmp to emulate
// fork-like behavior.
alignas(16) char stack_buf[PTHREAD_STACK_MIN];
-#if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
- defined(ARCH_CPU_MIPS_FAMILY)
+#if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
+ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \
+ defined(ARCH_CPU_PPC64_FAMILY)
// The stack grows downward.
void* stack = stack_buf + sizeof(stack_buf);
#else
@@ -749,13 +755,14 @@ pid_t ForkWithFlags(unsigned long flags, pid_t* ptid, pid_t* ctid) {
// without CLONE_VM, so we cannot use libc's clone wrapper when running under
// Valgrind. As a result, the libc pid cache may be incorrect under Valgrind.
// See crbug.com/442817 for more details.
- if (IsRunningOnValgrind()) {
+ if (RunningOnValgrind()) {
// See kernel/fork.c in Linux. There is different ordering of sys_clone
// parameters depending on CONFIG_CLONE_BACKWARDS* configuration options.
#if defined(ARCH_CPU_X86_64)
return syscall(__NR_clone, flags, nullptr, ptid, ctid, nullptr);
-#elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
- defined(ARCH_CPU_MIPS_FAMILY)
+#elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
+ defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \
+ defined(ARCH_CPU_PPC64_FAMILY)
// CONFIG_CLONE_BACKWARDS defined.
return syscall(__NR_clone, flags, nullptr, ptid, nullptr, ctid);
#else
diff --git a/chromium/base/process/launch_win.cc b/chromium/base/process/launch_win.cc
index b13f70d7ce3..0c798a88b45 100644
--- a/chromium/base/process/launch_win.cc
+++ b/chromium/base/process/launch_win.cc
@@ -34,14 +34,6 @@ namespace base {
namespace {
-// This exit code is used by the Windows task manager when it kills a
-// process. It's value is obviously not that unique, and it's
-// surprising to me that the task manager uses this value, but it
-// seems to be common practice on Windows to test for it as an
-// indication that the task manager has killed something if the
-// process goes away.
-const DWORD kProcessKilledExitCode = 1;
-
bool GetAppOutputInternal(const StringPiece16& cl,
bool include_stderr,
std::string* output,
@@ -114,7 +106,7 @@ bool GetAppOutputInternal(const StringPiece16& cl,
for (;;) {
DWORD bytes_read = 0;
BOOL success =
- ReadFile(out_read, buffer, kBufferSize, &bytes_read, nullptr);
+ ::ReadFile(out_read, buffer, kBufferSize, &bytes_read, nullptr);
if (!success || bytes_read == 0)
break;
output->append(buffer, bytes_read);
@@ -320,7 +312,7 @@ Process LaunchProcess(const string16& cmdline,
process_info.process_handle())) {
DLOG(ERROR) << "Could not AssignProcessToObject.";
Process scoped_process(process_info.TakeProcessHandle());
- scoped_process.Terminate(kProcessKilledExitCode, true);
+ scoped_process.Terminate(win::kProcessKilledExitCode, true);
return Process();
}
diff --git a/chromium/base/process/memory.cc b/chromium/base/process/memory.cc
index 6349c08ca0a..5b987339baf 100644
--- a/chromium/base/process/memory.cc
+++ b/chromium/base/process/memory.cc
@@ -38,7 +38,7 @@ bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
// Overflow check
if (size && ((alloc_size / size) != num_items)) {
- *result = NULL;
+ *result = nullptr;
return false;
}
diff --git a/chromium/base/process/memory_linux.cc b/chromium/base/process/memory_linux.cc
index 91eef2186c6..a985b7a8ceb 100644
--- a/chromium/base/process/memory_linux.cc
+++ b/chromium/base/process/memory_linux.cc
@@ -107,7 +107,7 @@ bool UncheckedMalloc(size_t size, void** result) {
#elif defined(USE_TCMALLOC)
*result = tc_malloc_skip_new_handler(size);
#endif
- return *result != NULL;
+ return *result != nullptr;
}
} // namespace base
diff --git a/chromium/base/process/memory_unittest.cc b/chromium/base/process/memory_unittest.cc
index 64ee0aaa6a2..3f6716b1b53 100644
--- a/chromium/base/process/memory_unittest.cc
+++ b/chromium/base/process/memory_unittest.cc
@@ -122,15 +122,14 @@ const int kExitCode = 1;
class OutOfMemoryTest : public testing::Test {
public:
OutOfMemoryTest()
- : value_(NULL),
- // Make test size as large as possible minus a few pages so
- // that alignment or other rounding doesn't make it wrap.
- test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
- // A test size that is > 2Gb and will cause the allocators to reject
- // the allocation due to security restrictions. See crbug.com/169327.
- insecure_test_size_(std::numeric_limits<int>::max()),
- signed_test_size_(std::numeric_limits<ssize_t>::max()) {
- }
+ : value_(nullptr),
+ // Make test size as large as possible minus a few pages so
+ // that alignment or other rounding doesn't make it wrap.
+ test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
+ // A test size that is > 2Gb and will cause the allocators to reject
+ // the allocation due to security restrictions. See crbug.com/169327.
+ insecure_test_size_(std::numeric_limits<int>::max()),
+ signed_test_size_(std::numeric_limits<ssize_t>::max()) {}
protected:
void* value_;
@@ -185,7 +184,7 @@ TEST_F(OutOfMemoryDeathTest, Malloc) {
TEST_F(OutOfMemoryDeathTest, Realloc) {
ASSERT_EXIT({
SetUpInDeathAssert();
- value_ = realloc(NULL, test_size_);
+ value_ = realloc(nullptr, test_size_);
}, testing::ExitedWithCode(kExitCode), kOomRegex);
}
@@ -263,7 +262,7 @@ TEST_F(OutOfMemoryDeathTest, SecurityMalloc) {
TEST_F(OutOfMemoryDeathTest, SecurityRealloc) {
ASSERT_EXIT({
SetUpInDeathAssert();
- value_ = realloc(NULL, insecure_test_size_);
+ value_ = realloc(nullptr, insecure_test_size_);
}, testing::ExitedWithCode(kExitCode), kOomRegex);
}
@@ -504,16 +503,16 @@ TEST_F(OutOfMemoryTest, TerminateBecauseOutOfMemoryReportsAllocSize) {
// on Windows as well.
TEST_F(OutOfMemoryHandledTest, UncheckedMalloc) {
EXPECT_TRUE(base::UncheckedMalloc(kSafeMallocSize, &value_));
- EXPECT_TRUE(value_ != NULL);
+ EXPECT_TRUE(value_ != nullptr);
free(value_);
EXPECT_FALSE(base::UncheckedMalloc(test_size_, &value_));
- EXPECT_TRUE(value_ == NULL);
+ EXPECT_TRUE(value_ == nullptr);
}
TEST_F(OutOfMemoryHandledTest, UncheckedCalloc) {
EXPECT_TRUE(base::UncheckedCalloc(1, kSafeMallocSize, &value_));
- EXPECT_TRUE(value_ != NULL);
+ EXPECT_TRUE(value_ != nullptr);
const char* bytes = static_cast<const char*>(value_);
for (size_t i = 0; i < kSafeMallocSize; ++i)
EXPECT_EQ(0, bytes[i]);
@@ -521,14 +520,14 @@ TEST_F(OutOfMemoryHandledTest, UncheckedCalloc) {
EXPECT_TRUE(
base::UncheckedCalloc(kSafeCallocItems, kSafeCallocSize, &value_));
- EXPECT_TRUE(value_ != NULL);
+ EXPECT_TRUE(value_ != nullptr);
bytes = static_cast<const char*>(value_);
for (size_t i = 0; i < (kSafeCallocItems * kSafeCallocSize); ++i)
EXPECT_EQ(0, bytes[i]);
free(value_);
EXPECT_FALSE(base::UncheckedCalloc(1, test_size_, &value_));
- EXPECT_TRUE(value_ == NULL);
+ EXPECT_TRUE(value_ == nullptr);
}
#endif // !defined(OS_OPENBSD) && BUILDFLAG(ENABLE_WIN_ALLOCATOR_SHIM_TESTS) &&
// !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/process/process.h b/chromium/base/process/process.h
index 481d92ddc17..2b945015af3 100644
--- a/chromium/base/process/process.h
+++ b/chromium/base/process/process.h
@@ -134,6 +134,13 @@ class BASE_EXPORT Process {
// is not required.
bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const;
+ // Indicates that the process has exited with the specified |exit_code|.
+ // This should be called if process exit is observed outside of this class.
+ // (i.e. Not because Terminate or WaitForExit, above, was called.)
+ // Note that nothing prevents this being called multiple times for a dead
+ // process though that should be avoided.
+ void Exited(int exit_code) const;
+
#if defined(OS_MACOSX)
// The Mac needs a Mach port in order to manipulate a process's priority,
// and there's no good way to get that from base given the pid. These Mac
diff --git a/chromium/base/process/process_fuchsia.cc b/chromium/base/process/process_fuchsia.cc
index 57da2ae4368..94bce344a10 100644
--- a/chromium/base/process/process_fuchsia.cc
+++ b/chromium/base/process/process_fuchsia.cc
@@ -203,6 +203,8 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
return true;
}
+void Process::Exited(int exit_code) const {}
+
bool Process::IsProcessBackgrounded() const {
// See SetProcessBackgrounded().
DCHECK(IsValid());
diff --git a/chromium/base/process/process_info_win.cc b/chromium/base/process/process_info_win.cc
index 725c56d32d9..b9864b02a9d 100644
--- a/chromium/base/process/process_info_win.cc
+++ b/chromium/base/process/process_info_win.cc
@@ -17,8 +17,13 @@ namespace {
base::win::ScopedHandle GetCurrentProcessToken() {
HANDLE process_token;
- OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &process_token);
- DCHECK(process_token != NULL && process_token != INVALID_HANDLE_VALUE);
+ BOOL result =
+ OpenProcessToken(::GetCurrentProcess(), TOKEN_QUERY, &process_token);
+ // These checks are turned on in release builds to debug
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=748431.
+ PCHECK(result);
+ CHECK(process_token != NULL);
+ CHECK(process_token != INVALID_HANDLE_VALUE);
return base::win::ScopedHandle(process_token);
}
diff --git a/chromium/base/process/process_iterator.cc b/chromium/base/process/process_iterator.cc
index d4024d9a5e2..013017f75b5 100644
--- a/chromium/base/process/process_iterator.cc
+++ b/chromium/base/process/process_iterator.cc
@@ -10,7 +10,7 @@ namespace base {
#if defined(OS_POSIX)
ProcessEntry::ProcessEntry() : pid_(0), ppid_(0), gid_(0) {}
ProcessEntry::ProcessEntry(const ProcessEntry& other) = default;
-ProcessEntry::~ProcessEntry() {}
+ProcessEntry::~ProcessEntry() = default;
#endif
const ProcessEntry* ProcessIterator::NextProcessEntry() {
@@ -20,7 +20,7 @@ const ProcessEntry* ProcessIterator::NextProcessEntry() {
} while (result && !IncludeEntry());
if (result)
return &entry_;
- return NULL;
+ return nullptr;
}
ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
@@ -52,8 +52,7 @@ NamedProcessIterator::NamedProcessIterator(
#endif
}
-NamedProcessIterator::~NamedProcessIterator() {
-}
+NamedProcessIterator::~NamedProcessIterator() = default;
int GetProcessCount(const FilePath::StringType& executable_name,
const ProcessFilter* filter) {
diff --git a/chromium/base/process/process_iterator_linux.cc b/chromium/base/process/process_iterator_linux.cc
index 421565f8e41..9fea70e4d32 100644
--- a/chromium/base/process/process_iterator_linux.cc
+++ b/chromium/base/process/process_iterator_linux.cc
@@ -33,7 +33,7 @@ std::string GetProcStatsFieldAsString(
return proc_stats[field_num];
NOTREACHED();
- return 0;
+ return nullptr;
}
// Reads /proc/<pid>/cmdline and populates |proc_cmd_line_args| with the command
diff --git a/chromium/base/process/process_iterator_mac.cc b/chromium/base/process/process_iterator_mac.cc
index 3d616980abd..f33121a83c2 100644
--- a/chromium/base/process/process_iterator_mac.cc
+++ b/chromium/base/process/process_iterator_mac.cc
@@ -57,8 +57,7 @@ ProcessIterator::ProcessIterator(const ProcessFilter* filter)
}
} else {
// Got the list, just make sure we're sized exactly right
- size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
- kinfo_procs_.resize(num_of_kinfo_proc);
+ kinfo_procs_.resize(len / sizeof(struct kinfo_proc));
done = true;
}
}
diff --git a/chromium/base/process/process_metrics.cc b/chromium/base/process/process_metrics.cc
index 0d9140a99e8..2eff3ea3d0c 100644
--- a/chromium/base/process/process_metrics.cc
+++ b/chromium/base/process/process_metrics.cc
@@ -61,6 +61,7 @@ SystemMetrics SystemMetrics::Sample() {
system_metrics.committed_memory_ = GetSystemCommitCharge();
#if defined(OS_LINUX) || defined(OS_ANDROID)
GetSystemMemoryInfo(&system_metrics.memory_info_);
+ GetVmStatInfo(&system_metrics.vmstat_info_);
GetSystemDiskInfo(&system_metrics.disk_info_);
#endif
#if defined(OS_CHROMEOS)
@@ -75,7 +76,10 @@ std::unique_ptr<Value> SystemMetrics::ToValue() const {
res->SetInteger("committed_memory", static_cast<int>(committed_memory_));
#if defined(OS_LINUX) || defined(OS_ANDROID)
- res->Set("meminfo", memory_info_.ToValue());
+ std::unique_ptr<DictionaryValue> meminfo = memory_info_.ToValue();
+ std::unique_ptr<DictionaryValue> vmstat = vmstat_info_.ToValue();
+ meminfo->MergeDictionary(vmstat.get());
+ res->Set("meminfo", std::move(meminfo));
res->Set("diskinfo", disk_info_.ToValue());
#endif
#if defined(OS_CHROMEOS)
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 844cd73cb4a..52a5912a288 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -367,7 +367,7 @@ struct BASE_EXPORT SystemMemoryInfoKB {
SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
// Serializes the platform specific fields to value.
- std::unique_ptr<Value> ToValue() const;
+ std::unique_ptr<DictionaryValue> ToValue() const;
int total = 0;
@@ -408,11 +408,6 @@ struct BASE_EXPORT SystemMemoryInfoKB {
int inactive_file = 0;
int dirty = 0;
int reclaimable = 0;
-
- // vmstats data.
- unsigned long pswpin = 0;
- unsigned long pswpout = 0;
- unsigned long pgmajfault = 0;
#endif // defined(OS_ANDROID) || defined(OS_LINUX) || defined(OS_AIX) ||
// defined(OS_FUCHSIA)
@@ -463,11 +458,24 @@ BASE_EXPORT extern const char kProcSelfExe[];
BASE_EXPORT bool ParseProcMeminfo(StringPiece input,
SystemMemoryInfoKB* meminfo);
+// Data from /proc/vmstat.
+struct BASE_EXPORT VmStatInfo {
+ // Serializes the platform specific fields to value.
+ std::unique_ptr<DictionaryValue> ToValue() const;
+
+ unsigned long pswpin = 0;
+ unsigned long pswpout = 0;
+ unsigned long pgmajfault = 0;
+};
+
+// Retrieves data from /proc/vmstat about system-wide vm operations.
+// Fills in the provided |vmstat| structure. Returns true on success.
+BASE_EXPORT bool GetVmStatInfo(VmStatInfo* vmstat);
+
// Parses a string containing the contents of /proc/vmstat
// returns true on success or false for a parsing error
// Exposed for testing.
-BASE_EXPORT bool ParseProcVmstat(StringPiece input,
- SystemMemoryInfoKB* meminfo);
+BASE_EXPORT bool ParseProcVmstat(StringPiece input, VmStatInfo* vmstat);
// Data from /proc/diskstats about system-wide disk I/O.
struct BASE_EXPORT SystemDiskInfo {
@@ -477,17 +485,17 @@ struct BASE_EXPORT SystemDiskInfo {
// Serializes the platform specific fields to value.
std::unique_ptr<Value> ToValue() const;
- uint64_t reads;
- uint64_t reads_merged;
- uint64_t sectors_read;
- uint64_t read_time;
- uint64_t writes;
- uint64_t writes_merged;
- uint64_t sectors_written;
- uint64_t write_time;
- uint64_t io;
- uint64_t io_time;
- uint64_t weighted_io_time;
+ uint64_t reads = 0;
+ uint64_t reads_merged = 0;
+ uint64_t sectors_read = 0;
+ uint64_t read_time = 0;
+ uint64_t writes = 0;
+ uint64_t writes_merged = 0;
+ uint64_t sectors_written = 0;
+ uint64_t write_time = 0;
+ uint64_t io = 0;
+ uint64_t io_time = 0;
+ uint64_t weighted_io_time = 0;
};
// Checks whether the candidate string is a valid disk name, [hsv]d[a-z]+
@@ -501,6 +509,7 @@ BASE_EXPORT bool GetSystemDiskInfo(SystemDiskInfo* diskinfo);
// Returns the amount of time spent in user space since boot across all CPUs.
BASE_EXPORT TimeDelta GetUserCpuTimeSinceBoot();
+
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_CHROMEOS)
@@ -517,11 +526,11 @@ struct BASE_EXPORT SwapInfo {
// Serializes the platform specific fields to value.
std::unique_ptr<Value> ToValue() const;
- uint64_t num_reads;
- uint64_t num_writes;
- uint64_t compr_data_size;
- uint64_t orig_data_size;
- uint64_t mem_used_total;
+ uint64_t num_reads = 0;
+ uint64_t num_writes = 0;
+ uint64_t compr_data_size = 0;
+ uint64_t orig_data_size = 0;
+ uint64_t mem_used_total = 0;
};
// Parses a string containing the contents of /sys/block/zram0/mm_stat.
@@ -560,6 +569,7 @@ class SystemMetrics {
size_t committed_memory_;
#if defined(OS_LINUX) || defined(OS_ANDROID)
SystemMemoryInfoKB memory_info_;
+ VmStatInfo vmstat_info_;
SystemDiskInfo disk_info_;
#endif
#if defined(OS_CHROMEOS)
diff --git a/chromium/base/process/process_metrics_fuchsia.cc b/chromium/base/process/process_metrics_fuchsia.cc
index a5234c40a79..d07911de538 100644
--- a/chromium/base/process/process_metrics_fuchsia.cc
+++ b/chromium/base/process/process_metrics_fuchsia.cc
@@ -39,6 +39,11 @@ size_t ProcessMetrics::GetPeakWorkingSetSize() const {
return 0;
}
+bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
+ NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
+ return false;
+}
+
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
NOTIMPLEMENTED(); // TODO(fuchsia): https://crbug.com/706592.
return false;
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 6a32964d22d..1fb76e5a2fa 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -616,7 +616,7 @@ const size_t kDiskWeightedIOTime = 13;
} // namespace
-std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
+std::unique_ptr<DictionaryValue> SystemMemoryInfoKB::ToValue() const {
auto res = std::make_unique<DictionaryValue>();
res->SetInteger("total", total);
res->SetInteger("free", free);
@@ -632,9 +632,6 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("swap_used", swap_total - swap_free);
res->SetInteger("dirty", dirty);
res->SetInteger("reclaimable", reclaimable);
- res->SetInteger("pswpin", pswpin);
- res->SetInteger("pswpout", pswpout);
- res->SetInteger("pgmajfault", pgmajfault);
#ifdef OS_CHROMEOS
res->SetInteger("shmem", shmem);
res->SetInteger("slab", slab);
@@ -642,7 +639,7 @@ std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
res->SetInteger("gem_size", gem_size);
#endif
- return std::move(res);
+ return res;
}
bool ParseProcMeminfo(StringPiece meminfo_data, SystemMemoryInfoKB* meminfo) {
@@ -715,7 +712,7 @@ bool ParseProcMeminfo(StringPiece meminfo_data, SystemMemoryInfoKB* meminfo) {
return meminfo->total > 0;
}
-bool ParseProcVmstat(StringPiece vmstat_data, SystemMemoryInfoKB* meminfo) {
+bool ParseProcVmstat(StringPiece vmstat_data, VmStatInfo* vmstat) {
// The format of /proc/vmstat is:
//
// nr_free_pages 299878
@@ -741,15 +738,15 @@ bool ParseProcVmstat(StringPiece vmstat_data, SystemMemoryInfoKB* meminfo) {
continue;
if (tokens[0] == "pswpin") {
- meminfo->pswpin = val;
+ vmstat->pswpin = val;
DCHECK(!has_pswpin);
has_pswpin = true;
} else if (tokens[0] == "pswpout") {
- meminfo->pswpout = val;
+ vmstat->pswpout = val;
DCHECK(!has_pswpout);
has_pswpout = true;
} else if (tokens[0] == "pgmajfault") {
- meminfo->pgmajfault = val;
+ vmstat->pgmajfault = val;
DCHECK(!has_pgmajfault);
has_pgmajfault = true;
}
@@ -781,17 +778,31 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
ReadChromeOSGraphicsMemory(meminfo);
#endif
+ return true;
+}
+
+std::unique_ptr<DictionaryValue> VmStatInfo::ToValue() const {
+ auto res = std::make_unique<DictionaryValue>();
+ res->SetInteger("pswpin", pswpin);
+ res->SetInteger("pswpout", pswpout);
+ res->SetInteger("pgmajfault", pgmajfault);
+ return res;
+}
+
+bool GetVmStatInfo(VmStatInfo* vmstat) {
+ // Synchronously reading files in /proc and /sys are safe.
+ ThreadRestrictions::ScopedAllowIO allow_io;
+
FilePath vmstat_file("/proc/vmstat");
std::string vmstat_data;
if (!ReadFileToString(vmstat_file, &vmstat_data)) {
DLOG(WARNING) << "Failed to open " << vmstat_file.value();
return false;
}
- if (!ParseProcVmstat(vmstat_data, meminfo)) {
+ if (!ParseProcVmstat(vmstat_data, vmstat)) {
DLOG(WARNING) << "Failed to parse " << vmstat_file.value();
return false;
}
-
return true;
}
diff --git a/chromium/base/process/process_metrics_posix.cc b/chromium/base/process/process_metrics_posix.cc
index 0eb5c1f97b0..73a52d6210d 100644
--- a/chromium/base/process/process_metrics_posix.cc
+++ b/chromium/base/process/process_metrics_posix.cc
@@ -29,7 +29,7 @@ int64_t TimeValToMicroseconds(const struct timeval& tv) {
return ret;
}
-ProcessMetrics::~ProcessMetrics() { }
+ProcessMetrics::~ProcessMetrics() = default;
#if defined(OS_LINUX)
static const rlim_t kSystemDefaultMaxFds = 8192;
diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc
index 2a187151e6a..fdf9c96a909 100644
--- a/chromium/base/process/process_metrics_unittest.cc
+++ b/chromium/base/process/process_metrics_unittest.cc
@@ -51,7 +51,7 @@ void BusyWork(std::vector<std::string>* vec) {
// Exists as a class so it can be a friend of SystemMetrics.
class SystemMetricsTest : public testing::Test {
public:
- SystemMetricsTest() {}
+ SystemMetricsTest() = default;
private:
DISALLOW_COPY_AND_ASSIGN(SystemMetricsTest);
@@ -251,7 +251,7 @@ TEST_F(SystemMetricsTest, ParseMeminfo) {
}
TEST_F(SystemMetricsTest, ParseVmstat) {
- SystemMemoryInfoKB meminfo;
+ VmStatInfo vmstat;
// part of vmstat from a 3.2 kernel with numa enabled
const char valid_input1[] =
"nr_free_pages 905104\n"
@@ -341,21 +341,21 @@ TEST_F(SystemMetricsTest, ParseVmstat) {
"pgrefill_normal 0\n"
"pgrefill_high 0\n"
"pgrefill_movable 0\n";
- EXPECT_TRUE(ParseProcVmstat(valid_input1, &meminfo));
- EXPECT_EQ(179LU, meminfo.pswpin);
- EXPECT_EQ(406LU, meminfo.pswpout);
- EXPECT_EQ(487192LU, meminfo.pgmajfault);
- EXPECT_TRUE(ParseProcVmstat(valid_input2, &meminfo));
- EXPECT_EQ(12LU, meminfo.pswpin);
- EXPECT_EQ(901LU, meminfo.pswpout);
- EXPECT_EQ(2023LU, meminfo.pgmajfault);
+ EXPECT_TRUE(ParseProcVmstat(valid_input1, &vmstat));
+ EXPECT_EQ(179LU, vmstat.pswpin);
+ EXPECT_EQ(406LU, vmstat.pswpout);
+ EXPECT_EQ(487192LU, vmstat.pgmajfault);
+ EXPECT_TRUE(ParseProcVmstat(valid_input2, &vmstat));
+ EXPECT_EQ(12LU, vmstat.pswpin);
+ EXPECT_EQ(901LU, vmstat.pswpout);
+ EXPECT_EQ(2023LU, vmstat.pgmajfault);
const char missing_pgmajfault_input[] =
"pswpin 12\n"
"pswpout 901\n";
- EXPECT_FALSE(ParseProcVmstat(missing_pgmajfault_input, &meminfo));
+ EXPECT_FALSE(ParseProcVmstat(missing_pgmajfault_input, &vmstat));
const char empty_input[] = "";
- EXPECT_FALSE(ParseProcVmstat(empty_input, &meminfo));
+ EXPECT_FALSE(ParseProcVmstat(empty_input, &vmstat));
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index 11f48f75acb..2481e26ba16 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -16,6 +16,7 @@
#include "base/posix/eintr_wrapper.h"
#include "base/process/kill.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
#if defined(OS_MACOSX)
@@ -224,8 +225,7 @@ namespace base {
Process::Process(ProcessHandle handle) : process_(handle) {
}
-Process::~Process() {
-}
+Process::~Process() = default;
Process::Process(Process&& other) : process_(other.process_) {
other.Close();
@@ -327,7 +327,7 @@ bool Process::Terminate(int exit_code, bool wait) const {
// The process may not end immediately due to pending I/O
bool exited = false;
while (tries-- > 0) {
- pid_t pid = HANDLE_EINTR(waitpid(process_, NULL, WNOHANG));
+ pid_t pid = HANDLE_EINTR(waitpid(process_, nullptr, WNOHANG));
if (pid == process_) {
exited = true;
break;
@@ -354,7 +354,9 @@ bool Process::Terminate(int exit_code, bool wait) const {
result = kill(process_, SIGKILL) == 0;
}
- if (!result)
+ if (result)
+ Exited(exit_code);
+ else
DPLOG(ERROR) << "Unable to terminate process " << process_;
return result;
@@ -366,12 +368,24 @@ bool Process::WaitForExit(int* exit_code) const {
}
bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
+ if (!timeout.is_zero())
+ internal::AssertBaseSyncPrimitivesAllowed();
+
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedProcessWaitActivity process_activity(this);
- return WaitForExitWithTimeoutImpl(Handle(), exit_code, timeout);
+ int local_exit_code;
+ bool exited = WaitForExitWithTimeoutImpl(Handle(), &local_exit_code, timeout);
+ if (exited) {
+ Exited(local_exit_code);
+ if (exit_code)
+ *exit_code = local_exit_code;
+ }
+ return exited;
}
+void Process::Exited(int exit_code) const {}
+
#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_AIX)
bool Process::IsProcessBackgrounded() const {
// See SetProcessBackgrounded().
diff --git a/chromium/base/process/process_unittest.cc b/chromium/base/process/process_unittest.cc
index d9c2409fb16..b4a9e830021 100644
--- a/chromium/base/process/process_unittest.cc
+++ b/chromium/base/process/process_unittest.cc
@@ -168,7 +168,7 @@ class ThreadLocalObject {
MULTIPROCESS_TEST_MAIN(TerminateCurrentProcessImmediatelyWithCode0) {
base::ThreadLocalPointer<ThreadLocalObject> object;
- base::AtExitManager::RegisterCallback(&AtExitHandler, NULL);
+ base::AtExitManager::RegisterCallback(&AtExitHandler, nullptr);
Process::TerminateCurrentProcessImmediately(0);
NOTREACHED();
return 42;
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 74c22505f49..43f2898801f 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -283,7 +283,7 @@ MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
::signal(SIGSEGV, SIG_DFL);
#endif
// Make this process have a segmentation fault.
- volatile int* oops = NULL;
+ volatile int* oops = nullptr;
*oops = 0xDEAD;
return 1;
}
@@ -995,7 +995,7 @@ bool IsProcessDead(base::ProcessHandle child) {
// waitpid() will actually reap the process which is exactly NOT what we
// want to test for. The good thing is that if it can't find the process
// we'll get a nice value for errno which we can test for.
- const pid_t result = HANDLE_EINTR(waitpid(child, NULL, WNOHANG));
+ const pid_t result = HANDLE_EINTR(waitpid(child, nullptr, WNOHANG));
return result == -1 && errno == ECHILD;
#endif
}
@@ -1038,7 +1038,7 @@ MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
class ReadFromPipeDelegate : public base::LaunchOptions::PreExecDelegate {
public:
explicit ReadFromPipeDelegate(int fd) : fd_(fd) {}
- ~ReadFromPipeDelegate() override {}
+ ~ReadFromPipeDelegate() override = default;
void RunAsyncSafe() override {
char c;
RAW_CHECK(HANDLE_EINTR(read(fd_, &c, 1)) == 1);
diff --git a/chromium/base/process/process_win.cc b/chromium/base/process/process_win.cc
index 555f5751058..005a68e6e3b 100644
--- a/chromium/base/process/process_win.cc
+++ b/chromium/base/process/process_win.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/process/kill.h"
+#include "base/threading/thread_restrictions.h"
namespace {
@@ -83,6 +84,9 @@ bool Process::CanBackgroundProcesses() {
// static
void Process::TerminateCurrentProcessImmediately(int exit_code) {
::TerminateProcess(GetCurrentProcess(), exit_code);
+ // There is some ambiguity over whether the call above can return. Rather than
+ // hitting confusing crashes later on we should crash right here.
+ CHECK(false);
}
bool Process::IsValid() const {
@@ -131,16 +135,21 @@ bool Process::Terminate(int exit_code, bool wait) const {
// exit_code cannot be implemented.
DCHECK(IsValid());
bool result = (::TerminateProcess(Handle(), exit_code) != FALSE);
- if (result && wait) {
+ if (result) {
// The process may not end immediately due to pending I/O
- if (::WaitForSingleObject(Handle(), 60 * 1000) != WAIT_OBJECT_0)
+ if (wait && ::WaitForSingleObject(Handle(), 60 * 1000) != WAIT_OBJECT_0)
DPLOG(ERROR) << "Error waiting for process exit";
- } else if (!result) {
+ Exited(exit_code);
+ } else {
+ // The process can't be terminated, perhaps because it has already
+ // exited.
DPLOG(ERROR) << "Unable to terminate process";
- }
- if (result) {
- base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(Pid(),
- exit_code);
+ if (::WaitForSingleObject(Handle(), 0) == WAIT_OBJECT_0) {
+ DWORD actual_exit;
+ Exited(::GetExitCodeProcess(Handle(), &actual_exit) ? actual_exit
+ : exit_code);
+ result = true;
+ }
}
return result;
}
@@ -151,6 +160,9 @@ bool Process::WaitForExit(int* exit_code) const {
}
bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
+ if (!timeout.is_zero())
+ internal::AssertBaseSyncPrimitivesAllowed();
+
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedProcessWaitActivity process_activity(this);
@@ -166,11 +178,15 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
if (exit_code)
*exit_code = temp_code;
- base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(
- Pid(), static_cast<int>(temp_code));
+ Exited(temp_code);
return true;
}
+void Process::Exited(int exit_code) const {
+ base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(Pid(),
+ exit_code);
+}
+
bool Process::IsProcessBackgrounded() const {
DCHECK(IsValid());
DWORD priority = GetPriority();
diff --git a/chromium/base/profiler/native_stack_sampler.cc b/chromium/base/profiler/native_stack_sampler.cc
index 6fa9cfa2357..6eed54f0468 100644
--- a/chromium/base/profiler/native_stack_sampler.cc
+++ b/chromium/base/profiler/native_stack_sampler.cc
@@ -13,11 +13,11 @@ NativeStackSampler::StackBuffer::StackBuffer(size_t buffer_size)
sizeof(uintptr_t)]),
size_(buffer_size) {}
-NativeStackSampler::StackBuffer::~StackBuffer() {}
+NativeStackSampler::StackBuffer::~StackBuffer() = default;
-NativeStackSampler::NativeStackSampler() {}
+NativeStackSampler::NativeStackSampler() = default;
-NativeStackSampler::~NativeStackSampler() {}
+NativeStackSampler::~NativeStackSampler() = default;
std::unique_ptr<NativeStackSampler::StackBuffer>
NativeStackSampler::CreateStackBuffer() {
@@ -27,8 +27,8 @@ NativeStackSampler::CreateStackBuffer() {
return std::make_unique<StackBuffer>(size);
}
-NativeStackSamplerTestDelegate::~NativeStackSamplerTestDelegate() {}
+NativeStackSamplerTestDelegate::~NativeStackSamplerTestDelegate() = default;
-NativeStackSamplerTestDelegate::NativeStackSamplerTestDelegate() {}
+NativeStackSamplerTestDelegate::NativeStackSamplerTestDelegate() = default;
} // namespace base
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index bacde018b14..35e60581e0e 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -59,7 +59,7 @@ StackSamplingProfiler::Module::Module(uintptr_t base_address,
const FilePath& filename)
: base_address(base_address), id(id), filename(filename) {}
-StackSamplingProfiler::Module::~Module() {}
+StackSamplingProfiler::Module::~Module() = default;
// StackSamplingProfiler::Frame -----------------------------------------------
@@ -67,7 +67,7 @@ StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
size_t module_index)
: instruction_pointer(instruction_pointer), module_index(module_index) {}
-StackSamplingProfiler::Frame::~Frame() {}
+StackSamplingProfiler::Frame::~Frame() = default;
StackSamplingProfiler::Frame::Frame()
: instruction_pointer(0), module_index(kUnknownModuleIndex) {
@@ -75,11 +75,11 @@ StackSamplingProfiler::Frame::Frame()
// StackSamplingProfiler::Sample ----------------------------------------------
-StackSamplingProfiler::Sample::Sample() {}
+StackSamplingProfiler::Sample::Sample() = default;
StackSamplingProfiler::Sample::Sample(const Sample& sample) = default;
-StackSamplingProfiler::Sample::~Sample() {}
+StackSamplingProfiler::Sample::~Sample() = default;
StackSamplingProfiler::Sample::Sample(const Frame& frame) {
frames.push_back(std::move(frame));
@@ -90,12 +90,12 @@ StackSamplingProfiler::Sample::Sample(const std::vector<Frame>& frames)
// StackSamplingProfiler::CallStackProfile ------------------------------------
-StackSamplingProfiler::CallStackProfile::CallStackProfile() {}
+StackSamplingProfiler::CallStackProfile::CallStackProfile() = default;
StackSamplingProfiler::CallStackProfile::CallStackProfile(
CallStackProfile&& other) = default;
-StackSamplingProfiler::CallStackProfile::~CallStackProfile() {}
+StackSamplingProfiler::CallStackProfile::~CallStackProfile() = default;
StackSamplingProfiler::CallStackProfile&
StackSamplingProfiler::CallStackProfile::operator=(CallStackProfile&& other) =
@@ -151,7 +151,7 @@ class StackSamplingProfiler::SamplingThread : public Thread {
callback(callback),
finished(finished),
native_sampler(std::move(sampler)) {}
- ~CollectionContext() {}
+ ~CollectionContext() = default;
// An identifier for the profiler associated with this collection, used to
// uniquely identify the collection to outside interests.
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 94c3d341f1e..94b8a6e8b1e 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -863,7 +863,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
// Test delegate that counts samples.
class SampleRecordedCounter : public NativeStackSamplerTestDelegate {
public:
- SampleRecordedCounter() {}
+ SampleRecordedCounter() = default;
void OnPreStackWalk() override {
AutoLock lock(lock_);
diff --git a/chromium/base/scoped_native_library.cc b/chromium/base/scoped_native_library.cc
index 7290d295950..c94f262603a 100644
--- a/chromium/base/scoped_native_library.cc
+++ b/chromium/base/scoped_native_library.cc
@@ -6,15 +6,14 @@
namespace base {
-ScopedNativeLibrary::ScopedNativeLibrary() : library_(NULL) {
-}
+ScopedNativeLibrary::ScopedNativeLibrary() : library_(nullptr) {}
ScopedNativeLibrary::ScopedNativeLibrary(NativeLibrary library)
: library_(library) {
}
ScopedNativeLibrary::ScopedNativeLibrary(const FilePath& library_path) {
- library_ = base::LoadNativeLibrary(library_path, NULL);
+ library_ = base::LoadNativeLibrary(library_path, nullptr);
}
ScopedNativeLibrary::~ScopedNativeLibrary() {
@@ -25,7 +24,7 @@ ScopedNativeLibrary::~ScopedNativeLibrary() {
void* ScopedNativeLibrary::GetFunctionPointer(
const char* function_name) const {
if (!library_)
- return NULL;
+ return nullptr;
return base::GetFunctionPointerFromNativeLibrary(library_, function_name);
}
@@ -37,7 +36,7 @@ void ScopedNativeLibrary::Reset(NativeLibrary library) {
NativeLibrary ScopedNativeLibrary::Release() {
NativeLibrary result = library_;
- library_ = NULL;
+ library_ = nullptr;
return result;
}
diff --git a/chromium/base/security_unittest.cc b/chromium/base/security_unittest.cc
index 61c7cf91524..a41607c8571 100644
--- a/chromium/base/security_unittest.cc
+++ b/chromium/base/security_unittest.cc
@@ -142,20 +142,20 @@ TEST(SecurityTest, MALLOC_OVERFLOW_TEST(RandomMemoryAllocations)) {
// that it has allocated early on, before starting the sophisticated
// allocators.
void* default_mmap_heap_address =
- mmap(0, kPageSize, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(default_mmap_heap_address,
static_cast<void*>(MAP_FAILED));
ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
void* brk_heap_address = sbrk(0);
ASSERT_NE(brk_heap_address, reinterpret_cast<void*>(-1));
- ASSERT_TRUE(brk_heap_address != NULL);
+ ASSERT_TRUE(brk_heap_address != nullptr);
// 1 MB should get us past what TCMalloc pre-allocated before initializing
// the sophisticated allocators.
size_t kAllocSize = 1<<20;
std::unique_ptr<char, base::FreeDeleter> ptr(
static_cast<char*>(malloc(kAllocSize)));
- ASSERT_TRUE(ptr != NULL);
+ ASSERT_TRUE(ptr != nullptr);
// If two pointers are separated by less than 512MB, they are considered
// to be in the same area.
// Our random pointer could be anywhere within 0x3fffffffffff (46bits),
diff --git a/chromium/base/sequenced_task_runner.cc b/chromium/base/sequenced_task_runner.cc
index 4f36dc4deca..86771c67b5a 100644
--- a/chromium/base/sequenced_task_runner.cc
+++ b/chromium/base/sequenced_task_runner.cc
@@ -28,8 +28,7 @@ OnTaskRunnerDeleter::OnTaskRunnerDeleter(
: task_runner_(std::move(task_runner)) {
}
-OnTaskRunnerDeleter::~OnTaskRunnerDeleter() {
-}
+OnTaskRunnerDeleter::~OnTaskRunnerDeleter() = default;
OnTaskRunnerDeleter::OnTaskRunnerDeleter(OnTaskRunnerDeleter&&) = default;
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index f0ec634f3ed..186bf12b440 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -307,6 +307,35 @@ void EraseIf(std::unordered_multiset<Key, Hash, KeyEqual, Allocator>& container,
internal::IterateAndEraseIf(container, pred);
}
+// A helper class to be used as the predicate with |EraseIf| to implement
+// in-place set intersection. Helps implement the algorithm of going through
+// each container an element at a time, erasing elements from the first
+// container if they aren't in the second container. Requires each container be
+// sorted. Note that the logic below appears inverted since it is returning
+// whether an element should be erased.
+template <class Collection>
+class IsNotIn {
+ public:
+ explicit IsNotIn(const Collection& collection)
+ : i_(collection.begin()), end_(collection.end()) {}
+
+ bool operator()(const typename Collection::value_type& x) {
+ while (i_ != end_ && *i_ < x)
+ ++i_;
+ if (i_ == end_)
+ return true;
+ if (*i_ == x) {
+ ++i_;
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ typename Collection::const_iterator i_;
+ const typename Collection::const_iterator end_;
+};
+
} // namespace base
#endif // BASE_STL_UTIL_H_
diff --git a/chromium/base/stl_util_unittest.cc b/chromium/base/stl_util_unittest.cc
index 4d35e9cdc95..8d7364fd4bd 100644
--- a/chromium/base/stl_util_unittest.cc
+++ b/chromium/base/stl_util_unittest.cc
@@ -419,6 +419,15 @@ TEST(Erase, UnorderedMultiset) {
RunEraseIfTest<std::unordered_multiset<std::pair<int, int>, HashByFirst>>();
}
+TEST(Erase, IsNotIn) {
+ // Should keep both '2' but only one '4', like std::set_intersection.
+ std::vector<int> lhs = {0, 2, 2, 4, 4, 4, 6, 8, 10};
+ std::vector<int> rhs = {1, 2, 2, 4, 5, 6, 7};
+ std::vector<int> expected = {2, 2, 4, 6};
+ EraseIf(lhs, IsNotIn<std::vector<int>>(rhs));
+ EXPECT_EQ(expected, lhs);
+}
+
TEST(ContainsValue, OrdinaryArrays) {
const char allowed_chars[] = {'a', 'b', 'c', 'd'};
EXPECT_TRUE(ContainsValue(allowed_chars, 'a'));
diff --git a/chromium/base/strings/pattern.cc b/chromium/base/strings/pattern.cc
index af30aab86da..e10d4ac0d79 100644
--- a/chromium/base/strings/pattern.cc
+++ b/chromium/base/strings/pattern.cc
@@ -19,7 +19,7 @@ template <typename CHAR, typename NEXT>
static void EatSameChars(const CHAR** pattern, const CHAR* pattern_end,
const CHAR** string, const CHAR* string_end,
NEXT next) {
- const CHAR* escape = NULL;
+ const CHAR* escape = nullptr;
while (*pattern != pattern_end && *string != string_end) {
if (!escape && IsWildcard(**pattern)) {
// We don't want to match wildcard here, except if it's escaped.
@@ -54,7 +54,7 @@ static void EatSameChars(const CHAR** pattern, const CHAR* pattern_end,
return;
}
- escape = NULL;
+ escape = nullptr;
}
}
diff --git a/chromium/base/strings/safe_sprintf.cc b/chromium/base/strings/safe_sprintf.cc
index a51c778271e..4d695cf984b 100644
--- a/chromium/base/strings/safe_sprintf.cc
+++ b/chromium/base/strings/safe_sprintf.cc
@@ -318,7 +318,7 @@ bool Buffer::IToASCII(bool sign, bool upcase, int64_t i, int base,
// We cannot choose the easier approach of just reversing the number, as that
// fails in situations where we need to truncate numbers that have padding
// and/or prefixes.
- const char* reverse_prefix = NULL;
+ const char* reverse_prefix = nullptr;
if (prefix && *prefix) {
if (pad == '0') {
while (*prefix) {
@@ -327,13 +327,13 @@ bool Buffer::IToASCII(bool sign, bool upcase, int64_t i, int base,
}
Out(*prefix++);
}
- prefix = NULL;
+ prefix = nullptr;
} else {
for (reverse_prefix = prefix; *reverse_prefix; ++reverse_prefix) {
}
}
} else
- prefix = NULL;
+ prefix = nullptr;
const size_t prefix_length = reverse_prefix - prefix;
// Loop until we have converted the entire number. Output at least one
@@ -530,7 +530,7 @@ ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt, const Arg* args,
const Arg& arg = args[cur_arg++];
int64_t i;
- const char* prefix = NULL;
+ const char* prefix = nullptr;
if (ch != 'p') {
// Check that the argument has the expected type.
if (arg.type != Arg::INT && arg.type != Arg::UINT) {
diff --git a/chromium/base/strings/safe_sprintf_unittest.cc b/chromium/base/strings/safe_sprintf_unittest.cc
index 1a21728a8e5..bb9908f9289 100644
--- a/chromium/base/strings/safe_sprintf_unittest.cc
+++ b/chromium/base/strings/safe_sprintf_unittest.cc
@@ -384,14 +384,16 @@ void PrintLongString(char* buf, size_t sz) {
size_t out_sz = sz;
size_t len;
for (std::unique_ptr<char[]> perfect_buf;;) {
- size_t needed = SafeSNPrintf(out, out_sz,
+ size_t needed =
+ SafeSNPrintf(out, out_sz,
#if defined(NDEBUG)
- "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+ "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
#else
- "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+ "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
#endif
- 0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
- PrintLongString, static_cast<char*>(NULL)) + 1;
+ 0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+ PrintLongString, static_cast<char*>(nullptr)) +
+ 1;
// Various sanity checks:
// The numbered of characters needed to print the full string should always
diff --git a/chromium/base/strings/string16.cc b/chromium/base/strings/string16.cc
index d85859a1f70..2abb0e5df55 100644
--- a/chromium/base/strings/string16.cc
+++ b/chromium/base/strings/string16.cc
@@ -47,7 +47,7 @@ const char16* c16memchr(const char16* s, char16 c, size_t n) {
}
++s;
}
- return 0;
+ return nullptr;
}
char16* c16memmove(char16* s1, const char16* s2, size_t n) {
diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc
index 9148def07c1..f8a7cff3e5d 100644
--- a/chromium/base/strings/string_number_conversions.cc
+++ b/chromium/base/strings/string_number_conversions.cc
@@ -15,6 +15,7 @@
#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "base/scoped_clear_errno.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/third_party/dmg_fp/dmg_fp.h"
namespace base {
@@ -328,53 +329,65 @@ bool String16ToIntImpl(const StringPiece16& input, VALUE* output) {
} // namespace
-std::string IntToString(int value) {
- return IntToStringT<std::string, int>::IntToString(value);
+std::string NumberToString(int32_t value) {
+ return IntToStringT<std::string, int32_t>::IntToString(value);
}
-string16 IntToString16(int value) {
- return IntToStringT<string16, int>::IntToString(value);
+string16 NumberToString16(int32_t value) {
+ return IntToStringT<string16, int32_t>::IntToString(value);
}
-std::string UintToString(unsigned int value) {
- return IntToStringT<std::string, unsigned int>::IntToString(value);
+std::string NumberToString(uint32_t value) {
+ return IntToStringT<std::string, uint32_t>::IntToString(value);
}
-string16 UintToString16(unsigned int value) {
- return IntToStringT<string16, unsigned int>::IntToString(value);
+string16 NumberToString16(uint32_t value) {
+ return IntToStringT<string16, uint32_t>::IntToString(value);
}
-std::string Int64ToString(int64_t value) {
+std::string NumberToString(int64_t value) {
return IntToStringT<std::string, int64_t>::IntToString(value);
}
-string16 Int64ToString16(int64_t value) {
+string16 NumberToString16(int64_t value) {
return IntToStringT<string16, int64_t>::IntToString(value);
}
-std::string Uint64ToString(uint64_t value) {
+std::string NumberToString(uint64_t value) {
return IntToStringT<std::string, uint64_t>::IntToString(value);
}
-string16 Uint64ToString16(uint64_t value) {
+string16 NumberToString16(uint64_t value) {
return IntToStringT<string16, uint64_t>::IntToString(value);
}
-std::string SizeTToString(size_t value) {
+#if defined(OS_MACOSX)
+std::string NumberToString(size_t value) {
return IntToStringT<std::string, size_t>::IntToString(value);
}
-string16 SizeTToString16(size_t value) {
+string16 NumberToString16(size_t value) {
return IntToStringT<string16, size_t>::IntToString(value);
}
+#endif
-std::string DoubleToString(double value) {
+std::string NumberToString(double value) {
// According to g_fmt.cc, it is sufficient to declare a buffer of size 32.
char buffer[32];
dmg_fp::g_fmt(buffer, value);
return std::string(buffer);
}
+base::string16 NumberToString16(double value) {
+ // According to g_fmt.cc, it is sufficient to declare a buffer of size 32.
+ char buffer[32];
+ dmg_fp::g_fmt(buffer, value);
+
+ // The number will be ASCII. This creates the string using the "input
+ // iterator" variant which promotes from 8-bit to 16-bit via "=".
+ return base::string16(&buffer[0], &buffer[strlen(buffer)]);
+}
+
bool StringToInt(const StringPiece& input, int* output) {
return StringToIntImpl(input, output);
}
@@ -419,7 +432,7 @@ bool StringToDouble(const std::string& input, double* output) {
// Thread-safe? It is on at least Mac, Linux, and Windows.
ScopedClearErrno clear_errno;
- char* endptr = NULL;
+ char* endptr = nullptr;
*output = dmg_fp::strtod(input.c_str(), &endptr);
// Cases to return false:
diff --git a/chromium/base/strings/string_number_conversions.h b/chromium/base/strings/string_number_conversions.h
index a3b876aa155..daf205eb5b8 100644
--- a/chromium/base/strings/string_number_conversions.h
+++ b/chromium/base/strings/string_number_conversions.h
@@ -14,6 +14,7 @@
#include "base/base_export.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
+#include "build/build_config.h"
// ----------------------------------------------------------------------------
// IMPORTANT MESSAGE FROM YOUR SPONSOR
@@ -39,24 +40,57 @@ namespace base {
// Number -> string conversions ------------------------------------------------
-BASE_EXPORT std::string IntToString(int value);
-BASE_EXPORT string16 IntToString16(int value);
-
-BASE_EXPORT std::string UintToString(unsigned value);
-BASE_EXPORT string16 UintToString16(unsigned value);
-
-BASE_EXPORT std::string Int64ToString(int64_t value);
-BASE_EXPORT string16 Int64ToString16(int64_t value);
-
-BASE_EXPORT std::string Uint64ToString(uint64_t value);
-BASE_EXPORT string16 Uint64ToString16(uint64_t value);
-
-BASE_EXPORT std::string SizeTToString(size_t value);
-BASE_EXPORT string16 SizeTToString16(size_t value);
-
-// DoubleToString converts the double to a string format that ignores the
-// locale. If you want to use locale specific formatting, use ICU.
-BASE_EXPORT std::string DoubleToString(double value);
+// Ignores locale! see warning above.
+BASE_EXPORT std::string NumberToString(int32_t value);
+BASE_EXPORT std::string NumberToString(uint32_t value);
+BASE_EXPORT std::string NumberToString(int64_t value);
+BASE_EXPORT std::string NumberToString(uint64_t value);
+BASE_EXPORT std::string NumberToString(double value);
+
+BASE_EXPORT base::string16 NumberToString16(int32_t value);
+BASE_EXPORT base::string16 NumberToString16(uint32_t value);
+BASE_EXPORT base::string16 NumberToString16(int64_t value);
+BASE_EXPORT base::string16 NumberToString16(uint64_t value);
+BASE_EXPORT base::string16 NumberToString16(double value);
+
+// Compilers seem to disagree about whether size_t is a different name for
+// uint32_t/uint64_t, or whether it's a completely different type that requires
+// a conversion. Therefore, a size_t version must exist for some compilers (to
+// avoid ambiguous call errors), but must not exist for others (to avoid
+// multiple definition errors).
+#if defined(OS_MACOSX)
+BASE_EXPORT std::string NumberToString(size_t value);
+BASE_EXPORT base::string16 NumberToString16(size_t value);
+#endif
+
+// Type-specific naming for backwards compatibility.
+//
+// TODO(brettw) these should be removed and callers converted to the overloaded
+// "NumberToString" variant.
+inline std::string IntToString(int value) {
+ return NumberToString(value);
+}
+inline string16 IntToString16(int value) {
+ return NumberToString16(value);
+}
+inline std::string UintToString(unsigned value) {
+ return NumberToString(value);
+}
+inline string16 UintToString16(unsigned value) {
+ return NumberToString16(value);
+}
+inline std::string Int64ToString(int64_t value) {
+ return NumberToString(value);
+}
+inline string16 Int64ToString16(int64_t value) {
+ return NumberToString16(value);
+}
+inline std::string Uint64ToString(uint64_t value) {
+ return NumberToString(value);
+}
+inline string16 Uint64ToString16(uint64_t value) {
+ return NumberToString16(value);
+}
// String -> number conversions ------------------------------------------------
diff --git a/chromium/base/strings/string_number_conversions_unittest.cc b/chromium/base/strings/string_number_conversions_unittest.cc
index aa11b93a46f..d969450667a 100644
--- a/chromium/base/strings/string_number_conversions_unittest.cc
+++ b/chromium/base/strings/string_number_conversions_unittest.cc
@@ -25,7 +25,7 @@ namespace base {
namespace {
template <typename INT>
-struct IntToStringTest {
+struct NumberToStringTest {
INT num;
const char* sexpected;
const char* uexpected;
@@ -33,14 +33,14 @@ struct IntToStringTest {
} // namespace
-TEST(StringNumberConversionsTest, IntToString) {
- static const IntToStringTest<int> int_tests[] = {
- { 0, "0", "0" },
- { -1, "-1", "4294967295" },
- { std::numeric_limits<int>::max(), "2147483647", "2147483647" },
- { std::numeric_limits<int>::min(), "-2147483648", "2147483648" },
+TEST(StringNumberConversionsTest, NumberToString) {
+ static const NumberToStringTest<int> int_tests[] = {
+ {0, "0", "0"},
+ {-1, "-1", "4294967295"},
+ {std::numeric_limits<int>::max(), "2147483647", "2147483647"},
+ {std::numeric_limits<int>::min(), "-2147483648", "2147483648"},
};
- static const IntToStringTest<int64_t> int64_tests[] = {
+ static const NumberToStringTest<int64_t> int64_tests[] = {
{0, "0", "0"},
{-1, "-1", "18446744073709551615"},
{
@@ -52,18 +52,20 @@ TEST(StringNumberConversionsTest, IntToString) {
};
for (size_t i = 0; i < arraysize(int_tests); ++i) {
- const IntToStringTest<int>* test = &int_tests[i];
- EXPECT_EQ(IntToString(test->num), test->sexpected);
- EXPECT_EQ(IntToString16(test->num), UTF8ToUTF16(test->sexpected));
- EXPECT_EQ(UintToString(test->num), test->uexpected);
- EXPECT_EQ(UintToString16(test->num), UTF8ToUTF16(test->uexpected));
+ const NumberToStringTest<int>& test = int_tests[i];
+ EXPECT_EQ(NumberToString(test.num), test.sexpected);
+ EXPECT_EQ(NumberToString16(test.num), UTF8ToUTF16(test.sexpected));
+ EXPECT_EQ(NumberToString(static_cast<unsigned>(test.num)), test.uexpected);
+ EXPECT_EQ(NumberToString16(static_cast<unsigned>(test.num)),
+ UTF8ToUTF16(test.uexpected));
}
for (size_t i = 0; i < arraysize(int64_tests); ++i) {
- const IntToStringTest<int64_t>* test = &int64_tests[i];
- EXPECT_EQ(Int64ToString(test->num), test->sexpected);
- EXPECT_EQ(Int64ToString16(test->num), UTF8ToUTF16(test->sexpected));
- EXPECT_EQ(Uint64ToString(test->num), test->uexpected);
- EXPECT_EQ(Uint64ToString16(test->num), UTF8ToUTF16(test->uexpected));
+ const NumberToStringTest<int64_t>& test = int64_tests[i];
+ EXPECT_EQ(NumberToString(test.num), test.sexpected);
+ EXPECT_EQ(NumberToString16(test.num), UTF8ToUTF16(test.sexpected));
+ EXPECT_EQ(NumberToString(static_cast<uint64_t>(test.num)), test.uexpected);
+ EXPECT_EQ(NumberToString16(static_cast<uint64_t>(test.num)),
+ UTF8ToUTF16(test.uexpected));
}
}
@@ -79,7 +81,7 @@ TEST(StringNumberConversionsTest, Uint64ToString) {
};
for (size_t i = 0; i < arraysize(cases); ++i)
- EXPECT_EQ(cases[i].output, Uint64ToString(cases[i].input));
+ EXPECT_EQ(cases[i].output, NumberToString(cases[i].input));
}
TEST(StringNumberConversionsTest, SizeTToString) {
@@ -102,7 +104,7 @@ TEST(StringNumberConversionsTest, SizeTToString) {
};
for (size_t i = 0; i < arraysize(cases); ++i)
- EXPECT_EQ(cases[i].output, SizeTToString(cases[i].input));
+ EXPECT_EQ(cases[i].output, NumberToString(cases[i].input));
}
TEST(StringNumberConversionsTest, StringToInt) {
@@ -826,23 +828,24 @@ TEST(StringNumberConversionsTest, DoubleToString) {
};
for (size_t i = 0; i < arraysize(cases); ++i) {
- EXPECT_EQ(cases[i].expected, DoubleToString(cases[i].input));
+ EXPECT_EQ(cases[i].expected, NumberToString(cases[i].input));
+ EXPECT_EQ(cases[i].expected, UTF16ToUTF8(NumberToString16(cases[i].input)));
}
// The following two values were seen in crashes in the wild.
const char input_bytes[8] = {0, 0, 0, 0, '\xee', '\x6d', '\x73', '\x42'};
double input = 0;
memcpy(&input, input_bytes, arraysize(input_bytes));
- EXPECT_EQ("1335179083776", DoubleToString(input));
+ EXPECT_EQ("1335179083776", NumberToString(input));
const char input_bytes2[8] =
{0, 0, 0, '\xa0', '\xda', '\x6c', '\x73', '\x42'};
input = 0;
memcpy(&input, input_bytes2, arraysize(input_bytes2));
- EXPECT_EQ("1334890332160", DoubleToString(input));
+ EXPECT_EQ("1334890332160", NumberToString(input));
}
TEST(StringNumberConversionsTest, HexEncode) {
- std::string hex(HexEncode(NULL, 0));
+ std::string hex(HexEncode(nullptr, 0));
EXPECT_EQ(hex.length(), 0U);
unsigned char bytes[] = {0x01, 0xff, 0x02, 0xfe, 0x03, 0x80, 0x81};
hex = HexEncode(bytes, sizeof(bytes));
diff --git a/chromium/base/strings/string_piece_unittest.cc b/chromium/base/strings/string_piece_unittest.cc
index 7dfd71116bc..40e21e99588 100644
--- a/chromium/base/strings/string_piece_unittest.cc
+++ b/chromium/base/strings/string_piece_unittest.cc
@@ -158,7 +158,7 @@ TYPED_TEST(CommonStringPieceTest, CheckSTL) {
ASSERT_EQ(*d.data(), static_cast<typename TypeParam::value_type>('f'));
ASSERT_EQ(d.data()[5], static_cast<typename TypeParam::value_type>('r'));
- ASSERT_TRUE(e.data() == NULL);
+ ASSERT_EQ(e.data(), nullptr);
ASSERT_EQ(*a.begin(), static_cast<typename TypeParam::value_type>('a'));
ASSERT_EQ(*(b.begin() + 2), static_cast<typename TypeParam::value_type>('c'));
@@ -168,7 +168,7 @@ TYPED_TEST(CommonStringPieceTest, CheckSTL) {
ASSERT_EQ(*(b.rbegin() + 2),
static_cast<typename TypeParam::value_type>('a'));
ASSERT_EQ(*(c.rend() - 1), static_cast<typename TypeParam::value_type>('x'));
- ASSERT_TRUE(a.rbegin() + 26 == a.rend());
+ ASSERT_EQ(a.rbegin() + 26, a.rend());
ASSERT_EQ(a.size(), 26U);
ASSERT_EQ(b.size(), 3U);
@@ -179,16 +179,16 @@ TYPED_TEST(CommonStringPieceTest, CheckSTL) {
ASSERT_TRUE(!d.empty());
ASSERT_TRUE(d.begin() != d.end());
- ASSERT_TRUE(d.begin() + 6 == d.end());
+ ASSERT_EQ(d.begin() + 6, d.end());
ASSERT_TRUE(e.empty());
- ASSERT_TRUE(e.begin() == e.end());
+ ASSERT_EQ(e.begin(), e.end());
d.clear();
ASSERT_EQ(d.size(), 0U);
ASSERT_TRUE(d.empty());
- ASSERT_TRUE(d.data() == NULL);
- ASSERT_TRUE(d.begin() == d.end());
+ ASSERT_EQ(d.data(), nullptr);
+ ASSERT_EQ(d.begin(), d.end());
ASSERT_GE(a.max_size(), a.capacity());
ASSERT_GE(a.capacity(), a.size());
@@ -517,13 +517,13 @@ TYPED_TEST(CommonStringPieceTest, CheckCustom) {
// as_string
TypeParam s3(a.as_string().c_str(), 7); // Note, has an embedded NULL
- ASSERT_TRUE(c == s3);
+ ASSERT_EQ(c, s3);
TypeParam s4(e.as_string());
ASSERT_TRUE(s4.empty());
// operator STRING_TYPE()
TypeParam s5(TypeParam(a).c_str(), 7); // Note, has an embedded NULL
- ASSERT_TRUE(c == s5);
+ ASSERT_EQ(c, s5);
TypeParam s6(e);
ASSERT_TRUE(s6.empty());
}
@@ -591,12 +591,12 @@ TEST(StringPieceTest, CheckCustom) {
TYPED_TEST(CommonStringPieceTest, CheckNULL) {
// we used to crash here, but now we don't.
- BasicStringPiece<TypeParam> s(NULL);
- ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
+ BasicStringPiece<TypeParam> s(nullptr);
+ ASSERT_EQ(s.data(), nullptr);
ASSERT_EQ(s.size(), 0U);
- s.set(NULL);
- ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
+ s.set(nullptr);
+ ASSERT_EQ(s.data(), nullptr);
ASSERT_EQ(s.size(), 0U);
TypeParam str(s);
@@ -615,7 +615,7 @@ TYPED_TEST(CommonStringPieceTest, CheckComparisons2) {
BasicStringPiece<TypeParam> abc(alphabet);
// check comparison operations on strings longer than 4 bytes.
- ASSERT_TRUE(abc == BasicStringPiece<TypeParam>(alphabet));
+ ASSERT_EQ(abc, BasicStringPiece<TypeParam>(alphabet));
ASSERT_EQ(abc.compare(BasicStringPiece<TypeParam>(alphabet)), 0);
ASSERT_TRUE(abc < BasicStringPiece<TypeParam>(alphabet_z));
@@ -650,8 +650,8 @@ TYPED_TEST(CommonStringPieceTest, StringCompareNotAmbiguous) {
TYPED_TEST(CommonStringPieceTest, HeterogenousStringPieceEquals) {
TypeParam hello(TestFixture::as_string("hello"));
- ASSERT_TRUE(BasicStringPiece<TypeParam>(hello) == hello);
- ASSERT_TRUE(hello.c_str() == BasicStringPiece<TypeParam>(hello));
+ ASSERT_EQ(BasicStringPiece<TypeParam>(hello), hello);
+ ASSERT_EQ(hello.c_str(), BasicStringPiece<TypeParam>(hello));
}
// string16-specific stuff
@@ -684,20 +684,26 @@ TYPED_TEST(CommonStringPieceTest, CheckConstructors) {
TypeParam str(TestFixture::as_string("hello world"));
TypeParam empty;
- ASSERT_TRUE(str == BasicStringPiece<TypeParam>(str));
- ASSERT_TRUE(str == BasicStringPiece<TypeParam>(str.c_str()));
+ ASSERT_EQ(str, BasicStringPiece<TypeParam>(str));
+ ASSERT_EQ(str, BasicStringPiece<TypeParam>(str.c_str()));
ASSERT_TRUE(TestFixture::as_string("hello") ==
BasicStringPiece<TypeParam>(str.c_str(), 5));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(str.c_str(),
- static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(NULL));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(NULL,
- static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>());
- ASSERT_TRUE(str == BasicStringPiece<TypeParam>(str.begin(), str.end()));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(str.begin(), str.begin()));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(empty));
- ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(empty.begin(), empty.end()));
+ ASSERT_EQ(
+ empty,
+ BasicStringPiece<TypeParam>(
+ str.c_str(),
+ static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
+ ASSERT_EQ(empty, BasicStringPiece<TypeParam>(nullptr));
+ ASSERT_TRUE(
+ empty ==
+ BasicStringPiece<TypeParam>(
+ nullptr,
+ static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
+ ASSERT_EQ(empty, BasicStringPiece<TypeParam>());
+ ASSERT_EQ(str, BasicStringPiece<TypeParam>(str.begin(), str.end()));
+ ASSERT_EQ(empty, BasicStringPiece<TypeParam>(str.begin(), str.begin()));
+ ASSERT_EQ(empty, BasicStringPiece<TypeParam>(empty));
+ ASSERT_EQ(empty, BasicStringPiece<TypeParam>(empty.begin(), empty.end()));
}
} // namespace base
diff --git a/chromium/base/strings/string_util.cc b/chromium/base/strings/string_util.cc
index d538567fdb1..2112f2309f7 100644
--- a/chromium/base/strings/string_util.cc
+++ b/chromium/base/strings/string_util.cc
@@ -36,7 +36,7 @@ namespace {
// prevents other code that might accidentally use Singleton<string> from
// getting our internal one.
struct EmptyStrings {
- EmptyStrings() {}
+ EmptyStrings() = default;
const std::string s;
const string16 s16;
diff --git a/chromium/base/strings/string_util.h b/chromium/base/strings/string_util.h
index 35b2603786e..55ceb44504d 100644
--- a/chromium/base/strings/string_util.h
+++ b/chromium/base/strings/string_util.h
@@ -202,7 +202,8 @@ enum TrimPositions {
};
// Removes characters in |trim_chars| from the beginning and end of |input|.
-// The 8-bit version only works on 8-bit characters, not UTF-8.
+// The 8-bit version only works on 8-bit characters, not UTF-8. Returns true if
+// any characters were removed.
//
// It is safe to use the same variable for both |input| and |output| (this is
// the normal usage to trim in-place).
@@ -246,7 +247,7 @@ BASE_EXPORT TrimPositions TrimWhitespaceASCII(const std::string& input,
BASE_EXPORT StringPiece TrimWhitespaceASCII(StringPiece input,
TrimPositions positions);
-// Searches for CR or LF characters. Removes all contiguous whitespace
+// Searches for CR or LF characters. Removes all contiguous whitespace
// strings that contain them. This is useful when trying to deal with text
// copied from terminals.
// Returns |text|, with the following three transformations:
@@ -426,9 +427,6 @@ BASE_EXPORT void ReplaceSubstringsAfterOffset(
// to this function (probably 0).
BASE_EXPORT char* WriteInto(std::string* str, size_t length_with_null);
BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
-#ifndef OS_WIN
-BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
-#endif
// Does the opposite of SplitString()/SplitStringPiece(). Joins a vector or list
// of strings into a single string, inserting |separator| (which may be empty)
diff --git a/chromium/base/strings/sys_string_conversions_posix.cc b/chromium/base/strings/sys_string_conversions_posix.cc
index 35acaa009a5..cc039db79f8 100644
--- a/chromium/base/strings/sys_string_conversions_posix.cc
+++ b/chromium/base/strings/sys_string_conversions_posix.cc
@@ -109,7 +109,7 @@ std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
memset(&ps, 0, sizeof(ps));
for (size_t i = 0; i < native_mb.size(); ) {
const char* src = native_mb.data() + i;
- size_t res = mbrtowc(NULL, src, native_mb.size() - i, &ps);
+ size_t res = mbrtowc(nullptr, src, native_mb.size() - i, &ps);
switch (res) {
// Handle any errors and return an empty string.
case static_cast<size_t>(-2):
diff --git a/chromium/base/strings/utf_offset_string_conversions_unittest.cc b/chromium/base/strings/utf_offset_string_conversions_unittest.cc
index c365d9e57ff..c5ce647a996 100644
--- a/chromium/base/strings/utf_offset_string_conversions_unittest.cc
+++ b/chromium/base/strings/utf_offset_string_conversions_unittest.cc
@@ -30,7 +30,7 @@ TEST(UTFOffsetStringConversionsTest, AdjustOffset) {
{"", kNpos, kNpos},
{"\xe4\xbd\xa0\xe5\xa5\xbd", 1, kNpos},
{"\xe4\xbd\xa0\xe5\xa5\xbd", 3, 1},
- {"\xed\xb0\x80z", 3, 1},
+ {"\xed\xb0\x80z", 3, 3},
{"A\xF0\x90\x8C\x80z", 1, 1},
{"A\xF0\x90\x8C\x80z", 2, kNpos},
{"A\xF0\x90\x8C\x80z", 5, 3},
diff --git a/chromium/base/strings/utf_string_conversions_unittest.cc b/chromium/base/strings/utf_string_conversions_unittest.cc
index 810771357a2..6f5e60cb954 100644
--- a/chromium/base/strings/utf_string_conversions_unittest.cc
+++ b/chromium/base/strings/utf_string_conversions_unittest.cc
@@ -83,9 +83,9 @@ TEST(UTFStringConversionsTest, ConvertUTF8ToWide) {
// Truncated off the end.
{"\xe5\xa5\xbd\xe4\xa0", L"\x597d\xfffd", false},
// Non-shortest-form UTF-8.
- {"\xf0\x84\xbd\xa0\xe5\xa5\xbd", L"\xfffd\x597d", false},
+ {"\xf0\x84\xbd\xa0\xe5\xa5\xbd", L"\xfffd\xfffd\xfffd\xfffd\x597d", false},
// This UTF-8 character decodes to a UTF-16 surrogate, which is illegal.
- {"\xed\xb0\x80", L"\xfffd", false},
+ {"\xed\xb0\x80", L"\xfffd\xfffd\xfffd", false},
// Non-BMP characters. The second is a non-character regarded as valid.
// The result will either be in UTF-16 or UTF-32.
#if defined(WCHAR_T_IS_UTF16)
diff --git a/chromium/base/supports_user_data_unittest.cc b/chromium/base/supports_user_data_unittest.cc
index d5da9a175ae..2e0a724bda9 100644
--- a/chromium/base/supports_user_data_unittest.cc
+++ b/chromium/base/supports_user_data_unittest.cc
@@ -21,7 +21,7 @@ struct UsesItself : public SupportsUserData::Data {
}
~UsesItself() override {
- EXPECT_EQ(NULL, supports_user_data_->GetUserData(key_));
+ EXPECT_EQ(nullptr, supports_user_data_->GetUserData(key_));
}
SupportsUserData* supports_user_data_;
diff --git a/chromium/base/sync_socket_posix.cc b/chromium/base/sync_socket_posix.cc
index a67592837d5..ff1e0e6caa3 100644
--- a/chromium/base/sync_socket_posix.cc
+++ b/chromium/base/sync_socket_posix.cc
@@ -120,12 +120,12 @@ bool SyncSocket::Close() {
}
size_t SyncSocket::Send(const void* buffer, size_t length) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
return SendHelper(handle_, buffer, length);
}
size_t SyncSocket::Receive(void* buffer, size_t length) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK_GT(length, 0u);
DCHECK_LE(length, kMaxMessageLength);
DCHECK_NE(handle_, kInvalidHandle);
@@ -138,7 +138,7 @@ size_t SyncSocket::Receive(void* buffer, size_t length) {
size_t SyncSocket::ReceiveWithTimeout(void* buffer,
size_t length,
TimeDelta timeout) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK_GT(length, 0u);
DCHECK_LE(length, kMaxMessageLength);
DCHECK_NE(handle_, kInvalidHandle);
@@ -212,7 +212,7 @@ SyncSocket::Handle SyncSocket::Release() {
return r;
}
-CancelableSyncSocket::CancelableSyncSocket() {}
+CancelableSyncSocket::CancelableSyncSocket() = default;
CancelableSyncSocket::CancelableSyncSocket(Handle handle)
: SyncSocket(handle) {
}
diff --git a/chromium/base/sync_socket_unittest.cc b/chromium/base/sync_socket_unittest.cc
index 202aa2c764a..fdcd9a1cc67 100644
--- a/chromium/base/sync_socket_unittest.cc
+++ b/chromium/base/sync_socket_unittest.cc
@@ -30,7 +30,7 @@ class HangingReceiveThread : public DelegateSimpleThread::Delegate {
thread_.Start();
}
- ~HangingReceiveThread() override {}
+ ~HangingReceiveThread() override = default;
void Run() override {
int data = 0;
diff --git a/chromium/base/sync_socket_win.cc b/chromium/base/sync_socket_win.cc
index 797f12f72dc..905d0a2e9b8 100644
--- a/chromium/base/sync_socket_win.cc
+++ b/chromium/base/sync_socket_win.cc
@@ -123,7 +123,7 @@ size_t CancelableFileOperation(Function operation,
WaitableEvent* cancel_event,
CancelableSyncSocket* socket,
DWORD timeout_in_ms) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// The buffer must be byte size or the length check won't make much sense.
static_assert(sizeof(buffer[0]) == sizeof(char), "incorrect buffer type");
DCHECK_GT(length, 0u);
@@ -245,7 +245,7 @@ bool SyncSocket::Close() {
}
size_t SyncSocket::Send(const void* buffer, size_t length) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK_GT(length, 0u);
DCHECK_LE(length, kMaxMessageLength);
DCHECK_NE(handle_, kInvalidHandle);
@@ -253,8 +253,8 @@ size_t SyncSocket::Send(const void* buffer, size_t length) {
while (count < length) {
DWORD len;
DWORD chunk = GetNextChunkSize(count, length);
- if (WriteFile(handle_, static_cast<const char*>(buffer) + count,
- chunk, &len, NULL) == FALSE) {
+ if (::WriteFile(handle_, static_cast<const char*>(buffer) + count, chunk,
+ &len, NULL) == FALSE) {
return count;
}
count += len;
@@ -270,7 +270,7 @@ size_t SyncSocket::ReceiveWithTimeout(void* buffer,
}
size_t SyncSocket::Receive(void* buffer, size_t length) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK_GT(length, 0u);
DCHECK_LE(length, kMaxMessageLength);
DCHECK_NE(handle_, kInvalidHandle);
@@ -278,8 +278,8 @@ size_t SyncSocket::Receive(void* buffer, size_t length) {
while (count < length) {
DWORD len;
DWORD chunk = GetNextChunkSize(count, length);
- if (ReadFile(handle_, static_cast<char*>(buffer) + count,
- chunk, &len, NULL) == FALSE) {
+ if (::ReadFile(handle_, static_cast<char*>(buffer) + count, chunk, &len,
+ NULL) == FALSE) {
return count;
}
count += len;
@@ -328,23 +328,23 @@ bool CancelableSyncSocket::Close() {
size_t CancelableSyncSocket::Send(const void* buffer, size_t length) {
static const DWORD kWaitTimeOutInMs = 500;
return CancelableFileOperation(
- &WriteFile, handle_, reinterpret_cast<const char*>(buffer),
- length, &file_operation_, &shutdown_event_, this, kWaitTimeOutInMs);
+ &::WriteFile, handle_, reinterpret_cast<const char*>(buffer), length,
+ &file_operation_, &shutdown_event_, this, kWaitTimeOutInMs);
}
size_t CancelableSyncSocket::Receive(void* buffer, size_t length) {
return CancelableFileOperation(
- &ReadFile, handle_, reinterpret_cast<char*>(buffer), length,
+ &::ReadFile, handle_, reinterpret_cast<char*>(buffer), length,
&file_operation_, &shutdown_event_, this, INFINITE);
}
size_t CancelableSyncSocket::ReceiveWithTimeout(void* buffer,
size_t length,
TimeDelta timeout) {
- return CancelableFileOperation(
- &ReadFile, handle_, reinterpret_cast<char*>(buffer), length,
- &file_operation_, &shutdown_event_, this,
- static_cast<DWORD>(timeout.InMilliseconds()));
+ return CancelableFileOperation(&::ReadFile, handle_,
+ reinterpret_cast<char*>(buffer), length,
+ &file_operation_, &shutdown_event_, this,
+ static_cast<DWORD>(timeout.InMilliseconds()));
}
// static
diff --git a/chromium/base/synchronization/condition_variable_unittest.cc b/chromium/base/synchronization/condition_variable_unittest.cc
index ebdbe5776b9..1aa1a4a41d2 100644
--- a/chromium/base/synchronization/condition_variable_unittest.cc
+++ b/chromium/base/synchronization/condition_variable_unittest.cc
@@ -201,9 +201,9 @@ void BackInTime(Lock* lock) {
AutoLock auto_lock(*lock);
timeval tv;
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, nullptr);
tv.tv_sec -= kDiscontinuitySeconds;
- settimeofday(&tv, NULL);
+ settimeofday(&tv, nullptr);
}
// Tests that TimedWait ignores changes to the system clock.
@@ -212,9 +212,9 @@ void BackInTime(Lock* lock) {
// http://crbug.com/293736
TEST_F(ConditionVariableTest, DISABLED_TimeoutAcrossSetTimeOfDay) {
timeval tv;
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, nullptr);
tv.tv_sec += kDiscontinuitySeconds;
- if (settimeofday(&tv, NULL) < 0) {
+ if (settimeofday(&tv, nullptr) < 0) {
PLOG(ERROR) << "Could not set time of day. Run as root?";
return;
}
diff --git a/chromium/base/synchronization/lock_impl_posix.cc b/chromium/base/synchronization/lock_impl_posix.cc
index 5ad337d9f71..3bfd9c2e5d2 100644
--- a/chromium/base/synchronization/lock_impl_posix.cc
+++ b/chromium/base/synchronization/lock_impl_posix.cc
@@ -58,6 +58,17 @@ bool LockImpl::Try() {
}
void LockImpl::Lock() {
+ // The ScopedLockAcquireActivity below is relatively expensive and so its
+ // actions can become significant due to the very large number of locks
+ // that tend to be used throughout the build. To avoid this cost in the
+ // vast majority of the calls, simply "try" the lock first and only do the
+ // (tracked) blocking call if that fails. Since "try" itself is a system
+ // call, and thus also somewhat expensive, don't bother with it unless
+ // tracking is actually enabled.
+ if (base::debug::GlobalActivityTracker::IsEnabled())
+ if (Try())
+ return;
+
base::debug::ScopedLockAcquireActivity lock_activity(this);
int rv = pthread_mutex_lock(&native_handle_);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
diff --git a/chromium/base/synchronization/lock_impl_win.cc b/chromium/base/synchronization/lock_impl_win.cc
index 7a2ff72fc8d..80a5316e6e2 100644
--- a/chromium/base/synchronization/lock_impl_win.cc
+++ b/chromium/base/synchronization/lock_impl_win.cc
@@ -18,6 +18,17 @@ bool LockImpl::Try() {
}
void LockImpl::Lock() {
+ // The ScopedLockAcquireActivity below is relatively expensive and so its
+ // actions can become significant due to the very large number of locks
+ // that tend to be used throughout the build. To avoid this cost in the
+ // vast majority of the calls, simply "try" the lock first and only do the
+ // (tracked) blocking call if that fails. Since "try" itself is a system
+ // call, and thus also somewhat expensive, don't bother with it unless
+ // tracking is actually enabled.
+ if (base::debug::GlobalActivityTracker::IsEnabled())
+ if (Try())
+ return;
+
base::debug::ScopedLockAcquireActivity lock_activity(this);
::AcquireSRWLockExclusive(&native_handle_);
}
diff --git a/chromium/base/synchronization/lock_unittest.cc b/chromium/base/synchronization/lock_unittest.cc
index 27f335e2cc5..1e2f9981daa 100644
--- a/chromium/base/synchronization/lock_unittest.cc
+++ b/chromium/base/synchronization/lock_unittest.cc
@@ -7,6 +7,7 @@
#include <stdlib.h>
#include "base/compiler_specific.h"
+#include "base/debug/activity_tracker.h"
#include "base/macros.h"
#include "base/threading/platform_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -146,6 +147,47 @@ TEST(LockTest, TryLock) {
lock.Release();
}
+TEST(LockTest, TryTrackedLock) {
+ // Enable the activity tracker.
+ debug::GlobalActivityTracker::CreateWithLocalMemory(64 << 10, 0, "", 3, 0);
+
+ Lock lock;
+
+ ASSERT_TRUE(lock.Try());
+ // We now have the lock....
+
+ // This thread will not be able to get the lock.
+ {
+ TryLockTestThread thread(&lock);
+ PlatformThreadHandle handle;
+
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+ PlatformThread::Join(handle);
+
+ ASSERT_FALSE(thread.got_lock());
+ }
+
+ lock.Release();
+
+ // This thread will....
+ {
+ TryLockTestThread thread(&lock);
+ PlatformThreadHandle handle;
+
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+ PlatformThread::Join(handle);
+
+ ASSERT_TRUE(thread.got_lock());
+ // But it released it....
+ ASSERT_TRUE(lock.Try());
+ }
+
+ lock.Release();
+ debug::GlobalActivityTracker::ReleaseForTesting();
+}
+
// Tests that locks actually exclude -------------------------------------------
class MutexLockTestThread : public PlatformThread::Delegate {
diff --git a/chromium/base/synchronization/waitable_event_posix.cc b/chromium/base/synchronization/waitable_event_posix.cc
index 51ab227114e..9799e7d0315 100644
--- a/chromium/base/synchronization/waitable_event_posix.cc
+++ b/chromium/base/synchronization/waitable_event_posix.cc
@@ -89,11 +89,7 @@ bool WaitableEvent::IsSignaled() {
class SyncWaiter : public WaitableEvent::Waiter {
public:
SyncWaiter()
- : fired_(false),
- signaling_event_(NULL),
- lock_(),
- cv_(&lock_) {
- }
+ : fired_(false), signaling_event_(nullptr), lock_(), cv_(&lock_) {}
bool Fire(WaitableEvent* signaling_event) override {
base::AutoLock locked(lock_);
diff --git a/chromium/base/synchronization/waitable_event_watcher_posix.cc b/chromium/base/synchronization/waitable_event_watcher_posix.cc
index 47fa2df7ae0..21368a87104 100644
--- a/chromium/base/synchronization/waitable_event_watcher_posix.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_posix.cc
@@ -46,7 +46,7 @@ class Flag : public RefCountedThreadSafe<Flag> {
private:
friend class RefCountedThreadSafe<Flag>;
- ~Flag() {}
+ ~Flag() = default;
mutable Lock lock_;
bool flag_;
@@ -171,7 +171,7 @@ void WaitableEventWatcher::StopWatching() {
if (cancel_flag_->value()) {
// In this case, the event has fired, but we haven't figured that out yet.
// The WaitableEvent may have been deleted too.
- cancel_flag_ = NULL;
+ cancel_flag_ = nullptr;
return;
}
@@ -185,7 +185,7 @@ void WaitableEventWatcher::StopWatching() {
// delegate getting called. If the task has run then we have the last
// reference to the flag and it will be deleted immedately after.
cancel_flag_->Set();
- cancel_flag_ = NULL;
+ cancel_flag_ = nullptr;
return;
}
@@ -211,7 +211,7 @@ void WaitableEventWatcher::StopWatching() {
// have been enqueued with the MessageLoop because the waiter was never
// signaled)
delete waiter_;
- cancel_flag_ = NULL;
+ cancel_flag_ = nullptr;
return;
}
@@ -220,7 +220,7 @@ void WaitableEventWatcher::StopWatching() {
// task on the SequencedTaskRunner, but to delete it instead. The Waiter
// deletes itself once run.
cancel_flag_->Set();
- cancel_flag_ = NULL;
+ cancel_flag_ = nullptr;
// If the waiter has already run then the task has been enqueued. If the Task
// hasn't yet run, the flag will stop the delegate from getting called. (This
diff --git a/chromium/base/sys_info_android.cc b/chromium/base/sys_info_android.cc
index b21bd2d56df..1d1710c72db 100644
--- a/chromium/base/sys_info_android.cc
+++ b/chromium/base/sys_info_android.cc
@@ -62,8 +62,8 @@ namespace {
// cannot be acquired. Use the latest Android release with a higher bug fix
// version to avoid unnecessarily comparison errors with the latest release.
// This should be manually kept up to date on each Android release.
-const int kDefaultAndroidMajorVersion = 7;
-const int kDefaultAndroidMinorVersion = 0;
+const int kDefaultAndroidMajorVersion = 8;
+const int kDefaultAndroidMinorVersion = 1;
const int kDefaultAndroidBugfixVersion = 99;
// Get and parse out the OS version numbers from the system properties.
diff --git a/chromium/base/sys_info_chromeos.cc b/chromium/base/sys_info_chromeos.cc
index 29f83845dc5..b406b4f3484 100644
--- a/chromium/base/sys_info_chromeos.cc
+++ b/chromium/base/sys_info_chromeos.cc
@@ -224,8 +224,7 @@ void SysInfo::SetChromeOSVersionInfoForTest(const std::string& lsb_release,
const Time& lsb_release_time) {
std::unique_ptr<Environment> env(Environment::Create());
env->SetVar(kLsbReleaseKey, lsb_release);
- env->SetVar(kLsbReleaseTimeKey,
- DoubleToString(lsb_release_time.ToDoubleT()));
+ env->SetVar(kLsbReleaseTimeKey, NumberToString(lsb_release_time.ToDoubleT()));
g_chrome_os_version_info.Get().Parse();
}
diff --git a/chromium/base/sys_info_posix.cc b/chromium/base/sys_info_posix.cc
index e30c5a3a2c1..472c5f43c8a 100644
--- a/chromium/base/sys_info_posix.cc
+++ b/chromium/base/sys_info_posix.cc
@@ -145,7 +145,7 @@ int64_t SysInfo::AmountOfVirtualMemory() {
// static
int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
int64_t available;
if (!GetDiskSpaceInfo(path, &available, nullptr))
@@ -155,7 +155,7 @@ int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
// static
int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
int64_t total;
if (!GetDiskSpaceInfo(path, nullptr, &total))
diff --git a/chromium/base/sys_info_win.cc b/chromium/base/sys_info_win.cc
index 3a3236b493d..094554919d6 100644
--- a/chromium/base/sys_info_win.cc
+++ b/chromium/base/sys_info_win.cc
@@ -82,7 +82,7 @@ int64_t SysInfo::AmountOfVirtualMemory() {
// static
int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
int64_t available;
if (!GetDiskSpaceInfo(path, &available, nullptr))
@@ -92,7 +92,7 @@ int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
// static
int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
int64_t total;
if (!GetDiskSpaceInfo(path, nullptr, &total))
diff --git a/chromium/base/system_monitor/system_monitor.cc b/chromium/base/system_monitor/system_monitor.cc
index 99152ab8a85..c19785ad618 100644
--- a/chromium/base/system_monitor/system_monitor.cc
+++ b/chromium/base/system_monitor/system_monitor.cc
@@ -12,7 +12,7 @@
namespace base {
-static SystemMonitor* g_system_monitor = NULL;
+static SystemMonitor* g_system_monitor = nullptr;
SystemMonitor::SystemMonitor()
: devices_changed_observer_list_(
@@ -23,7 +23,7 @@ SystemMonitor::SystemMonitor()
SystemMonitor::~SystemMonitor() {
DCHECK_EQ(this, g_system_monitor);
- g_system_monitor = NULL;
+ g_system_monitor = nullptr;
}
// static
diff --git a/chromium/base/task_runner.cc b/chromium/base/task_runner.cc
index bbba7028503..aae9f9ec4f3 100644
--- a/chromium/base/task_runner.cc
+++ b/chromium/base/task_runner.cc
@@ -51,9 +51,9 @@ bool TaskRunner::PostTaskAndReply(const Location& from_here,
from_here, std::move(task), std::move(reply));
}
-TaskRunner::TaskRunner() {}
+TaskRunner::TaskRunner() = default;
-TaskRunner::~TaskRunner() {}
+TaskRunner::~TaskRunner() = default;
void TaskRunner::OnDestruct() const {
delete this;
diff --git a/chromium/base/task_scheduler/OWNERS b/chromium/base/task_scheduler/OWNERS
index e4b383c9318..0f3ad5e8529 100644
--- a/chromium/base/task_scheduler/OWNERS
+++ b/chromium/base/task_scheduler/OWNERS
@@ -1,3 +1,6 @@
fdoray@chromium.org
gab@chromium.org
robliao@chromium.org
+
+# TEAM: scheduler-dev@chromium.org
+# COMPONENT: Internals>TaskScheduler
diff --git a/chromium/base/task_scheduler/post_task.cc b/chromium/base/task_scheduler/post_task.cc
index 9c297b8dc86..15210a55564 100644
--- a/chromium/base/task_scheduler/post_task.cc
+++ b/chromium/base/task_scheduler/post_task.cc
@@ -15,9 +15,10 @@ namespace base {
namespace {
-class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
+class PostTaskAndReplyWithTraitsTaskRunner
+ : public internal::PostTaskAndReplyImpl {
public:
- explicit PostTaskAndReplyTaskRunner(const TaskTraits& traits)
+ explicit PostTaskAndReplyWithTraitsTaskRunner(const TaskTraits& traits)
: traits_(traits) {}
private:
@@ -81,7 +82,7 @@ void PostTaskWithTraitsAndReply(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
OnceClosure reply) {
- PostTaskAndReplyTaskRunner(traits).PostTaskAndReply(
+ PostTaskAndReplyWithTraitsTaskRunner(traits).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
index 63b82445d14..345a116ee12 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
@@ -591,21 +591,20 @@ class TaskSchedulerSingleThreadTaskRunnerManagerStartTest
TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerStartTest,
PostTaskBeforeStart) {
AtomicFlag manager_started;
- WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent task_finished(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
"A", TaskTraits(), SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(
FROM_HERE,
BindOnce(
- [](WaitableEvent* task_running, AtomicFlag* manager_started) {
- task_running->Signal();
-
+ [](WaitableEvent* task_finished, AtomicFlag* manager_started) {
// The task should not run before Start().
EXPECT_TRUE(manager_started->IsSet());
+ task_finished->Signal();
},
- Unretained(&task_running), Unretained(&manager_started)));
+ Unretained(&task_finished), Unretained(&manager_started)));
// Wait a little bit to make sure that the task doesn't run before start.
// Note: This test won't catch a case where the task runs between setting
@@ -615,8 +614,8 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerStartTest,
manager_started.Set();
single_thread_task_runner_manager_->Start();
- // This should not hang if the task runs after Start().
- task_running.Wait();
+ // Wait for the task to complete to keep |manager_started| alive.
+ task_finished.Wait();
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index a0e74dee4cc..5e510b12aaf 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -24,6 +24,13 @@
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_windows_thread_environment.h"
+#include "base/win/scoped_winrt_initializer.h"
+#include "base/win/windows_version.h"
+#endif // defined(OS_WIN)
+
namespace base {
namespace internal {
@@ -138,6 +145,10 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// returned a non-empty sequence and DidRunTask() hasn't been called yet).
bool is_running_task_ = false;
+#if defined(OS_WIN)
+ std::unique_ptr<win::ScopedWindowsThreadEnvironment> win_thread_environment_;
+#endif // defined(OS_WIN)
+
DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl);
};
@@ -182,7 +193,8 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
void SchedulerWorkerPoolImpl::Start(
const SchedulerWorkerPoolParams& params,
- scoped_refptr<TaskRunner> service_thread_task_runner) {
+ scoped_refptr<TaskRunner> service_thread_task_runner,
+ WorkerEnvironment worker_environment) {
AutoSchedulerLock auto_lock(lock_);
DCHECK(workers_.empty());
@@ -191,6 +203,7 @@ void SchedulerWorkerPoolImpl::Start(
initial_worker_capacity_ = worker_capacity_;
suggested_reclaim_time_ = params.suggested_reclaim_time();
backward_compatibility_ = params.backward_compatibility();
+ worker_environment_ = worker_environment;
service_thread_task_runner_ = std::move(service_thread_task_runner);
@@ -338,6 +351,18 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
#endif
}
+#if defined(OS_WIN)
+ if (outer_->worker_environment_ == WorkerEnvironment::COM_MTA) {
+ if (win::GetVersion() >= win::VERSION_WIN8) {
+ win_thread_environment_ = std::make_unique<win::ScopedWinrtInitializer>();
+ } else {
+ win_thread_environment_ = std::make_unique<win::ScopedCOMInitializer>(
+ win::ScopedCOMInitializer::kMTA);
+ }
+ DCHECK(win_thread_environment_->Succeeded());
+ }
+#endif // defined(OS_WIN)
+
DCHECK_EQ(num_tasks_since_last_wait_, 0U);
PlatformThread::SetName(
@@ -498,6 +523,10 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainExit(
DCHECK(!ContainsWorker(outer_->workers_, worker));
}
#endif
+
+#if defined(OS_WIN)
+ win_thread_environment_.reset();
+#endif // defined(OS_WIN)
}
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
index a469bcea1b9..49a2378585f 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -27,6 +27,7 @@
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
#include "base/time/time.h"
+#include "build/build_config.h"
namespace base {
@@ -46,6 +47,15 @@ class TaskTracker;
// This class is thread-safe.
class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
public:
+ enum class WorkerEnvironment {
+ // No special worker environment required.
+ NONE,
+#if defined(OS_WIN)
+ // Initialize a COM MTA on the worker.
+ COM_MTA,
+#endif // defined(OS_WIN)
+ };
+
// Constructs a pool without workers.
//
// |name| is used to label the pool's threads ("TaskScheduler" + |name| +
@@ -62,9 +72,12 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Creates workers following the |params| specification, allowing existing and
// future tasks to run. Uses |service_thread_task_runner| to monitor for
- // blocked threads in the pool. Can only be called once. CHECKs on failure.
+ // blocked threads in the pool. |worker_environment| specifies any requested
+ // environment to execute the tasks. Can only be called once.
+ // CHECKs on failure.
void Start(const SchedulerWorkerPoolParams& params,
- scoped_refptr<TaskRunner> service_thread_task_runner);
+ scoped_refptr<TaskRunner> service_thread_task_runner,
+ WorkerEnvironment worker_environment);
// Destroying a SchedulerWorkerPoolImpl returned by Create() is not allowed in
// production; it is always leaked. In tests, it can only be destroyed after
@@ -229,6 +242,9 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// but haven't caused a worker capacity increase yet.
int num_pending_may_block_workers_ = 0;
+ // Environment to be initialized per worker.
+ WorkerEnvironment worker_environment_ = WorkerEnvironment::NONE;
+
// Stack of idle workers. Initially, all workers are on this stack. A worker
// is removed from the stack before its WakeUp() function is called and when
// it receives work from GetWork() (a worker calls GetWork() when its sleep
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index d6074b75bf3..cc80d605c4c 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -43,8 +43,13 @@
#include "base/threading/thread_local_storage.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_WIN)
+#include "base/win/com_init_util.h"
+#endif // defined(OS_WIN)
+
namespace base {
namespace internal {
namespace {
@@ -64,11 +69,11 @@ class TaskSchedulerWorkerPoolImplTestBase {
TaskSchedulerWorkerPoolImplTestBase()
: service_thread_("TaskSchedulerServiceThread"){};
- void SetUp() {
+ void CommonSetUp() {
CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
}
- void TearDown() {
+ void CommonTearDown() {
service_thread_.Stop();
task_tracker_.Flush();
worker_pool_->WaitForAllWorkersIdleForTesting();
@@ -85,11 +90,13 @@ class TaskSchedulerWorkerPoolImplTestBase {
ASSERT_TRUE(worker_pool_);
}
- void StartWorkerPool(TimeDelta suggested_reclaim_time, size_t num_workers) {
+ virtual void StartWorkerPool(TimeDelta suggested_reclaim_time,
+ size_t num_workers) {
ASSERT_TRUE(worker_pool_);
worker_pool_->Start(
SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
- service_thread_.task_runner());
+ service_thread_.task_runner(),
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
}
void CreateAndStartWorkerPool(TimeDelta suggested_reclaim_time,
@@ -115,9 +122,11 @@ class TaskSchedulerWorkerPoolImplTest
protected:
TaskSchedulerWorkerPoolImplTest() = default;
- void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::SetUp(); }
+ void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }
- void TearDown() override { TaskSchedulerWorkerPoolImplTestBase::TearDown(); }
+ void TearDown() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+ }
private:
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTest);
@@ -129,9 +138,11 @@ class TaskSchedulerWorkerPoolImplTestParam
protected:
TaskSchedulerWorkerPoolImplTestParam() = default;
- void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::SetUp(); }
+ void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }
- void TearDown() override { TaskSchedulerWorkerPoolImplTestBase::TearDown(); }
+ void TearDown() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+ }
private:
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestParam);
@@ -255,6 +266,29 @@ TEST_P(TaskSchedulerWorkerPoolImplTestParam, Saturate) {
worker_pool_->WaitForAllWorkersIdleForTesting();
}
+#if defined(OS_WIN)
+TEST_P(TaskSchedulerWorkerPoolImplTestParam, NoEnvironment) {
+ // Verify that COM is not initialized in a SchedulerWorkerPoolImpl initialized
+ // with SchedulerWorkerPoolImpl::WorkerEnvironment::NONE.
+ scoped_refptr<TaskRunner> task_runner =
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
+
+ WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner->PostTask(
+ FROM_HERE, BindOnce(
+ [](WaitableEvent* task_running) {
+ win::AssertComApartmentType(win::ComApartmentType::NONE);
+ task_running->Signal();
+ },
+ &task_running));
+
+ task_running.Wait();
+
+ worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+#endif // defined(OS_WIN)
+
INSTANTIATE_TEST_CASE_P(Parallel,
TaskSchedulerWorkerPoolImplTestParam,
::testing::Values(test::ExecutionMode::PARALLEL));
@@ -262,6 +296,66 @@ INSTANTIATE_TEST_CASE_P(Sequenced,
TaskSchedulerWorkerPoolImplTestParam,
::testing::Values(test::ExecutionMode::SEQUENCED));
+#if defined(OS_WIN)
+
+namespace {
+
+class TaskSchedulerWorkerPoolImplTestCOMMTAParam
+ : public TaskSchedulerWorkerPoolImplTestBase,
+ public testing::TestWithParam<test::ExecutionMode> {
+ protected:
+ TaskSchedulerWorkerPoolImplTestCOMMTAParam() = default;
+
+ void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }
+
+ void TearDown() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+ }
+
+ private:
+ void StartWorkerPool(TimeDelta suggested_reclaim_time,
+ size_t num_workers) override {
+ ASSERT_TRUE(worker_pool_);
+ worker_pool_->Start(
+ SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
+ service_thread_.task_runner(),
+ SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA);
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestCOMMTAParam);
+};
+
+} // namespace
+
+TEST_P(TaskSchedulerWorkerPoolImplTestCOMMTAParam, COMMTAInitialized) {
+ // Verify that SchedulerWorkerPoolImpl workers have a COM MTA available.
+ scoped_refptr<TaskRunner> task_runner =
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
+
+ WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner->PostTask(
+ FROM_HERE, BindOnce(
+ [](WaitableEvent* task_running) {
+ win::AssertComApartmentType(win::ComApartmentType::MTA);
+ task_running->Signal();
+ },
+ &task_running));
+
+ task_running.Wait();
+
+ worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+
+INSTANTIATE_TEST_CASE_P(Parallel,
+ TaskSchedulerWorkerPoolImplTestCOMMTAParam,
+ ::testing::Values(test::ExecutionMode::PARALLEL));
+INSTANTIATE_TEST_CASE_P(Sequenced,
+ TaskSchedulerWorkerPoolImplTestCOMMTAParam,
+ ::testing::Values(test::ExecutionMode::SEQUENCED));
+
+#endif // defined(OS_WIN)
+
namespace {
class TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest
@@ -694,7 +788,8 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
"OnePolicyWorkerPool", ThreadPriority::NORMAL, &task_tracker,
&delayed_task_manager);
worker_pool->Start(SchedulerWorkerPoolParams(8U, TimeDelta::Max()),
- service_thread_task_runner);
+ service_thread_task_runner,
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
ASSERT_TRUE(worker_pool);
EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
worker_pool->JoinForTesting();
@@ -715,7 +810,8 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
&delayed_task_manager);
worker_pool->Start(
SchedulerWorkerPoolParams(worker_capacity, kReclaimTimeForCleanupTests),
- service_thread_task_runner);
+ service_thread_task_runner,
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
ASSERT_TRUE(worker_pool);
EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
@@ -819,12 +915,14 @@ class TaskSchedulerWorkerPoolBlockingTest
}
void SetUp() override {
- TaskSchedulerWorkerPoolImplTestBase::SetUp();
+ TaskSchedulerWorkerPoolImplTestBase::CommonSetUp();
task_runner_ =
worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
}
- void TearDown() override { TaskSchedulerWorkerPoolImplTestBase::TearDown(); }
+ void TearDown() override {
+ TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+ }
protected:
// Saturates the worker pool with a task that first blocks, waits to be
@@ -1237,7 +1335,8 @@ TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
&delayed_task_manager);
worker_pool.Start(
SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests),
- service_thread_task_runner);
+ service_thread_task_runner,
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
scoped_refptr<TaskRunner> task_runner =
worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
index 54c9c9a377c..818af0dde78 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
@@ -133,7 +133,8 @@ class TaskSchedulerWorkerPoolTest
scheduler_worker_pool_impl->Start(
SchedulerWorkerPoolParams(kNumWorkersInWorkerPool,
TimeDelta::Max()),
- service_thread_.task_runner());
+ service_thread_.task_runner(),
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
break;
}
#if defined(OS_WIN)
diff --git a/chromium/base/task_scheduler/task_scheduler.cc b/chromium/base/task_scheduler/task_scheduler.cc
index e01426e17a1..29f8db4d511 100644
--- a/chromium/base/task_scheduler/task_scheduler.cc
+++ b/chromium/base/task_scheduler/task_scheduler.cc
@@ -27,13 +27,15 @@ TaskScheduler::InitParams::InitParams(
const SchedulerWorkerPoolParams& background_worker_pool_params_in,
const SchedulerWorkerPoolParams& background_blocking_worker_pool_params_in,
const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
- const SchedulerWorkerPoolParams& foreground_blocking_worker_pool_params_in)
+ const SchedulerWorkerPoolParams& foreground_blocking_worker_pool_params_in,
+ SharedWorkerPoolEnvironment shared_worker_pool_environment_in)
: background_worker_pool_params(background_worker_pool_params_in),
background_blocking_worker_pool_params(
background_blocking_worker_pool_params_in),
foreground_worker_pool_params(foreground_worker_pool_params_in),
foreground_blocking_worker_pool_params(
- foreground_blocking_worker_pool_params_in) {}
+ foreground_blocking_worker_pool_params_in),
+ shared_worker_pool_environment(shared_worker_pool_environment_in) {}
TaskScheduler::InitParams::~InitParams() = default;
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index 6e30af00fe4..56393ab2e4f 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -23,7 +23,7 @@
#include "build/build_config.h"
namespace gin {
-class V8Platform;
+class V8BackgroundTaskRunner;
}
namespace content {
@@ -51,19 +51,31 @@ class Location;
class BASE_EXPORT TaskScheduler {
public:
struct BASE_EXPORT InitParams {
+ enum class SharedWorkerPoolEnvironment {
+ // Use the default environment (no environment).
+ DEFAULT,
+#if defined(OS_WIN)
+ // Place the worker in a COM MTA.
+ COM_MTA,
+#endif // defined(OS_WIN)
+ };
+
InitParams(
const SchedulerWorkerPoolParams& background_worker_pool_params_in,
const SchedulerWorkerPoolParams&
background_blocking_worker_pool_params_in,
const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
const SchedulerWorkerPoolParams&
- foreground_blocking_worker_pool_params_in);
+ foreground_blocking_worker_pool_params_in,
+ SharedWorkerPoolEnvironment shared_worker_pool_environment_in =
+ SharedWorkerPoolEnvironment::DEFAULT);
~InitParams();
SchedulerWorkerPoolParams background_worker_pool_params;
SchedulerWorkerPoolParams background_blocking_worker_pool_params;
SchedulerWorkerPoolParams foreground_worker_pool_params;
SchedulerWorkerPoolParams foreground_blocking_worker_pool_params;
+ SharedWorkerPoolEnvironment shared_worker_pool_environment;
};
// Destroying a TaskScheduler is not allowed in production; it is always
@@ -198,7 +210,7 @@ class BASE_EXPORT TaskScheduler {
static TaskScheduler* GetInstance();
private:
- friend class gin::V8Platform;
+ friend class gin::V8BackgroundTaskRunner;
friend class content::BrowserMainLoopTest_CreateThreadsInSingleProcess_Test;
// Returns the maximum number of non-single-threaded non-blocked tasks posted
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index cde76f5a274..942c844c953 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -86,16 +86,28 @@ void TaskSchedulerImpl::Start(const TaskScheduler::InitParams& init_params) {
single_thread_task_runner_manager_.Start();
+ const SchedulerWorkerPoolImpl::WorkerEnvironment worker_environment =
+#if defined(OS_WIN)
+ init_params.shared_worker_pool_environment ==
+ InitParams::SharedWorkerPoolEnvironment::COM_MTA
+ ? SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA
+ : SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
+#else
+ SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
+#endif
+
worker_pools_[BACKGROUND]->Start(init_params.background_worker_pool_params,
- service_thread_task_runner);
+ service_thread_task_runner,
+ worker_environment);
worker_pools_[BACKGROUND_BLOCKING]->Start(
init_params.background_blocking_worker_pool_params,
- service_thread_task_runner);
+ service_thread_task_runner, worker_environment);
worker_pools_[FOREGROUND]->Start(init_params.foreground_worker_pool_params,
- service_thread_task_runner);
+ service_thread_task_runner,
+ worker_environment);
worker_pools_[FOREGROUND_BLOCKING]->Start(
init_params.foreground_blocking_worker_pool_params,
- service_thread_task_runner);
+ service_thread_task_runner, worker_environment);
}
void TaskSchedulerImpl::PostDelayedTaskWithTraits(const Location& from_here,
@@ -171,12 +183,16 @@ void TaskSchedulerImpl::JoinForTesting() {
#if DCHECK_IS_ON()
DCHECK(!join_for_testing_returned_.IsSet());
#endif
+ // The service thread must be stopped before the workers are joined, otherwise
+ // tasks scheduled by the DelayedTaskManager might be posted between joining
+ // those workers and stopping the service thread which will cause a CHECK. See
+ // https://crbug.com/771701.
+ service_thread_.Stop();
single_thread_task_runner_manager_.JoinForTesting();
for (const auto& worker_pool : worker_pools_)
worker_pool->DisallowWorkerCleanupForTesting();
for (const auto& worker_pool : worker_pools_)
worker_pool->JoinForTesting();
- service_thread_.Stop();
#if DCHECK_IS_ON()
join_for_testing_returned_.Set();
#endif
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index 3d3c088665b..c5be7d24f25 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -246,7 +246,7 @@ void TaskTracker::Shutdown() {
void TaskTracker::Flush() {
AutoSchedulerLock auto_lock(flush_lock_);
- while (subtle::Acquire_Load(&num_pending_undelayed_tasks_) != 0 &&
+ while (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0 &&
!IsShutdownComplete()) {
flush_cv_->Wait();
}
@@ -259,7 +259,7 @@ bool TaskTracker::WillPostTask(const Task* task) {
return false;
if (task->delayed_run_time.is_null())
- subtle::NoBarrier_AtomicIncrement(&num_pending_undelayed_tasks_, 1);
+ subtle::NoBarrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, 1);
debug::TaskAnnotator task_annotator;
task_annotator.DidQueueTask(kQueueFunctionName, *task);
@@ -313,9 +313,7 @@ scoped_refptr<Sequence> TaskTracker::RunNextTask(
AfterRunTask(shutdown_behavior);
if (!is_delayed)
- DecrementNumPendingUndelayedTasks();
-
- OnRunNextTaskCompleted();
+ DecrementNumIncompleteUndelayedTasks();
const bool sequence_is_empty_after_pop = sequence->Pop();
@@ -476,8 +474,8 @@ bool TaskTracker::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
}
#endif
-int TaskTracker::GetNumPendingUndelayedTasksForTesting() const {
- return subtle::NoBarrier_Load(&num_pending_undelayed_tasks_);
+int TaskTracker::GetNumIncompleteUndelayedTasksForTesting() const {
+ return subtle::NoBarrier_Load(&num_incomplete_undelayed_tasks_);
}
bool TaskTracker::BeforePostTask(TaskShutdownBehavior shutdown_behavior) {
@@ -595,11 +593,11 @@ void TaskTracker::OnBlockingShutdownTasksComplete() {
shutdown_event_->Signal();
}
-void TaskTracker::DecrementNumPendingUndelayedTasks() {
- const auto new_num_pending_undelayed_tasks =
- subtle::Barrier_AtomicIncrement(&num_pending_undelayed_tasks_, -1);
- DCHECK_GE(new_num_pending_undelayed_tasks, 0);
- if (new_num_pending_undelayed_tasks == 0) {
+void TaskTracker::DecrementNumIncompleteUndelayedTasks() {
+ const auto new_num_incomplete_undelayed_tasks =
+ subtle::Barrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, -1);
+ DCHECK_GE(new_num_incomplete_undelayed_tasks, 0);
+ if (new_num_incomplete_undelayed_tasks == 0) {
AutoSchedulerLock auto_lock(flush_lock_);
flush_cv_->Signal();
}
diff --git a/chromium/base/task_scheduler/task_tracker.h b/chromium/base/task_scheduler/task_tracker.h
index 37e7f891af0..f249b73c445 100644
--- a/chromium/base/task_scheduler/task_tracker.h
+++ b/chromium/base/task_scheduler/task_tracker.h
@@ -99,7 +99,7 @@ class BASE_EXPORT TaskTracker {
// This can only be called once.
void Shutdown();
- // Waits until there are no pending undelayed tasks. May be called in tests
+ // Waits until there are no incomplete undelayed tasks. May be called in tests
// to validate that a condition is met after all undelayed tasks have run.
//
// Does not wait for delayed tasks. Waits for undelayed tasks posted from
@@ -166,13 +166,9 @@ class BASE_EXPORT TaskTracker {
virtual bool IsPostingBlockShutdownTaskAfterShutdownAllowed();
#endif
- // Called at the very end of RunNextTask() after the completion of all task
- // metrics accounting.
- virtual void OnRunNextTaskCompleted() {}
-
// Returns the number of undelayed tasks that haven't completed their
- // execution.
- int GetNumPendingUndelayedTasksForTesting() const;
+ // execution (still queued or in progress).
+ int GetNumIncompleteUndelayedTasksForTesting() const;
private:
class State;
@@ -199,9 +195,9 @@ class BASE_EXPORT TaskTracker {
// shutdown has started.
void OnBlockingShutdownTasksComplete();
- // Decrements the number of pending undelayed tasks and signals |flush_cv_| if
- // it reaches zero.
- void DecrementNumPendingUndelayedTasks();
+ // Decrements the number of incomplete undelayed tasks and signals |flush_cv_|
+ // if it reaches zero.
+ void DecrementNumIncompleteUndelayedTasks();
// To be called after running a background task from |just_ran_sequence|.
// Performs the following actions:
@@ -233,15 +229,15 @@ class BASE_EXPORT TaskTracker {
// decremented with a memory barrier after a task runs. Is accessed with an
// acquire memory barrier in Flush(). The memory barriers ensure that the
// memory written by flushed tasks is visible when Flush() returns.
- subtle::Atomic32 num_pending_undelayed_tasks_ = 0;
+ subtle::Atomic32 num_incomplete_undelayed_tasks_ = 0;
// Lock associated with |flush_cv_|. Partially synchronizes access to
- // |num_pending_undelayed_tasks_|. Full synchronization isn't needed because
- // it's atomic, but synchronization is needed to coordinate waking and
+ // |num_incomplete_undelayed_tasks_|. Full synchronization isn't needed
+ // because it's atomic, but synchronization is needed to coordinate waking and
// sleeping at the right time.
mutable SchedulerLock flush_lock_;
- // Signaled when |num_pending_undelayed_tasks_| is zero or when shutdown
+ // Signaled when |num_incomplete_undelayed_tasks_| is zero or when shutdown
// completes.
const std::unique_ptr<ConditionVariable> flush_cv_;
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index 38d3daa3cb5..685fb3573e8 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -476,7 +476,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
auto task_with_may_block =
std::make_unique<Task>(FROM_HERE, Bind([]() {
// Shouldn't fail.
- ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
}),
TaskTraits(MayBlock(), GetParam()), TimeDelta());
EXPECT_TRUE(tracker_.WillPostTask(task_with_may_block.get()));
@@ -486,9 +486,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
// task without the MayBlock() trait.
ThreadRestrictions::SetIOAllowed(true);
auto task_without_may_block = std::make_unique<Task>(
- FROM_HERE, Bind([]() {
- EXPECT_DCHECK_DEATH({ ThreadRestrictions::AssertIOAllowed(); });
- }),
+ FROM_HERE,
+ Bind([]() { EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); }); }),
TaskTraits(GetParam()), TimeDelta());
EXPECT_TRUE(tracker_.WillPostTask(task_without_may_block.get()));
DispatchAndRunTaskWithTracker(std::move(task_without_may_block));
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index a48f9df0f3c..5871b9f308e 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -26,10 +26,8 @@ static_library("test_support") {
testonly = true
sources = [
"../trace_event/trace_config_memory_test_util.h",
- "android/java_handler_thread_for_testing.cc",
- "android/java_handler_thread_for_testing.h",
- "android/test_system_message_handler_link_android.cc",
- "android/test_system_message_handler_link_android.h",
+ "android/java_handler_thread_helpers.cc",
+ "android/java_handler_thread_helpers.h",
"copy_only_int.h",
"fuzzed_data_provider.cc",
"fuzzed_data_provider.h",
@@ -334,8 +332,7 @@ if (is_android) {
generate_jni("base_unittests_jni_headers") {
sources = [
"android/java/src/org/chromium/base/ContentUriTestUtils.java",
- "android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
- "android/java/src/org/chromium/base/TestSystemMessageHandler.java",
+ "android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java",
"android/java/src/org/chromium/base/TestUiThread.java",
]
jni_package = "base"
diff --git a/chromium/base/third_party/dmg_fp/README.chromium b/chromium/base/third_party/dmg_fp/README.chromium
index e3270cf3f41..13d5fb2551e 100644
--- a/chromium/base/third_party/dmg_fp/README.chromium
+++ b/chromium/base/third_party/dmg_fp/README.chromium
@@ -19,3 +19,4 @@ List of changes made to original code:
- fixed parsing of long exponents, see exp_length.patch and crbug.com/542881
- made hexdig array const
- removed deprecated `register` keyword
+ - #undef Long so that it won't change Long in other files in jumbo builds
diff --git a/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc b/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc
index c314c59d874..5141e238c22 100644
--- a/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc
+++ b/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc
@@ -44,3 +44,5 @@ inline static void FREE_DTOA_LOCK(size_t n) {
}
#include "base/third_party/dmg_fp/dtoa.cc"
+
+#undef Long // To avoid breaking jni code in jumbo builds
diff --git a/chromium/base/third_party/icu/LICENSE b/chromium/base/third_party/icu/LICENSE
index 40282f49496..2882e4ebda9 100644
--- a/chromium/base/third_party/icu/LICENSE
+++ b/chromium/base/third_party/icu/LICENSE
@@ -1,9 +1,50 @@
-ICU License - ICU 1.8.1 and later
+COPYRIGHT AND PERMISSION NOTICE (ICU 58 and later)
-COPYRIGHT AND PERMISSION NOTICE
+Copyright © 1991-2017 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in http://www.unicode.org/copyright.html
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that either
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software, or
+(b) this copyright and permission notice appear in associated
+Documentation.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
-Copyright (c) 1995-2009 International Business Machines Corporation and others
+---------------------
+Third-Party Software Licenses
+
+This section contains third-party software notices and/or additional
+terms for licensed third-party software components included within ICU
+libraries.
+
+1. ICU License - ICU 1.8.1 to ICU 57.1
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright (c) 1995-2016 International Business Machines Corporation and others
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
@@ -30,3 +71,6 @@ Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale, use
or other dealings in this Software without prior written authorization
of the copyright holder.
+
+All trademarks and registered trademarks mentioned herein are the
+property of their respective owners.
diff --git a/chromium/base/third_party/icu/README.chromium b/chromium/base/third_party/icu/README.chromium
index 6a9a15aac8e..f755f27700b 100644
--- a/chromium/base/third_party/icu/README.chromium
+++ b/chromium/base/third_party/icu/README.chromium
@@ -1,14 +1,12 @@
Name: ICU
URL: http://site.icu-project.org/
-License: MIT
-License File: NOT_SHIPPED
+Version: 60
+License: Unicode
+License File: LICENSE
-This file has the relevant components from ICU copied to handle basic
-UTF8/16/32 conversions. Components are copied from utf.h utf8.h utf16.h and
-utf_impl.c
-
-The same module appears in third_party/icu, so we don't repeat the license
-file here.
+This file has the relevant components from ICU copied to handle basic UTF8/16/32
+conversions. Components are copied from umachine.h, utf.h, utf8.h, and utf16.h
+into icu_utf.h, and from utf_impl.cpp into icu_utf.cc.
The main change is that U_/U8_/U16_ prefixes have been replaced with
CBU_/CBU8_/CBU16_ (for "Chrome Base") to avoid confusion with the "real" ICU
diff --git a/chromium/base/third_party/icu/icu_utf.cc b/chromium/base/third_party/icu/icu_utf.cc
index 2b67c5d9c21..a3262b04d3a 100644
--- a/chromium/base/third_party/icu/icu_utf.cc
+++ b/chromium/base/third_party/icu/icu_utf.cc
@@ -1,12 +1,14 @@
+// © 2016 and later: Unicode, Inc. and others.
+// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
*
-* Copyright (C) 1999-2006, International Business Machines
+* Copyright (C) 1999-2012, International Business Machines
* Corporation and others. All Rights Reserved.
*
******************************************************************************
-* file name: utf_impl.c
-* encoding: US-ASCII
+* file name: utf_impl.cpp
+* encoding: UTF-8
* tab size: 8 (not used)
* indentation:4
*
@@ -21,99 +23,41 @@
namespace base_icu {
-/**
- * UTF8_ERROR_VALUE_1 and UTF8_ERROR_VALUE_2 are special error values for UTF-8,
- * which need 1 or 2 bytes in UTF-8:
- * \code
- * U+0015 = NAK = Negative Acknowledge, C0 control character
- * U+009f = highest C1 control character
- * \endcode
- *
- * These are used by UTF8_..._SAFE macros so that they can return an error value
- * that needs the same number of code units (bytes) as were seen by
- * a macro. They should be tested with UTF_IS_ERROR() or UTF_IS_VALID().
- *
- * @deprecated ICU 2.4. Obsolete, see utf_old.h.
- */
-#define CBUTF8_ERROR_VALUE_1 0x15
-
-/**
- * See documentation on UTF8_ERROR_VALUE_1 for details.
- *
- * @deprecated ICU 2.4. Obsolete, see utf_old.h.
- */
-#define CBUTF8_ERROR_VALUE_2 0x9f
-
-
-/**
- * Error value for all UTFs. This code point value will be set by macros with e>
- * checking if an error is detected.
- *
- * @deprecated ICU 2.4. Obsolete, see utf_old.h.
- */
-#define CBUTF_ERROR_VALUE 0xffff
-
-/*
- * This table could be replaced on many machines by
- * a few lines of assembler code using an
- * "index of first 0-bit from msb" instruction and
- * one or two more integer instructions.
- *
- * For example, on an i386, do something like
- * - MOV AL, leadByte
- * - NOT AL (8-bit, leave b15..b8==0..0, reverse only b7..b0)
- * - MOV AH, 0
- * - BSR BX, AX (16-bit)
- * - MOV AX, 6 (result)
- * - JZ finish (ZF==1 if leadByte==0xff)
- * - SUB AX, BX (result)
- * -finish:
- * (BSR: Bit Scan Reverse, scans for a 1-bit, starting from the MSB)
- *
- * In Unicode, all UTF-8 byte sequences with more than 4 bytes are illegal;
- * lead bytes above 0xf4 are illegal.
- * We keep them in this table for skipping long ISO 10646-UTF-8 sequences.
- */
-const uint8_t utf8_countTrailBytes[256] =
- {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
- 3, 3, /* illegal in Unicode */
- 4, 4, 4, 4, /* illegal in Unicode */
- 5, 5, /* illegal in Unicode */
- 0, 0 /* illegal bytes 0xfe and 0xff */
-};
-
-static const UChar32
-utf8_minLegal[4]={ 0, 0x80, 0x800, 0x10000 };
+// source/common/utf_impl.cpp
static const UChar32
utf8_errorValue[6]={
- CBUTF8_ERROR_VALUE_1, CBUTF8_ERROR_VALUE_2, CBUTF_ERROR_VALUE, 0x10ffff,
- 0x3ffffff, 0x7fffffff
+ // Same values as UTF8_ERROR_VALUE_1, UTF8_ERROR_VALUE_2, UTF_ERROR_VALUE,
+ // but without relying on the obsolete unicode/utf_old.h.
+ 0x15, 0x9f, 0xffff,
+ 0x10ffff
};
+static UChar32
+errorValue(int32_t count, int8_t strict) {
+ if(strict>=0) {
+ return utf8_errorValue[count];
+ } else if(strict==-3) {
+ return 0xfffd;
+ } else {
+ return CBU_SENTINEL;
+ }
+}
+
/*
- * Handle the non-inline part of the U8_NEXT() macro and its obsolete sibling
- * UTF8_NEXT_CHAR_SAFE().
+ * Handle the non-inline part of the U8_NEXT() and U8_NEXT_FFFD() macros
+ * and their obsolete sibling UTF8_NEXT_CHAR_SAFE().
+ *
+ * U8_NEXT() supports NUL-terminated strings indicated via length<0.
*
* The "strict" parameter controls the error behavior:
- * <0 "Safe" behavior of U8_NEXT(): All illegal byte sequences yield a negative
- * code point result.
+ * <0 "Safe" behavior of U8_NEXT():
+ * -1: All illegal byte sequences yield U_SENTINEL=-1.
+ * -2: Same as -1, except for lenient treatment of surrogate code points as legal.
+ * Some implementations use this for roundtripping of
+ * Unicode 16-bit strings that are not well-formed UTF-16, that is, they
+ * contain unpaired surrogates.
+ * -3: All illegal byte sequences yield U+FFFD.
* 0 Obsolete "safe" behavior of UTF8_NEXT_CHAR_SAFE(..., FALSE):
* All illegal byte sequences yield a positive code point such that this
* result code point would be encoded with the same number of bytes as
@@ -122,104 +66,64 @@ utf8_errorValue[6]={
* Same as the obsolete "safe" behavior, but non-characters are also treated
* like illegal sequences.
*
- * The special negative (<0) value -2 is used for lenient treatment of surrogate
- * code points as legal. Some implementations use this for roundtripping of
- * Unicode 16-bit strings that are not well-formed UTF-16, that is, they
- * contain unpaired surrogates.
- *
* Note that a UBool is the same as an int8_t.
*/
-UChar32 utf8_nextCharSafeBody(const uint8_t* s,
- int32_t* pi,
- int32_t length,
- UChar32 c,
- UBool strict) {
- int32_t i = *pi;
- uint8_t count = CBU8_COUNT_TRAIL_BYTES(c);
- if((i)+count<=(length)) {
- uint8_t trail, illegal = 0;
-
- CBU8_MASK_LEAD_BYTE((c), count);
- /* count==0 for illegally leading trail bytes and the illegal bytes 0xfe and 0xff */
- switch(count) {
- /* each branch falls through to the next one */
- case 5:
- case 4:
- /* count>=4 is always illegal: no more than 3 trail bytes in Unicode's UTF-8 */
- illegal=1;
- break;
- case 3:
- trail=s[(i)++];
- (c)=((c)<<6)|(trail&0x3f);
- if(c<0x110) {
- illegal|=(trail&0xc0)^0x80;
- } else {
- /* code point>0x10ffff, outside Unicode */
- illegal=1;
- break;
- }
- case 2:
- trail=s[(i)++];
- (c)=((c)<<6)|(trail&0x3f);
- illegal|=(trail&0xc0)^0x80;
- case 1:
- trail=s[(i)++];
- (c)=((c)<<6)|(trail&0x3f);
- illegal|=(trail&0xc0)^0x80;
- break;
- case 0:
- if(strict>=0) {
- return CBUTF8_ERROR_VALUE_1;
- } else {
- return CBU_SENTINEL;
+UChar32
+utf8_nextCharSafeBody(const uint8_t *s, int32_t *pi, int32_t length, UChar32 c, UBool strict) {
+ // *pi is one after byte c.
+ int32_t i=*pi;
+ // length can be negative for NUL-terminated strings: Read and validate one byte at a time.
+ if(i==length || c>0xf4) {
+ // end of string, or not a lead byte
+ } else if(c>=0xf0) {
+ // Test for 4-byte sequences first because
+ // U8_NEXT() handles shorter valid sequences inline.
+ uint8_t t1=s[i], t2, t3;
+ c&=7;
+ if(CBU8_IS_VALID_LEAD4_AND_T1(c, t1) &&
+ ++i!=length && (t2=s[i]-0x80)<=0x3f &&
+ ++i!=length && (t3=s[i]-0x80)<=0x3f) {
+ ++i;
+ c=(c<<18)|((t1&0x3f)<<12)|(t2<<6)|t3;
+ // strict: forbid non-characters like U+fffe
+ if(strict<=0 || !CBU_IS_UNICODE_NONCHAR(c)) {
+ *pi=i;
+ return c;
}
- /* no default branch to optimize switch() - all values are covered */
}
-
- /*
- * All the error handling should return a value
- * that needs count bytes so that UTF8_GET_CHAR_SAFE() works right.
- *
- * Starting with Unicode 3.0.1, non-shortest forms are illegal.
- * Starting with Unicode 3.2, surrogate code points must not be
- * encoded in UTF-8, and there are no irregular sequences any more.
- *
- * U8_ macros (new in ICU 2.4) return negative values for error conditions.
- */
-
- /* correct sequence - all trail bytes have (b7..b6)==(10)? */
- /* illegal is also set if count>=4 */
- if(illegal || (c)<utf8_minLegal[count] || (CBU_IS_SURROGATE(c) && strict!=-2)) {
- /* error handling */
- uint8_t errorCount = count;
- /* don't go beyond this sequence */
- i=*pi;
- while(count>0 && CBU8_IS_TRAIL(s[i])) {
- ++(i);
- --count;
+ } else if(c>=0xe0) {
+ c&=0xf;
+ if(strict!=-2) {
+ uint8_t t1=s[i], t2;
+ if(CBU8_IS_VALID_LEAD3_AND_T1(c, t1) &&
+ ++i!=length && (t2=s[i]-0x80)<=0x3f) {
+ ++i;
+ c=(c<<12)|((t1&0x3f)<<6)|t2;
+ // strict: forbid non-characters like U+fffe
+ if(strict<=0 || !CBU_IS_UNICODE_NONCHAR(c)) {
+ *pi=i;
+ return c;
+ }
}
- if(strict>=0) {
- c=utf8_errorValue[errorCount-count];
- } else {
- c=CBU_SENTINEL;
+ } else {
+ // strict=-2 -> lenient: allow surrogates
+ uint8_t t1=s[i]-0x80, t2;
+ if(t1<=0x3f && (c>0 || t1>=0x20) &&
+ ++i!=length && (t2=s[i]-0x80)<=0x3f) {
+ *pi=i+1;
+ return (c<<12)|(t1<<6)|t2;
}
- } else if((strict)>0 && CBU_IS_UNICODE_NONCHAR(c)) {
- /* strict: forbid non-characters like U+fffe */
- c=utf8_errorValue[count];
}
- } else /* too few bytes left */ {
- /* error handling */
- int32_t i0 = i;
- /* don't just set (i)=(length) in case there is an illegal sequence */
- while((i)<(length) && CBU8_IS_TRAIL(s[i])) {
- ++(i);
+ } else if(c>=0xc2) {
+ uint8_t t1=s[i]-0x80;
+ if(t1<=0x3f) {
+ *pi=i+1;
+ return ((c-0xc0)<<6)|t1;
}
- if(strict>=0) {
- c=utf8_errorValue[i-i0];
- } else {
- c=CBU_SENTINEL;
- }
- }
+ } // else 0x80<=c<0xc2 is not a lead byte
+
+ /* error handling */
+ c=errorValue(i-*pi, strict);
*pi=i;
return c;
}
diff --git a/chromium/base/third_party/icu/icu_utf.h b/chromium/base/third_party/icu/icu_utf.h
index 4370fdec15e..2ba82316c25 100644
--- a/chromium/base/third_party/icu/icu_utf.h
+++ b/chromium/base/third_party/icu/icu_utf.h
@@ -1,17 +1,12 @@
+// © 2016 and later: Unicode, Inc. and others.
+// License & terms of use: http://www.unicode.org/copyright.html
/*
-*******************************************************************************
+******************************************************************************
*
-* Copyright (C) 1999-2004, International Business Machines
+* Copyright (C) 1999-2015, International Business Machines
* Corporation and others. All Rights Reserved.
*
-*******************************************************************************
-* file name: utf.h
-* encoding: US-ASCII
-* tab size: 8 (not used)
-* indentation:4
-*
-* created on: 1999sep09
-* created by: Markus W. Scherer
+******************************************************************************
*/
#ifndef BASE_THIRD_PARTY_ICU_ICU_UTF_H_
@@ -21,12 +16,29 @@
namespace base_icu {
-typedef int32_t UChar32;
-typedef uint16_t UChar;
+// source/common/unicode/umachine.h
+
+/** The ICU boolean type @stable ICU 2.0 */
typedef int8_t UBool;
-// General ---------------------------------------------------------------------
-// from utf.h
+/**
+ * Define UChar32 as a type for single Unicode code points.
+ * UChar32 is a signed 32-bit integer (same as int32_t).
+ *
+ * The Unicode code point range is 0..0x10ffff.
+ * All other values (negative or >=0x110000) are illegal as Unicode code points.
+ * They may be used as sentinel values to indicate "done", "error"
+ * or similar non-code point conditions.
+ *
+ * Before ICU 2.4 (Jitterbug 2146), UChar32 was defined
+ * to be wchar_t if that is 32 bits wide (wchar_t may be signed or unsigned)
+ * or else to be uint32_t.
+ * That is, the definition of UChar32 was platform-dependent.
+ *
+ * @see U_SENTINEL
+ * @stable ICU 2.4
+ */
+typedef int32_t UChar32;
/**
* This value is intended for sentinel values for APIs that
@@ -34,7 +46,7 @@ typedef int8_t UBool;
* It is outside of the Unicode code point range 0..0x10ffff.
*
* For example, a "done" or "error" value in a new API
- * could be indicated with CBU_SENTINEL.
+ * could be indicated with U_SENTINEL.
*
* ICU APIs designed before ICU 2.4 usually define service-specific "done"
* values, mostly 0xffff.
@@ -48,15 +60,17 @@ typedef int8_t UBool;
*/
#define CBU_SENTINEL (-1)
+// source/common/unicode/utf.h
+
/**
* Is this code point a Unicode noncharacter?
* @param c 32-bit code point
* @return TRUE or FALSE
* @stable ICU 2.4
*/
-#define CBU_IS_UNICODE_NONCHAR(c) \
- ((c) >= 0xfdd0 && ((uint32_t)(c) <= 0xfdef || ((c)&0xfffe) == 0xfffe) && \
- (uint32_t)(c) <= 0x10ffff)
+#define CBU_IS_UNICODE_NONCHAR(c) \
+ ((c)>=0xfdd0 && \
+ ((c)<=0xfdef || ((c)&0xfffe)==0xfffe) && (c)<=0x10ffff)
/**
* Is c a Unicode code point value (0..U+10ffff)
@@ -75,10 +89,9 @@ typedef int8_t UBool;
* @return TRUE or FALSE
* @stable ICU 2.4
*/
-#define CBU_IS_UNICODE_CHAR(c) \
- ((uint32_t)(c) < 0xd800 || \
- ((uint32_t)(c) > 0xdfff && (uint32_t)(c) <= 0x10ffff && \
- !CBU_IS_UNICODE_NONCHAR(c)))
+#define CBU_IS_UNICODE_CHAR(c) \
+ ((uint32_t)(c)<0xd800 || \
+ (0xdfff<(c) && (c)<=0x10ffff && !CBU_IS_UNICODE_NONCHAR(c)))
/**
* Is this code point a surrogate (U+d800..U+dfff)?
@@ -97,24 +110,56 @@ typedef int8_t UBool;
*/
#define CBU_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+// source/common/unicode/utf8.h
-// UTF-8 macros ----------------------------------------------------------------
-// from utf8.h
+/**
+ * Internal bit vector for 3-byte UTF-8 validity check, for use in U8_IS_VALID_LEAD3_AND_T1.
+ * Each bit indicates whether one lead byte + first trail byte pair starts a valid sequence.
+ * Lead byte E0..EF bits 3..0 are used as byte index,
+ * first trail byte bits 7..5 are used as bit index into that byte.
+ * @see U8_IS_VALID_LEAD3_AND_T1
+ * @internal
+ */
+#define CBU8_LEAD3_T1_BITS "\x20\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x10\x30\x30"
-extern const uint8_t utf8_countTrailBytes[256];
+/**
+ * Internal 3-byte UTF-8 validity check.
+ * Non-zero if lead byte E0..EF and first trail byte 00..FF start a valid sequence.
+ * @internal
+ */
+#define CBU8_IS_VALID_LEAD3_AND_T1(lead, t1) (CBU8_LEAD3_T1_BITS[(lead)&0xf]&(1<<((uint8_t)(t1)>>5)))
/**
- * Count the trail bytes for a UTF-8 lead byte.
+ * Internal bit vector for 4-byte UTF-8 validity check, for use in U8_IS_VALID_LEAD4_AND_T1.
+ * Each bit indicates whether one lead byte + first trail byte pair starts a valid sequence.
+ * First trail byte bits 7..4 are used as byte index,
+ * lead byte F0..F4 bits 2..0 are used as bit index into that byte.
+ * @see U8_IS_VALID_LEAD4_AND_T1
* @internal
*/
-#define CBU8_COUNT_TRAIL_BYTES(leadByte) \
- (base_icu::utf8_countTrailBytes[(uint8_t)leadByte])
+#define CBU8_LEAD4_T1_BITS "\x00\x00\x00\x00\x00\x00\x00\x00\x1E\x0F\x0F\x0F\x00\x00\x00\x00"
/**
- * Mask a UTF-8 lead byte, leave only the lower bits that form part of the code point value.
+ * Internal 4-byte UTF-8 validity check.
+ * Non-zero if lead byte F0..F4 and first trail byte 00..FF start a valid sequence.
* @internal
*/
-#define CBU8_MASK_LEAD_BYTE(leadByte, countTrailBytes) ((leadByte)&=(1<<(6-(countTrailBytes)))-1)
+#define CBU8_IS_VALID_LEAD4_AND_T1(lead, t1) (CBU8_LEAD4_T1_BITS[(uint8_t)(t1)>>4]&(1<<((lead)&7)))
+
+/**
+ * Function for handling "next code point" with error-checking.
+ *
+ * This is internal since it is not meant to be called directly by external clie
+nts;
+ * however it is U_STABLE (not U_INTERNAL) since it is called by public macros i
+n this
+ * file and thus must remain stable, and should not be hidden when other interna
+l
+ * functions are hidden (otherwise public macros would fail to compile).
+ * @internal
+ */
+UChar32
+utf8_nextCharSafeBody(const uint8_t *s, int32_t *pi, int32_t length, ::base_icu::UChar32 c, ::base_icu::UBool strict);
/**
* Does this code unit (byte) encode a code point by itself (US-ASCII 0..0x7f)?
@@ -125,20 +170,20 @@ extern const uint8_t utf8_countTrailBytes[256];
#define CBU8_IS_SINGLE(c) (((c)&0x80)==0)
/**
- * Is this code unit (byte) a UTF-8 lead byte?
+ * Is this code unit (byte) a UTF-8 lead byte? (0xC2..0xF4)
* @param c 8-bit code unit (byte)
* @return TRUE or FALSE
* @stable ICU 2.4
*/
-#define CBU8_IS_LEAD(c) ((uint8_t)((c)-0xc0) < 0x3e)
+#define CBU8_IS_LEAD(c) ((uint8_t)((c)-0xc2)<=0x32)
/**
- * Is this code unit (byte) a UTF-8 trail byte?
+ * Is this code unit (byte) a UTF-8 trail byte? (0x80..0xBF)
* @param c 8-bit code unit (byte)
* @return TRUE or FALSE
* @stable ICU 2.4
*/
-#define CBU8_IS_TRAIL(c) (((c)&0xc0)==0x80)
+#define CBU8_IS_TRAIL(c) ((int8_t)(c)<-0x40)
/**
* How many code units (bytes) are used for the UTF-8 encoding
@@ -147,16 +192,16 @@ extern const uint8_t utf8_countTrailBytes[256];
* @return 1..4, or 0 if c is a surrogate or not a Unicode code point
* @stable ICU 2.4
*/
-#define CBU8_LENGTH(c) \
- ((uint32_t)(c) <= 0x7f \
- ? 1 \
- : ((uint32_t)(c) <= 0x7ff \
- ? 2 \
- : ((uint32_t)(c) <= 0xd7ff \
- ? 3 \
- : ((uint32_t)(c) <= 0xdfff || (uint32_t)(c) > 0x10ffff \
- ? 0 \
- : ((uint32_t)(c) <= 0xffff ? 3 : 4)))))
+#define CBU8_LENGTH(c) \
+ ((uint32_t)(c)<=0x7f ? 1 : \
+ ((uint32_t)(c)<=0x7ff ? 2 : \
+ ((uint32_t)(c)<=0xd7ff ? 3 : \
+ ((uint32_t)(c)<=0xdfff || (uint32_t)(c)>0x10ffff ? 0 : \
+ ((uint32_t)(c)<=0xffff ? 3 : 4)\
+ ) \
+ ) \
+ ) \
+ )
/**
* The maximum number of UTF-8 code units (bytes) per Unicode code point (U+0000..U+10ffff).
@@ -166,82 +211,82 @@ extern const uint8_t utf8_countTrailBytes[256];
#define CBU8_MAX_LENGTH 4
/**
- * Function for handling "next code point" with error-checking.
- * @internal
- */
-UChar32 utf8_nextCharSafeBody(const uint8_t* s,
- int32_t* pi,
- int32_t length,
- UChar32 c,
- UBool strict);
-
-/**
* Get a code point from a string at a code point boundary offset,
* and advance the offset to the next code point boundary.
* (Post-incrementing forward iteration.)
* "Safe" macro, checks for illegal sequences and for string boundaries.
*
+ * The length can be negative for a NUL-terminated string.
+ *
* The offset may point to the lead byte of a multi-byte sequence,
* in which case the macro will read the whole sequence.
* If the offset points to a trail byte or an illegal UTF-8 sequence, then
* c is set to a negative value.
*
* @param s const uint8_t * string
- * @param i string offset, i<length
- * @param length string length
+ * @param i int32_t string offset, must be i<length
+ * @param length int32_t string length
* @param c output UChar32 variable, set to <0 in case of an error
- * @see CBU8_NEXT_UNSAFE
+ * @see U8_NEXT_UNSAFE
* @stable ICU 2.4
*/
-#define CBU8_NEXT(s, i, length, c) \
- { \
- (c) = (s)[(i)++]; \
- if (((uint8_t)(c)) >= 0x80) { \
- if (CBU8_IS_LEAD(c)) { \
- (c) = base_icu::utf8_nextCharSafeBody((const uint8_t*)s, &(i), \
- (int32_t)(length), c, -1); \
- } else { \
- (c) = CBU_SENTINEL; \
- } \
- } \
- }
+#define CBU8_NEXT(s, i, length, c) { \
+ (c)=(uint8_t)(s)[(i)++]; \
+ if(!CBU8_IS_SINGLE(c)) { \
+ uint8_t __t1, __t2; \
+ if( /* handle U+0800..U+FFFF inline */ \
+ (0xe0<=(c) && (c)<0xf0) && \
+ (((i)+1)<(length) || (length)<0) && \
+ CBU8_IS_VALID_LEAD3_AND_T1((c), __t1=(s)[i]) && \
+ (__t2=(s)[(i)+1]-0x80)<=0x3f) { \
+ (c)=(((c)&0xf)<<12)|((__t1&0x3f)<<6)|__t2; \
+ (i)+=2; \
+ } else if( /* handle U+0080..U+07FF inline */ \
+ ((c)<0xe0 && (c)>=0xc2) && \
+ ((i)!=(length)) && \
+ (__t1=(s)[i]-0x80)<=0x3f) { \
+ (c)=(((c)&0x1f)<<6)|__t1; \
+ ++(i); \
+ } else { \
+ /* function call for "complicated" and error cases */ \
+ (c)=::base_icu::utf8_nextCharSafeBody((const uint8_t *)s, &(i), (length), c, -1); \
+ } \
+ } \
+}
/**
* Append a code point to a string, overwriting 1 to 4 bytes.
* The offset points to the current end of the string contents
* and is advanced (post-increment).
- * "Unsafe" macro, assumes a valid code point and sufficient space in the
- * string.
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the string.
* Otherwise, the result is undefined.
*
* @param s const uint8_t * string buffer
* @param i string offset
* @param c code point to append
- * @see CBU8_APPEND
+ * @see U8_APPEND
* @stable ICU 2.4
*/
-#define CBU8_APPEND_UNSAFE(s, i, c) \
- { \
- if ((uint32_t)(c) <= 0x7f) { \
- (s)[(i)++] = (uint8_t)(c); \
- } else { \
- if ((uint32_t)(c) <= 0x7ff) { \
- (s)[(i)++] = (uint8_t)(((c) >> 6) | 0xc0); \
- } else { \
- if ((uint32_t)(c) <= 0xffff) { \
- (s)[(i)++] = (uint8_t)(((c) >> 12) | 0xe0); \
- } else { \
- (s)[(i)++] = (uint8_t)(((c) >> 18) | 0xf0); \
- (s)[(i)++] = (uint8_t)((((c) >> 12) & 0x3f) | 0x80); \
- } \
- (s)[(i)++] = (uint8_t)((((c) >> 6) & 0x3f) | 0x80); \
- } \
- (s)[(i)++] = (uint8_t)(((c)&0x3f) | 0x80); \
- } \
- }
-
-// UTF-16 macros ---------------------------------------------------------------
-// from utf16.h
+#define CBU8_APPEND_UNSAFE(s, i, c) { \
+ if((uint32_t)(c)<=0x7f) { \
+ (s)[(i)++]=(uint8_t)(c); \
+ } else { \
+ if((uint32_t)(c)<=0x7ff) { \
+ (s)[(i)++]=(uint8_t)(((c)>>6)|0xc0); \
+ } else { \
+ if((uint32_t)(c)<=0xffff) { \
+ (s)[(i)++]=(uint8_t)(((c)>>12)|0xe0); \
+ } else { \
+ (s)[(i)++]=(uint8_t)(((c)>>18)|0xf0); \
+ (s)[(i)++]=(uint8_t)((((c)>>12)&0x3f)|0x80); \
+ } \
+ (s)[(i)++]=(uint8_t)((((c)>>6)&0x3f)|0x80); \
+ } \
+ (s)[(i)++]=(uint8_t)(((c)&0x3f)|0x80); \
+ } \
+}
+
+// source/common/unicode/utf16.h
/**
* Does this code unit alone encode a code point (BMP, not a surrogate)?
@@ -285,7 +330,7 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
#define CBU16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
/**
- * Helper constant for CBU16_GET_SUPPLEMENTARY.
+ * Helper constant for U16_GET_SUPPLEMENTARY.
* @internal
*/
#define CBU16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
@@ -302,8 +347,7 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
* @stable ICU 2.4
*/
#define CBU16_GET_SUPPLEMENTARY(lead, trail) \
- (((base_icu::UChar32)(lead)<<10UL)+(base_icu::UChar32)(trail)-CBU16_SURROGATE_OFFSET)
-
+ (((::base_icu::UChar32)(lead)<<10UL)+(::base_icu::UChar32)(trail)-CBU16_SURROGATE_OFFSET)
/**
* Get the lead surrogate (0xd800..0xdbff) for a
@@ -312,8 +356,7 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
* @return lead surrogate (U+d800..U+dbff) for supplementary
* @stable ICU 2.4
*/
-#define CBU16_LEAD(supplementary) \
- (base_icu::UChar)(((supplementary)>>10)+0xd7c0)
+#define CBU16_LEAD(supplementary) (::base_icu::UChar)(((supplementary)>>10)+0xd7c0)
/**
* Get the trail surrogate (0xdc00..0xdfff) for a
@@ -322,8 +365,7 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
* @return trail surrogate (U+dc00..U+dfff) for supplementary
* @stable ICU 2.4
*/
-#define CBU16_TRAIL(supplementary) \
- (base_icu::UChar)(((supplementary)&0x3ff)|0xdc00)
+#define CBU16_TRAIL(supplementary) (::base_icu::UChar)(((supplementary)&0x3ff)|0xdc00)
/**
* How many 16-bit code units are used to encode this Unicode code point? (1 or 2)
@@ -332,7 +374,7 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
* @return 1 or 2
* @stable ICU 2.4
*/
-#define CBU16_LENGTH(c) ((uint32_t)(c) <= 0xffff ? 1 : 2)
+#define CBU16_LENGTH(c) ((uint32_t)(c)<=0xffff ? 1 : 2)
/**
* The maximum number of 16-bit code units per Unicode code point (U+0000..U+10ffff).
@@ -347,30 +389,31 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
* (Post-incrementing forward iteration.)
* "Safe" macro, handles unpaired surrogates and checks for string boundaries.
*
+ * The length can be negative for a NUL-terminated string.
+ *
* The offset may point to the lead surrogate unit
* for a supplementary code point, in which case the macro will read
* the following trail surrogate as well.
* If the offset points to a trail surrogate or
- * to a single, unpaired lead surrogate, then that itself
- * will be returned as the code point.
+ * to a single, unpaired lead surrogate, then c is set to that unpaired surrogate.
*
* @param s const UChar * string
- * @param i string offset, i<length
+ * @param i string offset, must be i<length
* @param length string length
* @param c output UChar32 variable
+ * @see U16_NEXT_UNSAFE
* @stable ICU 2.4
*/
-#define CBU16_NEXT(s, i, length, c) \
- { \
- (c) = (s)[(i)++]; \
- if (CBU16_IS_LEAD(c)) { \
- uint16_t __c2; \
- if ((i) < (length) && CBU16_IS_TRAIL(__c2 = (s)[(i)])) { \
- ++(i); \
- (c) = CBU16_GET_SUPPLEMENTARY((c), __c2); \
- } \
- } \
- }
+#define CBU16_NEXT(s, i, length, c) { \
+ (c)=(s)[(i)++]; \
+ if(CBU16_IS_LEAD(c)) { \
+ uint16_t __c2; \
+ if((i)!=(length) && CBU16_IS_TRAIL(__c2=(s)[(i)])) { \
+ ++(i); \
+ (c)=CBU16_GET_SUPPLEMENTARY((c), __c2); \
+ } \
+ } \
+}
/**
* Append a code point to a string, overwriting 1 or 2 code units.
@@ -382,18 +425,17 @@ UChar32 utf8_nextCharSafeBody(const uint8_t* s,
* @param s const UChar * string buffer
* @param i string offset
* @param c code point to append
- * @see CBU16_APPEND
+ * @see U16_APPEND
* @stable ICU 2.4
*/
-#define CBU16_APPEND_UNSAFE(s, i, c) \
- { \
- if ((uint32_t)(c) <= 0xffff) { \
- (s)[(i)++] = (uint16_t)(c); \
- } else { \
- (s)[(i)++] = (uint16_t)(((c) >> 10) + 0xd7c0); \
- (s)[(i)++] = (uint16_t)(((c)&0x3ff) | 0xdc00); \
- } \
- }
+#define CBU16_APPEND_UNSAFE(s, i, c) { \
+ if((uint32_t)(c)<=0xffff) { \
+ (s)[(i)++]=(uint16_t)(c); \
+ } else { \
+ (s)[(i)++]=(uint16_t)(((c)>>10)+0xd7c0); \
+ (s)[(i)++]=(uint16_t)(((c)&0x3ff)|0xdc00); \
+ } \
+}
} // namesapce base_icu
diff --git a/chromium/base/third_party/symbolize/README.chromium b/chromium/base/third_party/symbolize/README.chromium
index a2185fd9840..ff78e0ef3b6 100644
--- a/chromium/base/third_party/symbolize/README.chromium
+++ b/chromium/base/third_party/symbolize/README.chromium
@@ -11,8 +11,10 @@ https://github.com/google/glog/tree/a5ffa884137f7687d0393ccba22557d583654a25
- symbolize.cc
- symbolize.h
-Cherry picked upstream change https://github.com/google/glog/pull/115 to
-fix a symbolization issue when using lld.
+Cherry picked upstream changes:
+https://github.com/google/glog/pull/115
+https://github.com/google/glog/pull/261
+to fix symbolization issues when using lld.
The following files are minimal stubs created for use in Chromium:
diff --git a/chromium/base/third_party/symbolize/symbolize.cc b/chromium/base/third_party/symbolize/symbolize.cc
index b41a17b8b61..41b53bd5e37 100644
--- a/chromium/base/third_party/symbolize/symbolize.cc
+++ b/chromium/base/third_party/symbolize/symbolize.cc
@@ -56,6 +56,8 @@
#if defined(HAVE_SYMBOLIZE)
+#include <string.h>
+
#include <algorithm>
#include <limits>
@@ -328,41 +330,17 @@ FindSymbol(uint64_t pc, const int fd, char *out, int out_size,
// both regular and dynamic symbol tables if necessary. On success,
// write the symbol name to "out" and return true. Otherwise, return
// false.
-static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
- char *out, int out_size,
- uint64_t map_base_address) {
+static bool GetSymbolFromObjectFile(const int fd,
+ uint64_t pc,
+ char* out,
+ int out_size,
+ uint64_t base_address) {
// Read the ELF header.
ElfW(Ehdr) elf_header;
if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
return false;
}
- uint64_t symbol_offset = 0;
- if (elf_header.e_type == ET_DYN) { // DSO needs offset adjustment.
- ElfW(Phdr) phdr;
- // We need to find the PT_LOAD segment corresponding to the read-execute
- // file mapping in order to correctly perform the offset adjustment.
- for (unsigned i = 0; i != elf_header.e_phnum; ++i) {
- if (!ReadFromOffsetExact(fd, &phdr, sizeof(phdr),
- elf_header.e_phoff + i * sizeof(phdr)))
- return false;
- if (phdr.p_type == PT_LOAD &&
- (phdr.p_flags & (PF_R | PF_X)) == (PF_R | PF_X)) {
- // Find the mapped address corresponding to virtual address zero. We do
- // this by first adding p_offset. This gives us the mapped address of
- // the start of the segment, or in other words the mapped address
- // corresponding to the virtual address of the segment. (Note that this
- // is distinct from the start address, as p_offset is not guaranteed to
- // be page aligned.) We then subtract p_vaddr, which takes us to virtual
- // address zero.
- symbol_offset = map_base_address + phdr.p_offset - phdr.p_vaddr;
- break;
- }
- }
- if (symbol_offset == 0)
- return false;
- }
-
ElfW(Shdr) symtab, strtab;
// Consult a regular symbol table first.
@@ -372,8 +350,7 @@ static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
symtab.sh_link * sizeof(symtab))) {
return false;
}
- if (FindSymbol(pc, fd, out, out_size, symbol_offset,
- &strtab, &symtab)) {
+ if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
return true; // Found the symbol in a regular symbol table.
}
}
@@ -385,8 +362,7 @@ static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
symtab.sh_link * sizeof(symtab))) {
return false;
}
- if (FindSymbol(pc, fd, out, out_size, symbol_offset,
- &strtab, &symtab)) {
+ if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
return true; // Found the symbol in a dynamic symbol table.
}
}
@@ -535,7 +511,6 @@ OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
int out_file_name_size) {
int object_fd;
- // Open /proc/self/maps.
int maps_fd;
NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
FileDescriptor wrapped_maps_fd(maps_fd);
@@ -543,6 +518,13 @@ OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
return -1;
}
+ int mem_fd;
+ NO_INTR(mem_fd = open("/proc/self/mem", O_RDONLY));
+ FileDescriptor wrapped_mem_fd(mem_fd);
+ if (wrapped_mem_fd.get() < 0) {
+ return -1;
+ }
+
// Iterate over maps and look for the map containing the pc. Then
// look into the symbol tables inside.
char buf[1024]; // Big enough for line of sane /proc/self/maps
@@ -578,11 +560,6 @@ OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
}
++cursor; // Skip ' '.
- // Check start and end addresses.
- if (!(start_address <= pc && pc < end_address)) {
- continue; // We skip this map. PC isn't in this map.
- }
-
// Read flags. Skip flags until we encounter a space or eol.
const char * const flags_start = cursor;
while (cursor < eol && *cursor != ' ') {
@@ -593,6 +570,48 @@ OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
return -1; // Malformed line.
}
+ // Determine the base address by reading ELF headers in process memory.
+ ElfW(Ehdr) ehdr;
+ if (flags_start[0] == 'r' &&
+ ReadFromOffsetExact(mem_fd, &ehdr, sizeof(ElfW(Ehdr)), start_address) &&
+ memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
+ switch (ehdr.e_type) {
+ case ET_EXEC:
+ base_address = 0;
+ break;
+ case ET_DYN:
+ // Find the segment containing file offset 0. This will correspond
+ // to the ELF header that we just read. Normally this will have
+ // virtual address 0, but this is not guaranteed. We must subtract
+ // the virtual address from the address where the ELF header was
+ // mapped to get the base address.
+ //
+ // If we fail to find a segment for file offset 0, use the address
+ // of the ELF header as the base address.
+ base_address = start_address;
+ for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
+ ElfW(Phdr) phdr;
+ if (ReadFromOffsetExact(
+ mem_fd, &phdr, sizeof(phdr),
+ start_address + ehdr.e_phoff + i * sizeof(phdr)) &&
+ phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
+ base_address = start_address - phdr.p_vaddr;
+ break;
+ }
+ }
+ break;
+ default:
+ // ET_REL or ET_CORE. These aren't directly executable, so they don't
+ // affect the base address.
+ break;
+ }
+ }
+
+ // Check start and end addresses.
+ if (!(start_address <= pc && pc < end_address)) {
+ continue; // We skip this map. PC isn't in this map.
+ }
+
// Check flags. We are only interested in "r-x" maps.
if (memcmp(flags_start, "r-x", 3) != 0) { // Not a "r-x" map.
continue; // We skip this map.
@@ -607,19 +626,6 @@ OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
}
++cursor; // Skip ' '.
- // Don't subtract 'start_address' from the first entry:
- // * If a binary is compiled w/o -pie, then the first entry in
- // process maps is likely the binary itself (all dynamic libs
- // are mapped higher in address space). For such a binary,
- // instruction offset in binary coincides with the actual
- // instruction address in virtual memory (as code section
- // is mapped to a fixed memory range).
- // * If a binary is compiled with -pie, all the modules are
- // mapped high at address space (in particular, higher than
- // shadow memory of the tool), so the module can't be the
- // first entry.
- base_address = ((num_maps == 1) ? 0U : start_address) - file_offset;
-
// Skip to file name. "cursor" now points to dev. We need to
// skip at least two spaces for dev and inode.
int num_spaces = 0;
diff --git a/chromium/base/threading/platform_thread_fuchsia.cc b/chromium/base/threading/platform_thread_fuchsia.cc
index 655c9da4545..66d8e09fc6c 100644
--- a/chromium/base/threading/platform_thread_fuchsia.cc
+++ b/chromium/base/threading/platform_thread_fuchsia.cc
@@ -6,6 +6,7 @@
#include <pthread.h>
#include <sched.h>
+#include <zircon/syscalls.h>
#include "base/threading/platform_thread_internal_posix.h"
#include "base/threading/thread_id_name_manager.h"
@@ -49,8 +50,10 @@ size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
// static
void PlatformThread::SetName(const std::string& name) {
- // TODO(fuchsia): There's no system-level API to communicate a thread name
- // (for the debugger, etc.), so for now only set to our internal mechanisms.
+ zx_status_t status = zx_object_set_property(CurrentId(), ZX_PROP_NAME,
+ name.data(), name.size());
+ DCHECK_EQ(status, ZX_OK);
+
ThreadIdNameManager::GetInstance()->SetName(PlatformThread::CurrentId(),
name);
}
diff --git a/chromium/base/threading/platform_thread_posix.cc b/chromium/base/threading/platform_thread_posix.cc
index 849fd088ba6..7f0cf22ed54 100644
--- a/chromium/base/threading/platform_thread_posix.cc
+++ b/chromium/base/threading/platform_thread_posix.cc
@@ -42,7 +42,7 @@ namespace {
struct ThreadParams {
ThreadParams()
- : delegate(NULL), joinable(false), priority(ThreadPriority::NORMAL) {}
+ : delegate(nullptr), joinable(false), priority(ThreadPriority::NORMAL) {}
PlatformThread::Delegate* delegate;
bool joinable;
@@ -79,7 +79,7 @@ void* ThreadFunc(void* params) {
PlatformThread::CurrentId());
base::TerminateOnThread();
- return NULL;
+ return nullptr;
}
bool CreateThread(size_t stack_size,
@@ -225,8 +225,8 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
// Joining another thread may block the current thread for a long time, since
// the thread referred to by |thread_handle| may still be running long-lived /
// blocking tasks.
- base::ThreadRestrictions::AssertIOAllowed();
- CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), NULL));
+ AssertBlockingAllowed();
+ CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), nullptr));
}
// static
diff --git a/chromium/base/threading/platform_thread_unittest.cc b/chromium/base/threading/platform_thread_unittest.cc
index e6186f65c68..502557623db 100644
--- a/chromium/base/threading/platform_thread_unittest.cc
+++ b/chromium/base/threading/platform_thread_unittest.cc
@@ -354,4 +354,13 @@ TEST(PlatformThreadTest, GetNiceValueToThreadPriority) {
}
#endif
+TEST(PlatformThreadTest, SetHugeThreadName) {
+ // Construct an excessively long thread name.
+ std::string long_name(1024, 'a');
+
+ // SetName has no return code, so just verify that implementations
+ // don't [D]CHECK().
+ PlatformThread::SetName(long_name);
+}
+
} // namespace base
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index e03142b87a5..eb2edcee441 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -229,9 +229,7 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
// Joining another thread may block the current thread for a long time, since
// the thread referred to by |thread_handle| may still be running long-lived /
// blocking tasks.
-#if 0
- base::ThreadRestrictions::AssertIOAllowed();
-#endif
+ // AssertBlockingAllowed();
DWORD thread_id = 0;
thread_id = ::GetThreadId(thread_handle.platform_handle());
diff --git a/chromium/base/threading/sequenced_task_runner_handle.cc b/chromium/base/threading/sequenced_task_runner_handle.cc
index 19e995f605c..eb2a0e3b30e 100644
--- a/chromium/base/threading/sequenced_task_runner_handle.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle.cc
@@ -17,29 +17,15 @@ namespace base {
namespace {
LazyInstance<ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
- lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
+ sequenced_task_runner_tls = LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
- // Return the registered SingleThreadTaskRunner, if any. This must be at the
- // top so that a SingleThreadTaskRunner has priority over a
- // SequencedTaskRunner (RLZ registers both on the same thread despite that
- // being prevented by DCHECKs).
- // TODO(fdoray): Move this to the bottom once RLZ stops registering a
- // SingleThreadTaskRunner and a SequencedTaskRunner on the same thread.
- // https://crbug.com/618530#c14
- if (ThreadTaskRunnerHandle::IsSet()) {
- // Various modes of setting SequencedTaskRunnerHandle don't combine.
- DCHECK(!lazy_tls_ptr.Pointer()->Get());
- DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
-
- return ThreadTaskRunnerHandle::Get();
- }
-
// Return the registered SequencedTaskRunner, if any.
- const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
+ const SequencedTaskRunnerHandle* handle =
+ sequenced_task_runner_tls.Pointer()->Get();
if (handle) {
// Various modes of setting SequencedTaskRunnerHandle don't combine.
DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
@@ -47,27 +33,31 @@ scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
return handle->task_runner_;
}
- // If we are on a worker thread for a SequencedBlockingPool that is running a
+ // If we are on a worker thread for a SequencedWorkerPool that is running a
// sequenced task, return a SequencedTaskRunner for it.
scoped_refptr<SequencedWorkerPool> pool =
SequencedWorkerPool::GetWorkerPoolForCurrentThread();
- // Note if you hit this: the problem isn't the lack of a |pool|, it's the lack
- // of a sequenced context above. The |pool| is just the last desperate attempt
- // at finding such a context from the deprecated SequencedWorkerPool.
- CHECK(pool) << "Error: This caller requires a sequenced context (i.e. the "
- "current task needs to run from a SequencedTaskRunner).";
- SequencedWorkerPool::SequenceToken sequence_token =
- SequencedWorkerPool::GetSequenceTokenForCurrentThread();
- DCHECK(sequence_token.IsValid());
- scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
- pool->GetSequencedTaskRunner(sequence_token));
- DCHECK(sequenced_task_runner->RunsTasksInCurrentSequence());
- return sequenced_task_runner;
+ if (pool) {
+ SequencedWorkerPool::SequenceToken sequence_token =
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread();
+ DCHECK(sequence_token.IsValid());
+ scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
+ pool->GetSequencedTaskRunner(sequence_token));
+ DCHECK(sequenced_task_runner->RunsTasksInCurrentSequence());
+ return sequenced_task_runner;
+ }
+
+ // Note if you hit this: the problem is the lack of a sequenced context. The
+ // ThreadTaskRunnerHandle is just the last attempt at finding such a context.
+ CHECK(ThreadTaskRunnerHandle::IsSet())
+ << "Error: This caller requires a sequenced context (i.e. the "
+ "current task needs to run from a SequencedTaskRunner).";
+ return ThreadTaskRunnerHandle::Get();
}
// static
bool SequencedTaskRunnerHandle::IsSet() {
- return lazy_tls_ptr.Pointer()->Get() ||
+ return sequenced_task_runner_tls.Pointer()->Get() ||
SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid() ||
ThreadTaskRunnerHandle::IsSet();
}
@@ -77,13 +67,13 @@ SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
: task_runner_(std::move(task_runner)) {
DCHECK(task_runner_->RunsTasksInCurrentSequence());
DCHECK(!SequencedTaskRunnerHandle::IsSet());
- lazy_tls_ptr.Pointer()->Set(this);
+ sequenced_task_runner_tls.Pointer()->Set(this);
}
SequencedTaskRunnerHandle::~SequencedTaskRunnerHandle() {
DCHECK(task_runner_->RunsTasksInCurrentSequence());
- DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
- lazy_tls_ptr.Pointer()->Set(nullptr);
+ DCHECK_EQ(sequenced_task_runner_tls.Pointer()->Get(), this);
+ sequenced_task_runner_tls.Pointer()->Set(nullptr);
}
} // namespace base
diff --git a/chromium/base/threading/sequenced_worker_pool.cc b/chromium/base/threading/sequenced_worker_pool.cc
index 5c336d55977..4a2fdfda6b9 100644
--- a/chromium/base/threading/sequenced_worker_pool.cc
+++ b/chromium/base/threading/sequenced_worker_pool.cc
@@ -80,7 +80,7 @@ struct SequencedTask {
sequence_task_number(0),
shutdown_behavior(SequencedWorkerPool::BLOCK_SHUTDOWN) {}
- ~SequencedTask() {}
+ ~SequencedTask() = default;
SequencedTask(SequencedTask&&) = default;
SequencedTask& operator=(SequencedTask&&) = default;
@@ -151,8 +151,7 @@ SequencedWorkerPoolTaskRunner::SequencedWorkerPoolTaskRunner(
SequencedWorkerPool::WorkerShutdown shutdown_behavior)
: pool_(std::move(pool)), shutdown_behavior_(shutdown_behavior) {}
-SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
-}
+SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() = default;
bool SequencedWorkerPoolTaskRunner::PostDelayedTask(const Location& from_here,
OnceClosure task,
@@ -578,8 +577,7 @@ SequencedWorkerPool::Worker::Worker(
Start();
}
-SequencedWorkerPool::Worker::~Worker() {
-}
+SequencedWorkerPool::Worker::~Worker() = default;
void SequencedWorkerPool::Worker::Run() {
DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
@@ -745,8 +743,7 @@ bool SequencedWorkerPool::Inner::PostTask(
if (!PostTaskToTaskScheduler(std::move(sequenced), delay))
return false;
} else {
- SequencedWorkerPool::WorkerShutdown shutdown_behavior =
- sequenced.shutdown_behavior;
+ shutdown_behavior = sequenced.shutdown_behavior;
pending_tasks_.insert(std::move(sequenced));
if (shutdown_behavior == BLOCK_SHUTDOWN)
@@ -1462,7 +1459,7 @@ SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
max_threads,
thread_name_prefix,
task_priority,
- NULL)) {}
+ nullptr)) {}
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
const std::string& thread_name_prefix,
@@ -1475,7 +1472,7 @@ SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
task_priority,
observer)) {}
-SequencedWorkerPool::~SequencedWorkerPool() {}
+SequencedWorkerPool::~SequencedWorkerPool() = default;
void SequencedWorkerPool::OnDestruct() const {
// Avoid deleting ourselves on a worker thread (which would deadlock).
@@ -1516,7 +1513,7 @@ SequencedWorkerPool::GetTaskRunnerWithShutdownBehavior(
bool SequencedWorkerPool::PostWorkerTask(const Location& from_here,
OnceClosure task) {
- return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN, from_here,
+ return inner_->PostTask(nullptr, SequenceToken(), BLOCK_SHUTDOWN, from_here,
std::move(task), TimeDelta());
}
@@ -1524,14 +1521,14 @@ bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
const Location& from_here,
OnceClosure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
- std::move(task), TimeDelta());
+ return inner_->PostTask(nullptr, SequenceToken(), shutdown_behavior,
+ from_here, std::move(task), TimeDelta());
}
bool SequencedWorkerPool::PostSequencedWorkerTask(SequenceToken sequence_token,
const Location& from_here,
OnceClosure task) {
- return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN, from_here,
+ return inner_->PostTask(nullptr, sequence_token, BLOCK_SHUTDOWN, from_here,
std::move(task), TimeDelta());
}
@@ -1542,7 +1539,7 @@ bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
TimeDelta delay) {
WorkerShutdown shutdown_behavior =
delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ return inner_->PostTask(nullptr, sequence_token, shutdown_behavior, from_here,
std::move(task), delay);
}
@@ -1560,7 +1557,7 @@ bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
const Location& from_here,
OnceClosure task,
WorkerShutdown shutdown_behavior) {
- return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
+ return inner_->PostTask(nullptr, sequence_token, shutdown_behavior, from_here,
std::move(task), TimeDelta());
}
diff --git a/chromium/base/threading/sequenced_worker_pool_unittest.cc b/chromium/base/threading/sequenced_worker_pool_unittest.cc
index 8afd5a18d7a..32ed9126f21 100644
--- a/chromium/base/threading/sequenced_worker_pool_unittest.cc
+++ b/chromium/base/threading/sequenced_worker_pool_unittest.cc
@@ -210,7 +210,7 @@ class TestTracker : public base::RefCountedThreadSafe<TestTracker> {
private:
friend class base::RefCountedThreadSafe<TestTracker>;
- ~TestTracker() {}
+ ~TestTracker() = default;
void SignalWorkerDone(int id) {
{
@@ -1105,9 +1105,9 @@ INSTANTIATE_TEST_CASE_P(
class SequencedWorkerPoolTaskRunnerTestDelegate {
public:
- SequencedWorkerPoolTaskRunnerTestDelegate() {}
+ SequencedWorkerPoolTaskRunnerTestDelegate() = default;
- ~SequencedWorkerPoolTaskRunnerTestDelegate() {}
+ ~SequencedWorkerPoolTaskRunnerTestDelegate() = default;
void StartTaskRunner() {
pool_owner_.reset(
@@ -1140,10 +1140,9 @@ INSTANTIATE_TYPED_TEST_CASE_P(SequencedWorkerPool, TaskRunnerAffinityTest,
class SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate {
public:
- SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate() {}
+ SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate() = default;
- ~SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate() {
- }
+ ~SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate() = default;
void StartTaskRunner() {
pool_owner_.reset(
@@ -1180,10 +1179,9 @@ INSTANTIATE_TYPED_TEST_CASE_P(
class SequencedWorkerPoolSequencedTaskRunnerTestDelegate {
public:
- SequencedWorkerPoolSequencedTaskRunnerTestDelegate() {}
+ SequencedWorkerPoolSequencedTaskRunnerTestDelegate() = default;
- ~SequencedWorkerPoolSequencedTaskRunnerTestDelegate() {
- }
+ ~SequencedWorkerPoolSequencedTaskRunnerTestDelegate() = default;
void StartTaskRunner() {
pool_owner_.reset(new SequencedWorkerPoolOwner(
diff --git a/chromium/base/threading/simple_thread.cc b/chromium/base/threading/simple_thread.cc
index 9eb443afab1..18add0eda0d 100644
--- a/chromium/base/threading/simple_thread.cc
+++ b/chromium/base/threading/simple_thread.cc
@@ -119,7 +119,7 @@ void DelegateSimpleThreadPool::JoinAll() {
DCHECK(!threads_.empty()) << "JoinAll() called with no outstanding threads.";
// Tell all our threads to quit their worker loop.
- AddWork(NULL, num_threads_);
+ AddWork(nullptr, num_threads_);
// Join and destroy all the worker threads.
for (int i = 0; i < num_threads_; ++i) {
@@ -140,7 +140,7 @@ void DelegateSimpleThreadPool::AddWork(Delegate* delegate, int repeat_count) {
}
void DelegateSimpleThreadPool::Run() {
- Delegate* work = NULL;
+ Delegate* work = nullptr;
while (true) {
dry_.Wait();
diff --git a/chromium/base/threading/simple_thread_unittest.cc b/chromium/base/threading/simple_thread_unittest.cc
index 0e52500c522..4e618f9c6aa 100644
--- a/chromium/base/threading/simple_thread_unittest.cc
+++ b/chromium/base/threading/simple_thread_unittest.cc
@@ -19,7 +19,7 @@ namespace {
class SetIntRunner : public DelegateSimpleThread::Delegate {
public:
SetIntRunner(int* ptr, int val) : ptr_(ptr), val_(val) { }
- ~SetIntRunner() override {}
+ ~SetIntRunner() override = default;
private:
void Run() override { *ptr_ = val_; }
@@ -69,7 +69,7 @@ class ControlledRunner : public DelegateSimpleThread::Delegate {
class WaitEventRunner : public DelegateSimpleThread::Delegate {
public:
explicit WaitEventRunner(WaitableEvent* event) : event_(event) { }
- ~WaitEventRunner() override {}
+ ~WaitEventRunner() override = default;
private:
void Run() override {
diff --git a/chromium/base/threading/thread_checker.h b/chromium/base/threading/thread_checker.h
index 85731a80d4b..6799e25813d 100644
--- a/chromium/base/threading/thread_checker.h
+++ b/chromium/base/threading/thread_checker.h
@@ -66,7 +66,7 @@
#define DETACH_FROM_THREAD(name) (name).DetachFromThread()
#else // DCHECK_IS_ON()
#define THREAD_CHECKER(name)
-#define DCHECK_CALLED_ON_VALID_THREAD(name)
+#define DCHECK_CALLED_ON_VALID_THREAD(name) EAT_STREAM_PARAMETERS
#define DETACH_FROM_THREAD(name)
#endif // DCHECK_IS_ON()
diff --git a/chromium/base/threading/thread_checker_unittest.cc b/chromium/base/threading/thread_checker_unittest.cc
index e258bfc2ebb..5fbbc5284a7 100644
--- a/chromium/base/threading/thread_checker_unittest.cc
+++ b/chromium/base/threading/thread_checker_unittest.cc
@@ -216,6 +216,8 @@ class ThreadCheckerMacroTest : public testing::Test {
void ExpectNoDeathOnOtherThreadAfterDetach() {
DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_);
+ DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_)
+ << "Make sure it compiles when DCHECK is off";
}
protected:
diff --git a/chromium/base/threading/thread_collision_warner_unittest.cc b/chromium/base/threading/thread_collision_warner_unittest.cc
index 71447efd737..cd56768c61b 100644
--- a/chromium/base/threading/thread_collision_warner_unittest.cc
+++ b/chromium/base/threading/thread_collision_warner_unittest.cc
@@ -46,7 +46,7 @@ class AssertReporter : public base::AsserterBase {
void warn() override { failed_ = true; }
- ~AssertReporter() override {}
+ ~AssertReporter() override = default;
bool fail_state() const { return failed_; }
void reset() { failed_ = false; }
diff --git a/chromium/base/threading/thread_id_name_manager.cc b/chromium/base/threading/thread_id_name_manager.cc
index 107e0dc4985..74a42c7d707 100644
--- a/chromium/base/threading/thread_id_name_manager.cc
+++ b/chromium/base/threading/thread_id_name_manager.cc
@@ -21,16 +21,14 @@ static std::string* g_default_name;
}
ThreadIdNameManager::ThreadIdNameManager()
- : main_process_name_(NULL),
- main_process_id_(kInvalidThreadId) {
+ : main_process_name_(nullptr), main_process_id_(kInvalidThreadId) {
g_default_name = new std::string(kDefaultName);
AutoLock locked(lock_);
name_to_interned_name_[kDefaultName] = g_default_name;
}
-ThreadIdNameManager::~ThreadIdNameManager() {
-}
+ThreadIdNameManager::~ThreadIdNameManager() = default;
ThreadIdNameManager* ThreadIdNameManager::GetInstance() {
return Singleton<ThreadIdNameManager,
@@ -51,7 +49,7 @@ void ThreadIdNameManager::RegisterThread(PlatformThreadHandle::Handle handle,
void ThreadIdNameManager::SetName(PlatformThreadId id,
const std::string& name) {
- std::string* leaked_str = NULL;
+ std::string* leaked_str = nullptr;
{
AutoLock locked(lock_);
NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
diff --git a/chromium/base/threading/thread_local_storage_unittest.cc b/chromium/base/threading/thread_local_storage_unittest.cc
index 335252b18ed..a577e7680b6 100644
--- a/chromium/base/threading/thread_local_storage_unittest.cc
+++ b/chromium/base/threading/thread_local_storage_unittest.cc
@@ -35,7 +35,7 @@ class ThreadLocalStorageRunner : public DelegateSimpleThread::Delegate {
explicit ThreadLocalStorageRunner(int* tls_value_ptr)
: tls_value_ptr_(tls_value_ptr) {}
- ~ThreadLocalStorageRunner() override {}
+ ~ThreadLocalStorageRunner() override = default;
void Run() override {
*tls_value_ptr_ = kInitialTlsValue;
diff --git a/chromium/base/threading/thread_local_unittest.cc b/chromium/base/threading/thread_local_unittest.cc
index cdc1ca6f567..54f2ad236b0 100644
--- a/chromium/base/threading/thread_local_unittest.cc
+++ b/chromium/base/threading/thread_local_unittest.cc
@@ -20,7 +20,7 @@ class ThreadLocalTesterBase : public base::DelegateSimpleThreadPool::Delegate {
: tlp_(tlp),
done_(done) {
}
- ~ThreadLocalTesterBase() override {}
+ ~ThreadLocalTesterBase() override = default;
protected:
TLPType* tlp_;
@@ -30,10 +30,8 @@ class ThreadLocalTesterBase : public base::DelegateSimpleThreadPool::Delegate {
class SetThreadLocal : public ThreadLocalTesterBase {
public:
SetThreadLocal(TLPType* tlp, base::WaitableEvent* done)
- : ThreadLocalTesterBase(tlp, done),
- val_(NULL) {
- }
- ~SetThreadLocal() override {}
+ : ThreadLocalTesterBase(tlp, done), val_(nullptr) {}
+ ~SetThreadLocal() override = default;
void set_value(char* val) { val_ = val; }
@@ -50,10 +48,8 @@ class SetThreadLocal : public ThreadLocalTesterBase {
class GetThreadLocal : public ThreadLocalTesterBase {
public:
GetThreadLocal(TLPType* tlp, base::WaitableEvent* done)
- : ThreadLocalTesterBase(tlp, done),
- ptr_(NULL) {
- }
- ~GetThreadLocal() override {}
+ : ThreadLocalTesterBase(tlp, done), ptr_(nullptr) {}
+ ~GetThreadLocal() override = default;
void set_ptr(char** ptr) { ptr_ = ptr; }
@@ -93,14 +89,13 @@ TEST(ThreadLocalTest, Pointer) {
done.Reset();
tp1.AddWork(&getter);
done.Wait();
- EXPECT_EQ(static_cast<char*>(NULL), tls_val);
+ EXPECT_EQ(static_cast<char*>(nullptr), tls_val);
tls_val = kBogusPointer;
done.Reset();
tp2.AddWork(&getter);
done.Wait();
- EXPECT_EQ(static_cast<char*>(NULL), tls_val);
-
+ EXPECT_EQ(static_cast<char*>(nullptr), tls_val);
SetThreadLocal setter(&tlp, &done);
setter.set_value(kBogusPointer);
@@ -110,7 +105,7 @@ TEST(ThreadLocalTest, Pointer) {
tp1.AddWork(&setter);
done.Wait();
- tls_val = NULL;
+ tls_val = nullptr;
done.Reset();
tp1.AddWork(&getter);
done.Wait();
@@ -121,7 +116,7 @@ TEST(ThreadLocalTest, Pointer) {
done.Reset();
tp2.AddWork(&getter);
done.Wait();
- EXPECT_EQ(static_cast<char*>(NULL), tls_val);
+ EXPECT_EQ(static_cast<char*>(nullptr), tls_val);
// Set thread 2 to kBogusPointer + 1.
setter.set_value(kBogusPointer + 1);
@@ -130,14 +125,14 @@ TEST(ThreadLocalTest, Pointer) {
tp2.AddWork(&setter);
done.Wait();
- tls_val = NULL;
+ tls_val = nullptr;
done.Reset();
tp2.AddWork(&getter);
done.Wait();
EXPECT_EQ(kBogusPointer + 1, tls_val);
// Make sure thread 1 is still kBogusPointer.
- tls_val = NULL;
+ tls_val = nullptr;
done.Reset();
tp1.AddWork(&getter);
done.Wait();
diff --git a/chromium/base/threading/thread_perftest.cc b/chromium/base/threading/thread_perftest.cc
index 4e29f25d9e3..bf890496645 100644
--- a/chromium/base/threading/thread_perftest.cc
+++ b/chromium/base/threading/thread_perftest.cc
@@ -273,8 +273,8 @@ class PthreadEvent {
WaitableEvent::InitialState initial_state) {
DCHECK_EQ(WaitableEvent::ResetPolicy::AUTOMATIC, reset_policy);
DCHECK_EQ(WaitableEvent::InitialState::NOT_SIGNALED, initial_state);
- pthread_mutex_init(&mutex_, 0);
- pthread_cond_init(&cond_, 0);
+ pthread_mutex_init(&mutex_, nullptr);
+ pthread_cond_init(&cond_, nullptr);
signaled_ = false;
}
diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc
index 76c077edccb..e7e1716e080 100644
--- a/chromium/base/threading/thread_restrictions.cc
+++ b/chromium/base/threading/thread_restrictions.cc
@@ -126,11 +126,6 @@ bool ThreadRestrictions::SetIOAllowed(bool allowed) {
}
// static
-void ThreadRestrictions::AssertIOAllowed() {
- AssertBlockingAllowed();
-}
-
-// static
bool ThreadRestrictions::SetSingletonAllowed(bool allowed) {
bool previous_disallowed = g_singleton_disallowed.Get().Get();
g_singleton_disallowed.Get().Set(!allowed);
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index d372d5ac17e..0056f10fdca 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -66,20 +66,26 @@ class GpuChannelHost;
namespace leveldb {
class LevelDBMojoProxy;
}
+namespace media {
+class BlockingUrlProtocol;
+}
namespace mojo {
class SyncCallRestrictions;
namespace edk {
class ScopedIPCSupport;
}
}
+namespace rlz_lib {
+class FinancialPing;
+}
namespace ui {
class CommandBufferClientImpl;
class CommandBufferLocal;
class GpuState;
}
namespace net {
+class MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
class NetworkChangeNotifierMac;
-class OCSPScopedAllowBaseSyncPrimitives;
namespace internal {
class AddressTrackerLinux;
}
@@ -89,6 +95,14 @@ namespace remoting {
class AutoThread;
}
+namespace resource_coordinator {
+class TabManagerDelegate;
+}
+
+namespace shell_integration {
+class LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
+}
+
namespace ui {
class WindowResizeHelperMac;
}
@@ -111,6 +125,7 @@ namespace internal {
class TaskTracker;
}
+class GetAppOutputScopedAllowBaseSyncPrimitives;
class SequencedWorkerPool;
class SimpleThread;
class StackSamplingProfiler;
@@ -133,6 +148,26 @@ class ThreadTestHelper;
// Acquiring a low contention lock is not considered a blocking call.
// Asserts that blocking calls are allowed in the current scope.
+//
+// Style tip: It's best if you put AssertBlockingAllowed() checks as close to
+// the blocking call as possible. For example:
+//
+// void ReadFile() {
+// PreWork();
+//
+// base::AssertBlockingAllowed();
+// fopen(...);
+//
+// PostWork();
+// }
+//
+// void Bar() {
+// ReadFile();
+// }
+//
+// void Foo() {
+// Bar();
+// }
INLINE_IF_DCHECK_IS_OFF void AssertBlockingAllowed()
EMPTY_BODY_IF_DCHECK_IS_OFF;
@@ -165,6 +200,7 @@ class BASE_EXPORT ScopedAllowBlocking {
FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest, ScopedAllowBlocking);
friend class cronet::CronetPrefsManager;
friend class cronet::CronetURLRequestContextAdapter;
+ friend class resource_coordinator::TabManagerDelegate; // crbug.com/778703
friend class ScopedAllowBlockingForTesting;
ScopedAllowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
@@ -190,8 +226,10 @@ class ScopedAllowBlockingForTesting {
DISALLOW_COPY_AND_ASSIGN(ScopedAllowBlockingForTesting);
};
-// "Waiting on a //base sync primitive" refers to calling
-// base::WaitableEvent::*Wait* or base::ConditionVariable::*Wait*.
+// "Waiting on a //base sync primitive" refers to calling one of these methods:
+// - base::WaitableEvent::*Wait*
+// - base::ConditionVariable::*Wait*
+// - base::Process::WaitForExit*
// Disallows waiting on a //base sync primitive on the current thread.
INLINE_IF_DCHECK_IS_OFF void DisallowBaseSyncPrimitives()
@@ -200,11 +238,15 @@ INLINE_IF_DCHECK_IS_OFF void DisallowBaseSyncPrimitives()
// ScopedAllowBaseSyncPrimitives(ForTesting)(OutsideBlockingScope) allow waiting
// on a //base sync primitive within a scope where this is normally disallowed.
//
-// Avoid using this. Instead of waiting on a WaitableEvent or a
-// ConditionVariable, put the work that should happen after the wait in a
-// callback and post that callback from where the WaitableEvent or
-// ConditionVariable would have been signaled. If something needs to be
-// scheduled after many tasks have executed, use base::BarrierClosure.
+// Avoid using this.
+//
+// Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
+// that should happen after the wait in a callback and post that callback from
+// where the WaitableEvent or ConditionVariable would have been signaled. If
+// something needs to be scheduled after many tasks have executed, use
+// base::BarrierClosure.
+//
+// On Windows, join processes asynchronously using base::win::ObjectWatcher.
// This can only be used in a scope where blocking is allowed.
class BASE_EXPORT ScopedAllowBaseSyncPrimitives {
@@ -218,8 +260,12 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitives {
ScopedAllowBaseSyncPrimitivesResetsState);
FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
ScopedAllowBaseSyncPrimitivesWithBlockingDisallowed);
+ friend class base::GetAppOutputScopedAllowBaseSyncPrimitives;
friend class leveldb::LevelDBMojoProxy;
- friend class net::OCSPScopedAllowBaseSyncPrimitives;
+ friend class media::BlockingUrlProtocol;
+ friend class net::MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
+ friend class rlz_lib::FinancialPing;
+ friend class shell_integration::LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
~ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
@@ -283,32 +329,6 @@ INLINE_IF_DCHECK_IS_OFF void ResetThreadRestrictionsForTesting()
} // namespace internal
-// Certain behavior is disallowed on certain threads. ThreadRestrictions helps
-// enforce these rules. Examples of such rules:
-//
-// * Do not do blocking IO (makes the thread janky)
-// * Do not access Singleton/LazyInstance (may lead to shutdown crashes)
-//
-// Here's more about how the protection works:
-//
-// 1) If a thread should not be allowed to make IO calls, mark it:
-// base::ThreadRestrictions::SetIOAllowed(false);
-// By default, threads *are* allowed to make IO calls.
-// In Chrome browser code, IO calls should be proxied to a TaskRunner with
-// the base::MayBlock() trait.
-//
-// 2) If a function makes a call that will go out to disk, check whether the
-// current thread is allowed:
-// base::ThreadRestrictions::AssertIOAllowed();
-//
-//
-// Style tip: where should you put AssertIOAllowed checks? It's best
-// if you put them as close to the disk access as possible, at the
-// lowest level. This rule is simple to follow and helps catch all
-// callers. For example, if your function GoDoSomeBlockingDiskCall()
-// only calls other functions in Chrome and not fopen(), you should go
-// add the AssertIOAllowed checks in the helper functions.
-
class BASE_EXPORT ThreadRestrictions {
public:
// Constructing a ScopedAllowIO temporarily allows IO for the current
@@ -334,13 +354,6 @@ class BASE_EXPORT ThreadRestrictions {
// DEPRECATED. Use ScopedAllowBlocking(ForTesting) or ScopedDisallowBlocking.
static bool SetIOAllowed(bool allowed);
- // Check whether the current thread is allowed to make IO calls,
- // and DCHECK if not. See the block comment above the class for
- // a discussion of where to add these checks.
- //
- // DEPRECATED. Use AssertBlockingAllowed.
- static void AssertIOAllowed();
-
// Set whether the current thread can use singletons. Returns the previous
// value.
static bool SetSingletonAllowed(bool allowed);
@@ -358,7 +371,6 @@ class BASE_EXPORT ThreadRestrictions {
// Inline the empty definitions of these functions so that they can be
// compiled out.
static bool SetIOAllowed(bool allowed) { return true; }
- static void AssertIOAllowed() {}
static bool SetSingletonAllowed(bool allowed) { return true; }
static void AssertSingletonAllowed() {}
static void DisallowWaiting() {}
diff --git a/chromium/base/threading/thread_task_runner_handle.cc b/chromium/base/threading/thread_task_runner_handle.cc
index 883f921d4e6..1d9756fb235 100644
--- a/chromium/base/threading/thread_task_runner_handle.cc
+++ b/chromium/base/threading/thread_task_runner_handle.cc
@@ -19,13 +19,13 @@ namespace base {
namespace {
base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle>>::Leaky
- lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
+ thread_task_runner_tls = LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
scoped_refptr<SingleThreadTaskRunner> ThreadTaskRunnerHandle::Get() {
- ThreadTaskRunnerHandle* current = lazy_tls_ptr.Pointer()->Get();
+ ThreadTaskRunnerHandle* current = thread_task_runner_tls.Pointer()->Get();
CHECK(current) << "Error: This caller requires a single-threaded context "
"(i.e. the current task needs to run from a "
"SingleThreadTaskRunner).";
@@ -34,7 +34,7 @@ scoped_refptr<SingleThreadTaskRunner> ThreadTaskRunnerHandle::Get() {
// static
bool ThreadTaskRunnerHandle::IsSet() {
- return !!lazy_tls_ptr.Pointer()->Get();
+ return !!thread_task_runner_tls.Pointer()->Get();
}
// static
@@ -61,7 +61,7 @@ ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
base::Passed(&top_level_ttrh)));
}
- ThreadTaskRunnerHandle* ttrh = lazy_tls_ptr.Pointer()->Get();
+ ThreadTaskRunnerHandle* ttrh = thread_task_runner_tls.Pointer()->Get();
// Swap the two (and below bind |overriding_task_runner|, which is now the
// previous one, as the |task_runner_to_restore|).
ttrh->task_runner_.swap(overriding_task_runner);
@@ -74,7 +74,7 @@ ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
SingleThreadTaskRunner* expected_task_runner_before_restore,
std::unique_ptr<RunLoop::ScopedDisallowRunningForTesting>
no_running_during_override) {
- ThreadTaskRunnerHandle* ttrh = lazy_tls_ptr.Pointer()->Get();
+ ThreadTaskRunnerHandle* ttrh = thread_task_runner_tls.Pointer()->Get();
DCHECK_EQ(expected_task_runner_before_restore, ttrh->task_runner_.get())
<< "Nested overrides must expire their ScopedClosureRunners "
@@ -94,13 +94,13 @@ ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
// No SequencedTaskRunnerHandle (which includes ThreadTaskRunnerHandles)
// should already be set for this thread.
DCHECK(!SequencedTaskRunnerHandle::IsSet());
- lazy_tls_ptr.Pointer()->Set(this);
+ thread_task_runner_tls.Pointer()->Set(this);
}
ThreadTaskRunnerHandle::~ThreadTaskRunnerHandle() {
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
- lazy_tls_ptr.Pointer()->Set(nullptr);
+ DCHECK_EQ(thread_task_runner_tls.Pointer()->Get(), this);
+ thread_task_runner_tls.Pointer()->Set(nullptr);
}
} // namespace base
diff --git a/chromium/base/threading/thread_unittest.cc b/chromium/base/threading/thread_unittest.cc
index 08c5e034423..d0f732dd417 100644
--- a/chromium/base/threading/thread_unittest.cc
+++ b/chromium/base/threading/thread_unittest.cc
@@ -109,7 +109,7 @@ class CapturingDestructionObserver
// DestructionObserver implementation:
void WillDestroyCurrentMessageLoop() override {
event_list_->push_back(THREAD_EVENT_MESSAGE_LOOP_DESTROYED);
- event_list_ = NULL;
+ event_list_ = nullptr;
}
private:
diff --git a/chromium/base/threading/watchdog_unittest.cc b/chromium/base/threading/watchdog_unittest.cc
index 473f3ecfe21..f534a863d4a 100644
--- a/chromium/base/threading/watchdog_unittest.cc
+++ b/chromium/base/threading/watchdog_unittest.cc
@@ -27,7 +27,7 @@ class WatchdogCounter : public Watchdog {
alarm_counter_(0) {
}
- ~WatchdogCounter() override {}
+ ~WatchdogCounter() override = default;
void Alarm() override {
alarm_counter_++;
diff --git a/chromium/base/time/clock.cc b/chromium/base/time/clock.cc
index 34dc37e38b5..9e3f27122ee 100644
--- a/chromium/base/time/clock.cc
+++ b/chromium/base/time/clock.cc
@@ -6,6 +6,6 @@
namespace base {
-Clock::~Clock() {}
+Clock::~Clock() = default;
} // namespace base
diff --git a/chromium/base/time/default_clock.cc b/chromium/base/time/default_clock.cc
index 5f70114bb8b..284e12d526c 100644
--- a/chromium/base/time/default_clock.cc
+++ b/chromium/base/time/default_clock.cc
@@ -4,12 +4,20 @@
#include "base/time/default_clock.h"
+#include "base/lazy_instance.h"
+
namespace base {
-DefaultClock::~DefaultClock() {}
+DefaultClock::~DefaultClock() = default;
Time DefaultClock::Now() {
return Time::Now();
}
+// static
+DefaultClock* DefaultClock::GetInstance() {
+ static LazyInstance<DefaultClock>::Leaky instance = LAZY_INSTANCE_INITIALIZER;
+ return instance.Pointer();
+}
+
} // namespace base
diff --git a/chromium/base/time/default_clock.h b/chromium/base/time/default_clock.h
index 0b8250e539b..b652599f360 100644
--- a/chromium/base/time/default_clock.h
+++ b/chromium/base/time/default_clock.h
@@ -18,6 +18,9 @@ class BASE_EXPORT DefaultClock : public Clock {
// Simply returns Time::Now().
Time Now() override;
+
+ // Returns a shared instance of DefaultClock. This is thread-safe.
+ static DefaultClock* GetInstance();
};
} // namespace base
diff --git a/chromium/base/time/default_tick_clock.cc b/chromium/base/time/default_tick_clock.cc
index ce62fcc3d1b..96d4d869eb6 100644
--- a/chromium/base/time/default_tick_clock.cc
+++ b/chromium/base/time/default_tick_clock.cc
@@ -4,12 +4,21 @@
#include "base/time/default_tick_clock.h"
+#include "base/lazy_instance.h"
+
namespace base {
-DefaultTickClock::~DefaultTickClock() {}
+DefaultTickClock::~DefaultTickClock() = default;
TimeTicks DefaultTickClock::NowTicks() {
return TimeTicks::Now();
}
+// static
+DefaultTickClock* DefaultTickClock::GetInstance() {
+ static LazyInstance<DefaultTickClock>::Leaky instance =
+ LAZY_INSTANCE_INITIALIZER;
+ return instance.Pointer();
+}
+
} // namespace base
diff --git a/chromium/base/time/default_tick_clock.h b/chromium/base/time/default_tick_clock.h
index cb041e6124a..dce2538b1d9 100644
--- a/chromium/base/time/default_tick_clock.h
+++ b/chromium/base/time/default_tick_clock.h
@@ -18,6 +18,9 @@ class BASE_EXPORT DefaultTickClock : public TickClock {
// Simply returns TimeTicks::Now().
TimeTicks NowTicks() override;
+
+ // Returns a shared instance of DefaultTickClock. This is thread-safe.
+ static DefaultTickClock* GetInstance();
};
} // namespace base
diff --git a/chromium/base/time/tick_clock.cc b/chromium/base/time/tick_clock.cc
index 495805c26a9..79e396de612 100644
--- a/chromium/base/time/tick_clock.cc
+++ b/chromium/base/time/tick_clock.cc
@@ -6,6 +6,6 @@
namespace base {
-TickClock::~TickClock() {}
+TickClock::~TickClock() = default;
} // namespace base
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index c5248261109..52be6aa8adb 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -250,7 +250,7 @@ Time Time::LocalMidnight() const {
bool Time::FromStringInternal(const char* time_string,
bool is_local,
Time* parsed_time) {
- DCHECK((time_string != NULL) && (parsed_time != NULL));
+ DCHECK((time_string != nullptr) && (parsed_time != nullptr));
if (time_string[0] == '\0')
return false;
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index 2befad3918d..c1e25134222 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -106,8 +106,7 @@ BASE_EXPORT int64_t SaturatedSub(TimeDelta delta, int64_t value);
class BASE_EXPORT TimeDelta {
public:
- TimeDelta() : delta_(0) {
- }
+ constexpr TimeDelta() : delta_(0) {}
// Converts units of time to TimeDeltas.
static constexpr TimeDelta FromDays(int days);
@@ -460,6 +459,33 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
#endif
+// kExplodedMinYear and kExplodedMaxYear define the platform-specific limits
+// for values passed to FromUTCExploded() and FromLocalExploded(). Those
+// functions will return false if passed values outside these limits. The limits
+// are inclusive, meaning that the API should support all dates within a given
+// limit year.
+#if defined(OS_WIN)
+ static constexpr int kExplodedMinYear = 1601;
+ static constexpr int kExplodedMaxYear = 30827;
+#elif defined(OS_IOS)
+ static constexpr int kExplodedMinYear = std::numeric_limits<int>::min();
+ static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#elif defined(OS_MACOSX)
+ static constexpr int kExplodedMinYear = 1902;
+ static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#elif defined(OS_ANDROID)
+ // Though we use 64-bit time APIs on both 32 and 64 bit Android, some OS
+ // versions like KitKat (ARM but not x86 emulator) can't handle some early
+ // dates (e.g. before 1170). So we set min conservatively here.
+ static constexpr int kExplodedMinYear = 1902;
+ static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#else
+ static constexpr int kExplodedMinYear =
+ (sizeof(time_t) == 4 ? 1902 : std::numeric_limits<int>::min());
+ static constexpr int kExplodedMaxYear =
+ (sizeof(time_t) == 4 ? 2037 : std::numeric_limits<int>::max());
+#endif
+
// Represents an exploded time that can be formatted nicely. This is kind of
// like the Win32 SYSTEMTIME structure or the Unix "struct tm" with a few
// additions and changes to prevent errors.
diff --git a/chromium/base/time/time_exploded_posix.cc b/chromium/base/time/time_exploded_posix.cc
index beeffa32b21..627c6b4f873 100644
--- a/chromium/base/time/time_exploded_posix.cc
+++ b/chromium/base/time/time_exploded_posix.cc
@@ -24,10 +24,8 @@
#include "base/os_compat_nacl.h"
#endif
-// Ensure the Mac build does not include this module. Instead, non-POSIX
-// implementation is used to support Time::Exploded.
#if defined(OS_MACOSX)
-#error "This implementation is for POSIX platforms other than Mac."
+static_assert(sizeof(time_t) >= 8, "Y2038 problem!");
#endif
namespace {
@@ -189,7 +187,7 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
timestruct.tm_isdst = -1; // attempt to figure it out
#if !defined(OS_NACL) && !defined(OS_SOLARIS) && !defined(OS_AIX)
timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
- timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_zone = nullptr; // not a POSIX field, so mktime/timegm ignore
#endif
SysTime seconds;
diff --git a/chromium/base/time/time_mac.cc b/chromium/base/time/time_mac.cc
index c4f83156a54..fdac0c46a29 100644
--- a/chromium/base/time/time_mac.cc
+++ b/chromium/base/time/time_mac.cc
@@ -171,6 +171,13 @@ Time Time::NowFromSystemTime() {
return Now();
}
+// Note: These implementations of Time::FromExploded() and Time::Explode() are
+// only used on iOS now. Since Mac is now always 64-bit, we can use the POSIX
+// versions of these functions as time_t is not capped at year 2038 on 64-bit
+// builds. The POSIX functions are preferred since they don't suffer from some
+// performance problems that are present in these implementations.
+// See crbug.com/781601 for more details.
+#if defined(OS_IOS)
// static
bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
@@ -258,6 +265,7 @@ void Time::Explode(bool is_local, Exploded* exploded) const {
(microsecond - kMicrosecondsPerMillisecond + 1) /
kMicrosecondsPerMillisecond;
}
+#endif // OS_IOS
// TimeTicks ------------------------------------------------------------------
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index fe675f194c3..28eff7aa6bb 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -17,6 +17,10 @@
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_IOS)
+#include "base/ios/ios_util.h"
+#endif
+
namespace base {
namespace {
@@ -111,10 +115,66 @@ class TimeTest : public testing::Test {
Time comparison_time_pdt_;
};
-// Test conversions to/from time_t and exploding/unexploding.
+// Test conversion to/from time_t.
TEST_F(TimeTest, TimeT) {
+ EXPECT_EQ(10, Time().FromTimeT(10).ToTimeT());
+ EXPECT_EQ(10.0, Time().FromTimeT(10).ToDoubleT());
+
+ // Conversions of 0 should stay 0.
+ EXPECT_EQ(0, Time().ToTimeT());
+ EXPECT_EQ(0, Time::FromTimeT(0).ToInternalValue());
+}
+
+// Test conversions to/from time_t and exploding/unexploding (utc time).
+TEST_F(TimeTest, UTCTimeT) {
+ // C library time and exploded time.
+ time_t now_t_1 = time(nullptr);
+ struct tm tms;
+#if defined(OS_WIN)
+ gmtime_s(&tms, &now_t_1);
+#elif defined(OS_POSIX)
+ gmtime_r(&now_t_1, &tms);
+#endif
+
+ // Convert to ours.
+ Time our_time_1 = Time::FromTimeT(now_t_1);
+ Time::Exploded exploded;
+ our_time_1.UTCExplode(&exploded);
+
+ // This will test both our exploding and our time_t -> Time conversion.
+ EXPECT_EQ(tms.tm_year + 1900, exploded.year);
+ EXPECT_EQ(tms.tm_mon + 1, exploded.month);
+ EXPECT_EQ(tms.tm_mday, exploded.day_of_month);
+ EXPECT_EQ(tms.tm_hour, exploded.hour);
+ EXPECT_EQ(tms.tm_min, exploded.minute);
+ EXPECT_EQ(tms.tm_sec, exploded.second);
+
+ // Convert exploded back to the time struct.
+ Time our_time_2;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &our_time_2));
+ EXPECT_TRUE(our_time_1 == our_time_2);
+
+ time_t now_t_2 = our_time_2.ToTimeT();
+ EXPECT_EQ(now_t_1, now_t_2);
+}
+
+// Test conversions to/from time_t and exploding/unexploding (local time).
+TEST_F(TimeTest, LocalTimeT) {
+#if defined(OS_IOS) && TARGET_OS_SIMULATOR
+ // The function CFTimeZoneCopySystem() fails to determine the system timezone
+ // when running iOS 11.0 simulator on an host running High Sierra and return
+ // the "GMT" timezone. This causes Time::LocalExplode and localtime_r values
+ // to differ by the local timezone offset. Disable the test if simulating
+ // iOS 10.0 as it is not possible to check the version of the host mac.
+ // TODO(crbug.com/782033): remove this once support for iOS pre-11.0 is
+ // dropped or when the bug in CFTimeZoneCopySystem() is fixed.
+ if (ios::IsRunningOnIOS10OrLater() && !ios::IsRunningOnIOS11OrLater()) {
+ return;
+ }
+#endif
+
// C library time and exploded time.
- time_t now_t_1 = time(NULL);
+ time_t now_t_1 = time(nullptr);
struct tm tms;
#if defined(OS_WIN)
localtime_s(&tms, &now_t_1);
@@ -142,13 +202,6 @@ TEST_F(TimeTest, TimeT) {
time_t now_t_2 = our_time_2.ToTimeT();
EXPECT_EQ(now_t_1, now_t_2);
-
- EXPECT_EQ(10, Time().FromTimeT(10).ToTimeT());
- EXPECT_EQ(10.0, Time().FromTimeT(10).ToDoubleT());
-
- // Conversions of 0 should stay 0.
- EXPECT_EQ(0, Time().ToTimeT());
- EXPECT_EQ(0, Time::FromTimeT(0).ToInternalValue());
}
// Test conversions to/from javascript time.
@@ -631,6 +684,48 @@ TEST_F(TimeTest, FromLocalExplodedCrashOnAndroid) {
}
#endif // OS_ANDROID
+TEST_F(TimeTest, FromExploded_MinMax) {
+ Time::Exploded exploded = {0};
+ exploded.month = 1;
+ exploded.day_of_month = 1;
+
+ Time parsed_time;
+
+ if (Time::kExplodedMinYear != std::numeric_limits<int>::min()) {
+ exploded.year = Time::kExplodedMinYear;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &parsed_time));
+#if !defined(OS_WIN)
+ // On Windows, January 1, 1601 00:00:00 is actually the null time.
+ EXPECT_FALSE(parsed_time.is_null());
+#endif
+
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
+ // The dates earlier than |kExplodedMinYear| that don't work are OS version
+ // dependent on Android and Mac (for example, macOS 10.13 seems to support
+ // dates before 1902).
+ exploded.year--;
+ EXPECT_FALSE(Time::FromUTCExploded(exploded, &parsed_time));
+ EXPECT_TRUE(parsed_time.is_null());
+#endif
+ }
+
+ if (Time::kExplodedMaxYear != std::numeric_limits<int>::max()) {
+ exploded.year = Time::kExplodedMaxYear;
+ exploded.month = 12;
+ exploded.day_of_month = 31;
+ exploded.hour = 23;
+ exploded.minute = 59;
+ exploded.second = 59;
+ exploded.millisecond = 999;
+ EXPECT_TRUE(Time::FromUTCExploded(exploded, &parsed_time));
+ EXPECT_FALSE(parsed_time.is_null());
+
+ exploded.year++;
+ EXPECT_FALSE(Time::FromUTCExploded(exploded, &parsed_time));
+ EXPECT_TRUE(parsed_time.is_null());
+ }
+}
+
TEST(TimeTicks, Deltas) {
for (int index = 0; index < 50; index++) {
TimeTicks ticks_start = TimeTicks::Now();
diff --git a/chromium/base/time/time_win.cc b/chromium/base/time/time_win.cc
index ff1e0169daf..44d442d0a04 100644
--- a/chromium/base/time/time_win.cc
+++ b/chromium/base/time/time_win.cc
@@ -75,15 +75,15 @@ int64_t CurrentWallclockMicroseconds() {
return FileTimeToMicroseconds(ft);
}
-// Time between resampling the un-granular clock for this API. 60 seconds.
-const int kMaxMillisecondsToAvoidDrift = 60 * Time::kMillisecondsPerSecond;
+// Time between resampling the un-granular clock for this API.
+constexpr TimeDelta kMaxTimeToAvoidDrift = TimeDelta::FromSeconds(60);
-int64_t initial_time = 0;
-TimeTicks initial_ticks;
+int64_t g_initial_time = 0;
+TimeTicks g_initial_ticks;
void InitializeClock() {
- initial_ticks = TimeTicks::Now();
- initial_time = CurrentWallclockMicroseconds();
+ g_initial_ticks = TimeTicks::Now();
+ g_initial_time = CurrentWallclockMicroseconds();
}
// The two values that ActivateHighResolutionTimer uses to set the systemwide
@@ -132,14 +132,14 @@ bool SafeConvertToWord(int in, WORD* out) {
// static
Time Time::Now() {
- if (initial_time == 0)
+ if (g_initial_time == 0)
InitializeClock();
// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. If we just used
// CurrentWallclockMicroseconds(), we'd have the less-granular timer.
//
- // To make this work, we initialize the clock (initial_time) and the
+ // To make this work, we initialize the clock (g_initial_time) and the
// counter (initial_ctr). To compute the initial time, we can check
// the number of ticks that have elapsed, and compute the delta.
//
@@ -149,15 +149,15 @@ Time Time::Now() {
TimeTicks ticks = TimeTicks::Now();
// Calculate the time elapsed since we started our timer
- TimeDelta elapsed = ticks - initial_ticks;
+ TimeDelta elapsed = ticks - g_initial_ticks;
// Check if enough time has elapsed that we need to resync the clock.
- if (elapsed.InMilliseconds() > kMaxMillisecondsToAvoidDrift) {
+ if (elapsed > kMaxTimeToAvoidDrift) {
InitializeClock();
continue;
}
- return Time(elapsed + Time(initial_time));
+ return Time(elapsed + Time(g_initial_time));
}
}
@@ -165,7 +165,7 @@ Time Time::Now() {
Time Time::NowFromSystemTime() {
// Force resync.
InitializeClock();
- return Time(initial_time);
+ return Time(g_initial_time);
}
// static
@@ -695,7 +695,7 @@ double ThreadTicks::TSCTicksPerSecond() {
double elapsed_time_seconds =
perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
- const double kMinimumEvaluationPeriodSeconds = 0.05;
+ static constexpr double kMinimumEvaluationPeriodSeconds = 0.05;
if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
return 0;
diff --git a/chromium/base/timer/hi_res_timer_manager_posix.cc b/chromium/base/timer/hi_res_timer_manager_posix.cc
index 89012b9f61f..d2a3aa56cf5 100644
--- a/chromium/base/timer/hi_res_timer_manager_posix.cc
+++ b/chromium/base/timer/hi_res_timer_manager_posix.cc
@@ -12,8 +12,7 @@ HighResolutionTimerManager::HighResolutionTimerManager()
: hi_res_clock_available_(false) {
}
-HighResolutionTimerManager::~HighResolutionTimerManager() {
-}
+HighResolutionTimerManager::~HighResolutionTimerManager() = default;
void HighResolutionTimerManager::OnPowerStateChange(bool on_battery_power) {
}
diff --git a/chromium/base/timer/mock_timer.cc b/chromium/base/timer/mock_timer.cc
index b53c0065dbf..ca0893ba0a1 100644
--- a/chromium/base/timer/mock_timer.cc
+++ b/chromium/base/timer/mock_timer.cc
@@ -17,8 +17,7 @@ MockTimer::MockTimer(const Location& posted_from,
bool is_repeating)
: Timer(true, is_repeating), delay_(delay), is_running_(false) {}
-MockTimer::~MockTimer() {
-}
+MockTimer::~MockTimer() = default;
bool MockTimer::IsRunning() const {
return is_running_;
diff --git a/chromium/base/timer/mock_timer_unittest.cc b/chromium/base/timer/mock_timer_unittest.cc
index a38981513a4..ed05f39db99 100644
--- a/chromium/base/timer/mock_timer_unittest.cc
+++ b/chromium/base/timer/mock_timer_unittest.cc
@@ -56,8 +56,8 @@ TEST(MockTimerTest, Stops) {
class HasWeakPtr : public base::SupportsWeakPtr<HasWeakPtr> {
public:
- HasWeakPtr() {}
- virtual ~HasWeakPtr() {}
+ HasWeakPtr() = default;
+ virtual ~HasWeakPtr() = default;
private:
DISALLOW_COPY_AND_ASSIGN(HasWeakPtr);
diff --git a/chromium/base/tools_sanity_unittest.cc b/chromium/base/tools_sanity_unittest.cc
index 550845c2dc4..5c41bd74f29 100644
--- a/chromium/base/tools_sanity_unittest.cc
+++ b/chromium/base/tools_sanity_unittest.cc
@@ -9,6 +9,7 @@
#include <stddef.h>
#include "base/atomicops.h"
+#include "base/cfi_flags.h"
#include "base/debug/asan_invalid_access.h"
#include "base/debug/profiler.h"
#include "base/message_loop/message_loop.h"
@@ -253,7 +254,7 @@ namespace {
class TOOLS_SANITY_TEST_CONCURRENT_THREAD : public PlatformThread::Delegate {
public:
explicit TOOLS_SANITY_TEST_CONCURRENT_THREAD(bool *value) : value_(value) {}
- ~TOOLS_SANITY_TEST_CONCURRENT_THREAD() override {}
+ ~TOOLS_SANITY_TEST_CONCURRENT_THREAD() override = default;
void ThreadMain() override {
*value_ = true;
@@ -269,7 +270,7 @@ class TOOLS_SANITY_TEST_CONCURRENT_THREAD : public PlatformThread::Delegate {
class ReleaseStoreThread : public PlatformThread::Delegate {
public:
explicit ReleaseStoreThread(base::subtle::Atomic32 *value) : value_(value) {}
- ~ReleaseStoreThread() override {}
+ ~ReleaseStoreThread() override = default;
void ThreadMain() override {
base::subtle::Release_Store(value_, kMagicValue);
@@ -285,7 +286,7 @@ class ReleaseStoreThread : public PlatformThread::Delegate {
class AcquireLoadThread : public PlatformThread::Delegate {
public:
explicit AcquireLoadThread(base::subtle::Atomic32 *value) : value_(value) {}
- ~AcquireLoadThread() override {}
+ ~AcquireLoadThread() override = default;
void ThreadMain() override {
// Wait for the other thread to make Release_Store
PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
@@ -342,7 +343,7 @@ TEST(ToolsSanityTest, AtomicsAreIgnored) {
EXPECT_EQ(kMagicValue, shared);
}
-#if defined(CFI_ENFORCEMENT_TRAP)
+#if BUILDFLAG(CFI_ENFORCEMENT_TRAP)
#if defined(OS_WIN)
#define CFI_ERROR_MSG "EXCEPTION_ILLEGAL_INSTRUCTION"
#elif defined(OS_ANDROID)
@@ -352,9 +353,9 @@ TEST(ToolsSanityTest, AtomicsAreIgnored) {
#else
#define CFI_ERROR_MSG "ILL_ILLOPN"
#endif
-#elif defined(CFI_ENFORCEMENT_DIAGNOSTIC)
+#elif BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
#define CFI_ERROR_MSG "runtime error: control flow integrity check"
-#endif // CFI_ENFORCEMENT_TRAP || CFI_ENFORCEMENT_DIAGNOSTIC
+#endif // BUILDFLAG(CFI_ENFORCEMENT_TRAP || CFI_ENFORCEMENT_DIAGNOSTIC)
#if defined(CFI_ERROR_MSG)
class A {
@@ -400,7 +401,7 @@ TEST(ToolsSanityTest, BadVirtualCallWrongType) {
}
// TODO(pcc): remove CFI_CAST_CHECK, see https://crbug.com/626794.
-#if defined(CFI_CAST_CHECK)
+#if BUILDFLAG(CFI_CAST_CHECK)
TEST(ToolsSanityTest, BadDerivedCast) {
A a;
EXPECT_DEATH((void)(B*)&a, CFI_ERROR_MSG);
@@ -418,8 +419,8 @@ TEST(ToolsSanityTest, BadUnrelatedCast) {
A a;
EXPECT_DEATH((void)(B*)&a, CFI_ERROR_MSG);
}
-#endif // CFI_CAST_CHECK
+#endif // BUILDFLAG(CFI_CAST_CHECK)
-#endif // CFI_ERROR_MSG
+#endif // CFI_ERROR_MSG
} // namespace base
diff --git a/chromium/base/trace_event/common/trace_event_common.h b/chromium/base/trace_event/common/trace_event_common.h
index 132a4ea66fc..51869ee9525 100644
--- a/chromium/base/trace_event/common/trace_event_common.h
+++ b/chromium/base/trace_event/common/trace_event_common.h
@@ -189,6 +189,8 @@
// trace points would carry a significant performance cost of acquiring a lock
// and resolving the category.
+// Check that nobody includes this file directly. Clients are supposed to
+// include the surrounding "trace_event.h" of their project instead.
#if defined(TRACE_EVENT0)
#error "Another copy of this file has already been included."
#endif
diff --git a/chromium/base/trace_event/event_name_filter.cc b/chromium/base/trace_event/event_name_filter.cc
index 8d0058c1474..7bf932e0403 100644
--- a/chromium/base/trace_event/event_name_filter.cc
+++ b/chromium/base/trace_event/event_name_filter.cc
@@ -16,7 +16,7 @@ EventNameFilter::EventNameFilter(
std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
: event_names_whitelist_(std::move(event_names_whitelist)) {}
-EventNameFilter::~EventNameFilter() {}
+EventNameFilter::~EventNameFilter() = default;
bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
return event_names_whitelist_->count(trace_event.name()) != 0;
diff --git a/chromium/base/trace_event/heap_profiler.h b/chromium/base/trace_event/heap_profiler.h
index cf575246270..c8deaf60cac 100644
--- a/chromium/base/trace_event/heap_profiler.h
+++ b/chromium/base/trace_event/heap_profiler.h
@@ -25,6 +25,11 @@
#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
trace_event_internal::HeapProfilerScopedTaskExecutionTracker
+// Scoped tracker that tracks the given program counter as a native stack frame
+// in the heap profiler.
+#define TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
+ trace_event_internal::HeapProfilerScopedStackFrame
+
// A scoped ignore event used to tell heap profiler to ignore all the
// allocations in the scope. It is useful to exclude allocations made for
// tracing from the heap profiler dumps.
@@ -62,6 +67,31 @@ class HeapProfilerScopedTaskExecutionTracker {
const char* context_;
};
+class HeapProfilerScopedStackFrame {
+ public:
+ inline explicit HeapProfilerScopedStackFrame(const void* program_counter)
+ : program_counter_(program_counter) {
+ using base::trace_event::AllocationContextTracker;
+ if (UNLIKELY(AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::MIXED_STACK)) {
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->PushNativeStackFrame(program_counter_);
+ }
+ }
+
+ inline ~HeapProfilerScopedStackFrame() {
+ using base::trace_event::AllocationContextTracker;
+ if (UNLIKELY(AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::MIXED_STACK)) {
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->PopNativeStackFrame(program_counter_);
+ }
+ }
+
+ private:
+ const void* const program_counter_;
+};
+
class BASE_EXPORT HeapProfilerScopedIgnore {
public:
inline HeapProfilerScopedIgnore() {
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index ce839fd151c..e9466a8a08f 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -10,6 +10,7 @@
#include "base/atomicops.h"
#include "base/debug/debugging_flags.h"
#include "base/debug/leak_annotations.h"
+#include "base/debug/stack_trace.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
@@ -83,10 +84,10 @@ AllocationContextTracker::GetInstanceForCurrentThread() {
AllocationContextTracker::AllocationContextTracker()
: thread_name_(nullptr), ignore_scope_depth_(0) {
- pseudo_stack_.reserve(kMaxStackDepth);
+ tracked_stack_.reserve(kMaxStackDepth);
task_contexts_.reserve(kMaxTaskDepth);
}
-AllocationContextTracker::~AllocationContextTracker() {}
+AllocationContextTracker::~AllocationContextTracker() = default;
// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
@@ -111,10 +112,12 @@ void AllocationContextTracker::PushPseudoStackFrame(
AllocationContextTracker::PseudoStackFrame stack_frame) {
// Impose a limit on the height to verify that every push is popped, because
// in practice the pseudo stack never grows higher than ~20 frames.
- if (pseudo_stack_.size() < kMaxStackDepth)
- pseudo_stack_.push_back(stack_frame);
- else
+ if (tracked_stack_.size() < kMaxStackDepth) {
+ tracked_stack_.push_back(
+ StackFrame::FromTraceEventName(stack_frame.trace_event_name));
+ } else {
NOTREACHED();
+ }
}
void AllocationContextTracker::PopPseudoStackFrame(
@@ -122,18 +125,25 @@ void AllocationContextTracker::PopPseudoStackFrame(
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the frame was never pushed, so it is possible that pop is called
// on an empty stack.
- if (pseudo_stack_.empty())
+ if (tracked_stack_.empty())
return;
- // Assert that pushes and pops are nested correctly. This DCHECK can be
- // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
- // without a corresponding TRACE_EVENT_BEGIN).
- DCHECK(stack_frame == pseudo_stack_.back())
- << "Encountered an unmatched TRACE_EVENT_END: "
- << stack_frame.trace_event_name
- << " vs event in stack: " << pseudo_stack_.back().trace_event_name;
+ tracked_stack_.pop_back();
+}
+
+void AllocationContextTracker::PushNativeStackFrame(const void* pc) {
+ if (tracked_stack_.size() < kMaxStackDepth)
+ tracked_stack_.push_back(StackFrame::FromProgramCounter(pc));
+ else
+ NOTREACHED();
+}
+
+void AllocationContextTracker::PopNativeStackFrame(const void* pc) {
+ if (tracked_stack_.empty())
+ return;
- pseudo_stack_.pop_back();
+ DCHECK_EQ(pc, tracked_stack_.back().value);
+ tracked_stack_.pop_back();
}
void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
@@ -183,18 +193,16 @@ bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
switch (mode) {
case CaptureMode::DISABLED:
- case CaptureMode::NO_STACK:
{
break;
}
case CaptureMode::PSEUDO_STACK:
+ case CaptureMode::MIXED_STACK:
{
- for (const PseudoStackFrame& stack_frame : pseudo_stack_) {
- if (backtrace == backtrace_end) {
+ for (const StackFrame& stack_frame : tracked_stack_) {
+ if (backtrace == backtrace_end)
break;
- }
- *backtrace++ =
- StackFrame::FromTraceEventName(stack_frame.trace_event_name);
+ *backtrace++ = stack_frame;
}
break;
}
@@ -243,10 +251,6 @@ bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
// (component name) in the heap profiler and not piggy back on the type name.
if (!task_contexts_.empty()) {
ctx->type_name = task_contexts_.back();
- } else if (!pseudo_stack_.empty()) {
- // If task context was unavailable, then the category names are taken from
- // trace events.
- ctx->type_name = pseudo_stack_.back().trace_event_category;
} else {
ctx->type_name = nullptr;
}
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h
index 308ead5bde8..9bd656d459f 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -9,7 +9,6 @@
#include "base/atomicops.h"
#include "base/base_export.h"
-#include "base/debug/stack_trace.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
@@ -25,9 +24,10 @@ class BASE_EXPORT AllocationContextTracker {
public:
enum class CaptureMode : int32_t {
DISABLED, // Don't capture anything
- PSEUDO_STACK, // GetContextSnapshot() returns pseudo stack trace
- NATIVE_STACK, // GetContextSnapshot() returns native (real) stack trace
- NO_STACK, // GetContextSnapshot() returns thread names and task contexts.
+ PSEUDO_STACK, // Backtrace has trace events
+ MIXED_STACK, // Backtrace has trace events + from
+ // HeapProfilerScopedStackFrame
+ NATIVE_STACK, // Backtrace has full native backtraces from stack unwinding
};
// Stack frame constructed from trace events in codebase.
@@ -79,12 +79,16 @@ class BASE_EXPORT AllocationContextTracker {
ignore_scope_depth_--;
}
- // Pushes a frame onto the thread-local pseudo stack.
+ // Pushes and pops a frame onto the thread-local pseudo stack.
+ // TODO(ssid): Change PseudoStackFrame to const char*. Only event name is
+ // used.
void PushPseudoStackFrame(PseudoStackFrame stack_frame);
-
- // Pops a frame from the thread-local pseudo stack.
void PopPseudoStackFrame(PseudoStackFrame stack_frame);
+ // Pushes and pops a native stack frame onto thread local tracked stack.
+ void PushNativeStackFrame(const void* pc);
+ void PopNativeStackFrame(const void* pc);
+
// Push and pop current task's context. A stack is used to support nested
// tasks and the top of the stack will be used in allocation context.
void PushCurrentTaskContext(const char* context);
@@ -101,8 +105,8 @@ class BASE_EXPORT AllocationContextTracker {
static subtle::Atomic32 capture_mode_;
- // The pseudo stack where frames are |TRACE_EVENT| names.
- std::vector<PseudoStackFrame> pseudo_stack_;
+ // The pseudo stack where frames are |TRACE_EVENT| names or inserted PCs.
+ std::vector<StackFrame> tracked_stack_;
// The thread name is used as the first entry in the pseudo stack.
const char* thread_name_;
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 7f45f384ece..c26149efaa7 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -224,22 +224,49 @@ TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
AssertBacktraceContainsOnlyThreadName();
}
-TEST_F(AllocationContextTrackerTest, NoStackMode) {
+TEST_F(AllocationContextTrackerTest, MixedStackWithProgramCounter) {
StackFrame t = StackFrame::FromThreadName(kThreadName);
- StackFrame frame_t[] = {t};
- const char kContext[] = "context";
+ StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+ StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+ const void* pc1 = reinterpret_cast<void*>(0x1000);
+ const void* pc2 = reinterpret_cast<void*>(0x2000);
+ StackFrame n1 = StackFrame::FromProgramCounter(pc1);
+ StackFrame n2 = StackFrame::FromProgramCounter(pc2);
+
+ StackFrame frame_c[] = {t, c};
+ StackFrame frame_cd[] = {t, c, n1};
+ StackFrame frame_e[] = {t, n2, n1};
+ StackFrame frame_ef[] = {t, n2, n1, f};
+
+ AssertBacktraceContainsOnlyThreadName();
AllocationContextTracker::SetCaptureMode(
- AllocationContextTracker::CaptureMode::NO_STACK);
+ AllocationContextTracker::CaptureMode::MIXED_STACK);
+
TRACE_EVENT_BEGIN0("Testing", kCupcake);
- AssertBacktraceEquals(frame_t);
+ AssertBacktraceEquals(frame_c);
- TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext);
- AllocationContext ctx;
- ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot(&ctx));
- ASSERT_EQ(kContext, ctx.type_name);
- AssertBacktraceEquals(frame_t);
+ {
+ TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER e1(pc1);
+ AssertBacktraceEquals(frame_cd);
+ }
+
+ AssertBacktraceEquals(frame_c);
+ TRACE_EVENT_END0("Testing", kCupcake);
+ AssertBacktraceContainsOnlyThreadName();
+
+ {
+ TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER e1(pc2);
+ TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER e2(pc1);
+ AssertBacktraceEquals(frame_e);
+
+ TRACE_EVENT0("Testing", kFroyo);
+ AssertBacktraceEquals(frame_ef);
+ }
+
+ AssertBacktraceContainsOnlyThreadName();
+ AllocationContextTracker::SetCaptureMode(
+ AllocationContextTracker::CaptureMode::DISABLED);
}
TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
@@ -303,22 +330,6 @@ TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
ASSERT_EQ(kContext2, ctx2.type_name);
}
- {
- // Type should be category name of the last seen trace event.
- TRACE_EVENT0("Testing", kCupcake);
- AllocationContext ctx1;
- ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot(&ctx1));
- ASSERT_EQ("Testing", std::string(ctx1.type_name));
-
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
- AllocationContext ctx2;
- ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot(&ctx2));
- ASSERT_EQ(TRACE_DISABLED_BY_DEFAULT("Testing"),
- std::string(ctx2.type_name));
- }
-
// Type should be nullptr without task event.
AllocationContext ctx;
ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.cc b/chromium/base/trace_event/heap_profiler_allocation_register.cc
index 22b178803a1..44f3442302e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.cc
@@ -91,7 +91,7 @@ AllocationRegister::AllocationRegister(size_t allocation_capacity,
DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}
-AllocationRegister::~AllocationRegister() {}
+AllocationRegister::~AllocationRegister() = default;
bool AllocationRegister::Insert(const void* address,
size_t size,
diff --git a/chromium/base/trace_event/heap_profiler_event_filter.cc b/chromium/base/trace_event/heap_profiler_event_filter.cc
index 6c91c91b136..937072ca7b5 100644
--- a/chromium/base/trace_event/heap_profiler_event_filter.cc
+++ b/chromium/base/trace_event/heap_profiler_event_filter.cc
@@ -16,8 +16,11 @@ namespace trace_event {
namespace {
inline bool IsPseudoStackEnabled() {
+ // Only PSEUDO_STACK and MIXED_STACK modes require trace events.
return AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK;
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK ||
+ AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::MIXED_STACK;
}
inline AllocationContextTracker* GetThreadLocalTracker() {
@@ -29,8 +32,8 @@ inline AllocationContextTracker* GetThreadLocalTracker() {
// static
const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
-HeapProfilerEventFilter::HeapProfilerEventFilter() {}
-HeapProfilerEventFilter::~HeapProfilerEventFilter() {}
+HeapProfilerEventFilter::HeapProfilerEventFilter() = default;
+HeapProfilerEventFilter::~HeapProfilerEventFilter() = default;
bool HeapProfilerEventFilter::FilterTraceEvent(
const TraceEvent& trace_event) const {
diff --git a/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc b/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc
index 9f64f6ed2fa..71c3d97f544 100644
--- a/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/chromium/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -186,7 +186,7 @@ HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
type_name_deduplicator_(type_name_deduplicator),
breakdown_threshold_bytes_(breakdown_threshold_bytes) {}
-HeapDumpWriter::~HeapDumpWriter() {}
+HeapDumpWriter::~HeapDumpWriter() = default;
bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
// The contexts in the bucket are all different, but the [begin, cursor) range
diff --git a/chromium/base/trace_event/heap_profiler_serialization_state.cc b/chromium/base/trace_event/heap_profiler_serialization_state.cc
index d332d43c471..b1866e72f1c 100644
--- a/chromium/base/trace_event/heap_profiler_serialization_state.cc
+++ b/chromium/base/trace_event/heap_profiler_serialization_state.cc
@@ -9,7 +9,7 @@ namespace trace_event {
HeapProfilerSerializationState::HeapProfilerSerializationState()
: heap_profiler_breakdown_threshold_bytes_(0) {}
-HeapProfilerSerializationState::~HeapProfilerSerializationState() {}
+HeapProfilerSerializationState::~HeapProfilerSerializationState() = default;
void HeapProfilerSerializationState::SetStackFrameDeduplicator(
std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
diff --git a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index 351117f14f7..c05cd0a25e3 100644
--- a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -39,14 +39,14 @@ StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
int parent_frame_index)
: frame(frame), parent_frame_index(parent_frame_index) {}
StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
-StackFrameDeduplicator::FrameNode::~FrameNode() {}
+StackFrameDeduplicator::FrameNode::~FrameNode() = default;
size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
return base::trace_event::EstimateMemoryUsage(children);
}
-StackFrameDeduplicator::StackFrameDeduplicator() {}
-StackFrameDeduplicator::~StackFrameDeduplicator() {}
+StackFrameDeduplicator::StackFrameDeduplicator() = default;
+StackFrameDeduplicator::~StackFrameDeduplicator() = default;
bool StackFrameDeduplicator::Match(int frame_index,
const StackFrame* begin_frame,
diff --git a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc b/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
index 26e8aee3ca4..360f239bbd1 100644
--- a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -19,55 +19,12 @@
namespace base {
namespace trace_event {
-namespace {
-
-// If |type_name| is file name then extract directory name. Or if |type_name| is
-// category name, then disambiguate multple categories and remove
-// "disabled-by-default" prefix if present.
-StringPiece ExtractCategoryFromTypeName(const char* type_name) {
- StringPiece result(type_name);
- size_t last_separator = result.find_last_of("\\/");
-
- // If |type_name| was a not a file path, the separator will not be found, so
- // the whole type name is returned.
- if (last_separator == StringPiece::npos) {
- // |type_name| is C++ typename if its reporting allocator is
- // partition_alloc or blink_gc. In this case, we should not split
- // |type_name| by ',', because of function types and template types.
- // e.g. WTF::HashMap<WTF::AtomicString, WTF::AtomicString>,
- // void (*)(void*, void*), and so on. So if |type_name| contains
- if (result.find_last_of(")>") != StringPiece::npos)
- return result;
-
- // Use the first the category name if it has ",".
- size_t first_comma_position = result.find(',');
- if (first_comma_position != StringPiece::npos)
- result = result.substr(0, first_comma_position);
- if (result.starts_with(TRACE_DISABLED_BY_DEFAULT("")))
- result.remove_prefix(sizeof(TRACE_DISABLED_BY_DEFAULT("")) - 1);
- return result;
- }
-
- // Remove the file name from the path.
- result.remove_suffix(result.length() - last_separator);
-
- // Remove the parent directory references.
- const char kParentDirectory[] = "..";
- const size_t kParentDirectoryLength = 3; // '../' or '..\'.
- while (result.starts_with(kParentDirectory)) {
- result.remove_prefix(kParentDirectoryLength);
- }
- return result;
-}
-
-} // namespace
-
TypeNameDeduplicator::TypeNameDeduplicator() {
// A null pointer has type ID 0 ("unknown type");
type_ids_.insert(std::make_pair(nullptr, 0));
}
-TypeNameDeduplicator::~TypeNameDeduplicator() {}
+TypeNameDeduplicator::~TypeNameDeduplicator() = default;
int TypeNameDeduplicator::Insert(const char* type_name) {
auto result = type_ids_.insert(std::make_pair(type_name, 0));
@@ -103,7 +60,7 @@ void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
// TODO(ssid): crbug.com/594803 the type name is misused for file name in
// some cases.
- StringPiece type_info = ExtractCategoryFromTypeName(it->first);
+ StringPiece type_info = it->first;
// |EscapeJSONString| appends, it does not overwrite |buffer|.
bool put_in_quotes = true;
diff --git a/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
index b2e681ab26d..f97808bfb07 100644
--- a/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
@@ -22,14 +22,6 @@ const char kBool[] = "bool";
const char kString[] = "string";
const char kNeedsEscape[] = "\"quotes\"";
-#if defined(OS_POSIX)
-const char kTaskFileName[] = "../../base/trace_event/trace_log.cc";
-const char kTaskPath[] = "base/trace_event";
-#else
-const char kTaskFileName[] = "..\\..\\base\\memory\\memory_win.cc";
-const char kTaskPath[] = "base\\memory";
-#endif
-
std::unique_ptr<Value> DumpAndReadBack(
const TypeNameDeduplicator& deduplicator) {
std::string json;
@@ -87,10 +79,5 @@ TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
TestInsertTypeAndReadback(kNeedsEscape, kNeedsEscape);
}
-TEST(TypeNameDeduplicatorTest, TestExtractFileName) {
- // The exported value for passed file name should be the folders in the path.
- TestInsertTypeAndReadback(kTaskFileName, kTaskPath);
-}
-
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index f1cc52a872e..ffd519133c2 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -191,12 +191,18 @@ MallocDumpProvider* MallocDumpProvider::GetInstance() {
MallocDumpProvider::MallocDumpProvider()
: tid_dumping_heap_(kInvalidThreadId) {}
-MallocDumpProvider::~MallocDumpProvider() {}
+MallocDumpProvider::~MallocDumpProvider() = default;
// Called at trace dump point time. Creates a snapshot the memory counters for
// the current process.
bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) {
+ {
+ base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
+ if (!emit_metrics_on_memory_dump_)
+ return true;
+ }
+
size_t total_virtual_size = 0;
size_t resident_size = 0;
size_t allocated_objects_size = 0;
@@ -367,5 +373,15 @@ void MallocDumpProvider::RemoveAllocation(void* address) {
allocation_register_.Remove(address);
}
+void MallocDumpProvider::EnableMetrics() {
+ base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
+ emit_metrics_on_memory_dump_ = true;
+}
+
+void MallocDumpProvider::DisableMetrics() {
+ base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
+ emit_metrics_on_memory_dump_ = false;
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/malloc_dump_provider.h b/chromium/base/trace_event/malloc_dump_provider.h
index b2487d0a840..ff0032e8c9d 100644
--- a/chromium/base/trace_event/malloc_dump_provider.h
+++ b/chromium/base/trace_event/malloc_dump_provider.h
@@ -7,6 +7,7 @@
#include "base/macros.h"
#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/sharded_allocation_register.h"
@@ -39,6 +40,12 @@ class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
void InsertAllocation(void* address, size_t size);
void RemoveAllocation(void* address);
+ // Used by out-of-process heap-profiling. When malloc is profiled by an
+ // external process, that process will be responsible for emitting metrics on
+ // behalf of this one. Thus, MallocDumpProvider should not do anything.
+ void EnableMetrics();
+ void DisableMetrics();
+
private:
friend struct DefaultSingletonTraits<MallocDumpProvider>;
@@ -53,6 +60,9 @@ class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
// generation is malloc/new-ing for its own bookkeeping data structures.
PlatformThreadId tid_dumping_heap_;
+ bool emit_metrics_on_memory_dump_ = true;
+ base::Lock emit_metrics_on_memory_dump_lock_;
+
DISALLOW_COPY_AND_ASSIGN(MallocDumpProvider);
};
diff --git a/chromium/base/trace_event/memory_allocator_dump.cc b/chromium/base/trace_event/memory_allocator_dump.cc
index 44ad2083bc3..5260a734dbd 100644
--- a/chromium/base/trace_event/memory_allocator_dump.cc
+++ b/chromium/base/trace_event/memory_allocator_dump.cc
@@ -25,13 +25,6 @@ const char MemoryAllocatorDump::kTypeString[] = "string";
const char MemoryAllocatorDump::kUnitsBytes[] = "bytes";
const char MemoryAllocatorDump::kUnitsObjects[] = "objects";
-// static
-MemoryAllocatorDumpGuid MemoryAllocatorDump::GetDumpIdFromName(
- const std::string& absolute_name) {
- return MemoryAllocatorDumpGuid(StringPrintf(
- "%d:%s", TraceLog::GetInstance()->process_id(), absolute_name.c_str()));
-}
-
MemoryAllocatorDump::MemoryAllocatorDump(
const std::string& absolute_name,
MemoryDumpLevelOfDetail level_of_detail,
@@ -48,19 +41,7 @@ MemoryAllocatorDump::MemoryAllocatorDump(
DCHECK(absolute_name[0] != '/' && *absolute_name.rbegin() != '/');
}
-// If the caller didn't provide a guid, make one up by hashing the
-// absolute_name with the current PID.
-// Rationale: |absolute_name| is already supposed to be unique within a
-// process, the pid will make it unique among all processes.
-MemoryAllocatorDump::MemoryAllocatorDump(
- const std::string& absolute_name,
- MemoryDumpLevelOfDetail level_of_detail)
- : MemoryAllocatorDump(absolute_name,
- level_of_detail,
- GetDumpIdFromName(absolute_name)) {}
-
-MemoryAllocatorDump::~MemoryAllocatorDump() {
-}
+MemoryAllocatorDump::~MemoryAllocatorDump() = default;
void MemoryAllocatorDump::AddScalar(const char* name,
const char* units,
diff --git a/chromium/base/trace_event/memory_allocator_dump.h b/chromium/base/trace_event/memory_allocator_dump.h
index 2e7aeb445a6..de38afd9e7f 100644
--- a/chromium/base/trace_event/memory_allocator_dump.h
+++ b/chromium/base/trace_event/memory_allocator_dump.h
@@ -19,6 +19,7 @@
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event_argument.h"
+#include "base/unguessable_token.h"
#include "base/values.h"
namespace base {
@@ -68,16 +69,9 @@ class BASE_EXPORT MemoryAllocatorDump {
DISALLOW_COPY_AND_ASSIGN(Entry);
};
- // Returns the Guid of the dump for the given |absolute_name| for the
- // current process.
- static MemoryAllocatorDumpGuid GetDumpIdFromName(
- const std::string& absolute_name);
-
MemoryAllocatorDump(const std::string& absolute_name,
MemoryDumpLevelOfDetail,
const MemoryAllocatorDumpGuid&);
- MemoryAllocatorDump(const std::string& absolute_name,
- MemoryDumpLevelOfDetail);
~MemoryAllocatorDump();
// Standard attribute |name|s for the AddScalar and AddString() methods.
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index bc9cc311f6b..b0b6e741a9b 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -80,24 +80,6 @@ TEST(MemoryAllocatorDumpTest, GuidGeneration) {
std::unique_ptr<MemoryAllocatorDump> mad(new MemoryAllocatorDump(
"foo", MemoryDumpLevelOfDetail::FIRST, MemoryAllocatorDumpGuid(0x42u)));
ASSERT_EQ("42", mad->guid().ToString());
-
- // If the dumper does not provide a Guid, the MAD will make one up on the
- // flight. Furthermore that Guid will stay stable across across multiple
- // snapshots if the |absolute_name| of the dump doesn't change
- mad.reset(new MemoryAllocatorDump("bar", MemoryDumpLevelOfDetail::FIRST));
- const MemoryAllocatorDumpGuid guid_bar = mad->guid();
- ASSERT_FALSE(guid_bar.empty());
- ASSERT_FALSE(guid_bar.ToString().empty());
- ASSERT_EQ(guid_bar, mad->guid());
- ASSERT_EQ(guid_bar, MemoryAllocatorDump::GetDumpIdFromName("bar"));
-
- mad.reset(new MemoryAllocatorDump("bar", MemoryDumpLevelOfDetail::FIRST));
- const MemoryAllocatorDumpGuid guid_bar_2 = mad->guid();
- ASSERT_EQ(guid_bar, guid_bar_2);
-
- mad.reset(new MemoryAllocatorDump("baz", MemoryDumpLevelOfDetail::FIRST));
- const MemoryAllocatorDumpGuid guid_baz = mad->guid();
- ASSERT_NE(guid_bar, guid_baz);
}
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 36aff1e38e3..1e1b84bf8a6 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -60,9 +60,7 @@ void DoGlobalDumpWithoutCallback(
MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail) {
- // The actual dump_guid will be set by service. TODO(primiano): remove
- // guid from the request args API.
- MemoryDumpRequestArgs args{0 /* dump_guid */, dump_type, level_of_detail};
+ GlobalMemoryDumpRequestArgs args{dump_type, level_of_detail};
global_dump_fn.Run(args);
}
@@ -265,7 +263,7 @@ bool MemoryDumpManager::EnableHeapProfiling(HeapProfilingMode profiling_mode) {
case kHeapProfilingModeBackground:
AllocationContextTracker::SetCaptureMode(
- AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+ AllocationContextTracker::CaptureMode::MIXED_STACK);
break;
case kHeapProfilingModePseudo:
@@ -598,18 +596,32 @@ void MemoryDumpManager::SetupNextMemoryDump(
// If we are in background tracing, we should invoke only the whitelisted
// providers. Ignore other providers and continue.
if (pmd_async_state->req_args.level_of_detail ==
- MemoryDumpLevelOfDetail::BACKGROUND &&
- !mdpinfo->whitelisted_for_background_mode) {
- pmd_async_state->pending_dump_providers.pop_back();
- return SetupNextMemoryDump(std::move(pmd_async_state));
+ MemoryDumpLevelOfDetail::BACKGROUND) {
+ // TODO(ssid): This is a temporary hack to fix crashes
+ // https://crbug.com/797784. We could still cause stack overflow in a
+      // detailed mode dump or when there are a lot of providers whitelisted.
+ while (!mdpinfo->whitelisted_for_background_mode) {
+ pmd_async_state->pending_dump_providers.pop_back();
+ if (pmd_async_state->pending_dump_providers.empty())
+ return FinishAsyncProcessDump(std::move(pmd_async_state));
+ mdpinfo = pmd_async_state->pending_dump_providers.back().get();
+ }
}
// If we are in summary mode, we only need to invoke the providers
// whitelisted for summary mode.
- if (pmd_async_state->req_args.dump_type == MemoryDumpType::SUMMARY_ONLY &&
- !mdpinfo->whitelisted_for_summary_mode) {
- pmd_async_state->pending_dump_providers.pop_back();
- return SetupNextMemoryDump(std::move(pmd_async_state));
+ if (pmd_async_state->req_args.dump_type == MemoryDumpType::SUMMARY_ONLY) {
+ // TODO(ssid): This is a temporary hack to fix crashes
+ // https://crbug.com/797784. We could still cause stack overflow in a
+    // detailed mode dump or when there are a lot of providers whitelisted. It is
+ // assumed here that a provider whitelisted for summary mode is also
+    // whitelisted for background mode, so the check is skipped.
+ while (!mdpinfo->whitelisted_for_summary_mode) {
+ pmd_async_state->pending_dump_providers.pop_back();
+ if (pmd_async_state->pending_dump_providers.empty())
+ return FinishAsyncProcessDump(std::move(pmd_async_state));
+ mdpinfo = pmd_async_state->pending_dump_providers.back().get();
+ }
}
// If the dump provider did not specify a task runner affinity, dump on
@@ -643,8 +655,8 @@ void MemoryDumpManager::SetupNextMemoryDump(
// The utility thread is normally shutdown when disabling the trace and
// getting here in this case is expected.
if (mdpinfo->task_runner) {
- LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
- << "\". Failed to post task on the task runner provided.";
+ DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Failed to post task on the task runner provided.";
// A locked access is required to R/W |disabled| (for the
// UnregisterAndDeleteDumpProviderSoon() case).
@@ -932,8 +944,8 @@ MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MakeUnique<ProcessMemoryDump>(heap_profiler_serialization_state, args);
}
-MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
-}
+MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
+ default;
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 223633e1daf..76fd239aeb9 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -50,7 +50,7 @@ enum HeapProfilingMode {
class BASE_EXPORT MemoryDumpManager {
public:
using RequestGlobalDumpFunction =
- RepeatingCallback<void(const MemoryDumpRequestArgs& args)>;
+ RepeatingCallback<void(const GlobalMemoryDumpRequestArgs& args)>;
static const char* const kTraceCategory;
diff --git a/chromium/base/trace_event/memory_dump_manager_test_utils.h b/chromium/base/trace_event/memory_dump_manager_test_utils.h
index 6e3068225e2..032ef844afb 100644
--- a/chromium/base/trace_event/memory_dump_manager_test_utils.h
+++ b/chromium/base/trace_event/memory_dump_manager_test_utils.h
@@ -12,9 +12,12 @@
namespace base {
namespace trace_event {
-void RequestGlobalDumpForInProcessTesting(const MemoryDumpRequestArgs& args) {
+void RequestGlobalDumpForInProcessTesting(
+ const GlobalMemoryDumpRequestArgs& args) {
+ MemoryDumpRequestArgs local_args = {0 /* dump_guid */, args.dump_type,
+ args.level_of_detail};
MemoryDumpManager::GetInstance()->CreateProcessDump(
- args, ProcessMemoryDumpCallback());
+ local_args, ProcessMemoryDumpCallback());
};
// Short circuits the RequestGlobalDumpFunction() to CreateProcessDump(),
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 6afab3a174e..22cee6d65ce 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -168,7 +168,7 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
}
private:
- ~TestSequencedTaskRunner() override {}
+ ~TestSequencedTaskRunner() override = default;
SequencedWorkerPoolOwner worker_pool_;
const SequencedWorkerPool::SequenceToken token_;
@@ -894,7 +894,7 @@ TEST_F(MemoryDumpManagerTest, EnableHeapProfilingPseudoStack) {
EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
}
-TEST_F(MemoryDumpManagerTest, EnableHeapProfilingNoStack) {
+TEST_F(MemoryDumpManagerTest, EnableHeapProfilingBackground) {
InitializeMemoryDumpManagerForInProcessTesting(true /* is_coordinator */);
MockMemoryDumpProvider mdp1;
MemoryDumpProvider::Options supported_options;
@@ -916,7 +916,7 @@ TEST_F(MemoryDumpManagerTest, EnableHeapProfilingNoStack) {
EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
RunLoop().RunUntilIdle();
- ASSERT_EQ(AllocationContextTracker::CaptureMode::PSEUDO_STACK,
+ ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
AllocationContextTracker::capture_mode());
EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
@@ -925,7 +925,7 @@ TEST_F(MemoryDumpManagerTest, EnableHeapProfilingNoStack) {
// Do nothing when already enabled.
EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
- ASSERT_EQ(AllocationContextTracker::CaptureMode::PSEUDO_STACK,
+ ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
AllocationContextTracker::capture_mode());
EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
diff --git a/chromium/base/trace_event/memory_dump_provider_info.cc b/chromium/base/trace_event/memory_dump_provider_info.cc
index 34784eb5c0a..65eeadf6bef 100644
--- a/chromium/base/trace_event/memory_dump_provider_info.cc
+++ b/chromium/base/trace_event/memory_dump_provider_info.cc
@@ -27,7 +27,7 @@ MemoryDumpProviderInfo::MemoryDumpProviderInfo(
consecutive_failures(0),
disabled(false) {}
-MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+MemoryDumpProviderInfo::~MemoryDumpProviderInfo() = default;
bool MemoryDumpProviderInfo::Comparator::operator()(
const scoped_refptr<MemoryDumpProviderInfo>& a,
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index c073f185945..1862c4e0667 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -20,8 +20,6 @@ const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
return "peak_memory_usage";
case MemoryDumpType::SUMMARY_ONLY:
return "summary_only";
- case MemoryDumpType::VM_REGIONS_ONLY:
- return "vm_regions_only";
}
NOTREACHED();
return "unknown";
@@ -36,8 +34,6 @@ MemoryDumpType StringToMemoryDumpType(const std::string& str) {
return MemoryDumpType::PEAK_MEMORY_USAGE;
if (str == "summary_only")
return MemoryDumpType::SUMMARY_ONLY;
- if (str == "vm_regions_only")
- return MemoryDumpType::VM_REGIONS_ONLY;
NOTREACHED();
return MemoryDumpType::LAST;
}
@@ -49,6 +45,8 @@ const char* MemoryDumpLevelOfDetailToString(
return "background";
case MemoryDumpLevelOfDetail::LIGHT:
return "light";
+ case MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER:
+ return "vm_regions_only";
case MemoryDumpLevelOfDetail::DETAILED:
return "detailed";
}
@@ -62,6 +60,8 @@ MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
return MemoryDumpLevelOfDetail::BACKGROUND;
if (str == "light")
return MemoryDumpLevelOfDetail::LIGHT;
+ if (str == "vm_regions_only")
+ return MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER;
if (str == "detailed")
return MemoryDumpLevelOfDetail::DETAILED;
NOTREACHED();
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index ded56b0fd88..adf8d26e600 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -32,9 +32,7 @@ enum class MemoryDumpType {
EXPLICITLY_TRIGGERED, // Non maskable dump request.
PEAK_MEMORY_USAGE, // Dumping memory at detected peak total memory usage.
SUMMARY_ONLY, // Calculate just the summary & don't add to the trace.
- VM_REGIONS_ONLY, // Retrieve only memory maps & don't add to the trace.
- // Used only for the heap profiler.
- LAST = VM_REGIONS_ONLY
+ LAST = SUMMARY_ONLY
};
// Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
@@ -55,15 +53,17 @@ enum class MemoryDumpLevelOfDetail : uint32_t {
// Few entries, typically a fixed number, per dump.
LIGHT,
+ // Retrieve only memory maps. Used only for the heap profiler.
+ VM_REGIONS_ONLY_FOR_HEAP_PROFILER,
+
// Unrestricted amount of entries per dump.
DETAILED,
LAST = DETAILED
};
-// Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()). Keep this consistent with
-// memory_instrumentation.mojo and memory_instrumentation_struct_traits.{h,cc}
+// Keep this consistent with memory_instrumentation.mojo and
+// memory_instrumentation_struct_traits.{h,cc}
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -74,6 +74,15 @@ struct BASE_EXPORT MemoryDumpRequestArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
+// Initial request arguments for a global memory dump. (see
+// MemoryDumpManager::RequestGlobalMemoryDump()). Keep this consistent with
+// memory_instrumentation.mojo and memory_instrumentation_struct_traits.{h,cc}
+// TODO(hjd): Move this to memory_instrumentation, crbug.com/776726
+struct BASE_EXPORT GlobalMemoryDumpRequestArgs {
+ MemoryDumpType dump_type;
+ MemoryDumpLevelOfDetail level_of_detail;
+};
+
// Args for ProcessMemoryDump and passed to OnMemoryDump calls for memory dump
// providers. Dump providers are expected to read the args for creating dumps.
struct MemoryDumpArgs {
diff --git a/chromium/base/trace_event/memory_dump_scheduler.cc b/chromium/base/trace_event/memory_dump_scheduler.cc
index 61cf960ce2b..0332af7569d 100644
--- a/chromium/base/trace_event/memory_dump_scheduler.cc
+++ b/chromium/base/trace_event/memory_dump_scheduler.cc
@@ -50,6 +50,11 @@ void MemoryDumpScheduler::StartInternal(MemoryDumpScheduler::Config config) {
for (const Config::Trigger& trigger : config.triggers) {
DCHECK_GT(trigger.period_ms, 0u);
switch (trigger.level_of_detail) {
+ case MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER:
+ // There is no use case to request a periodic dump which contains
+ // details that are useful only for the heap-profiler.
+ NOTREACHED();
+ return;
case MemoryDumpLevelOfDetail::BACKGROUND:
break;
case MemoryDumpLevelOfDetail::LIGHT:
@@ -109,8 +114,8 @@ void MemoryDumpScheduler::Tick(uint32_t expected_generation) {
TimeDelta::FromMilliseconds(period_ms_));
}
-MemoryDumpScheduler::Config::Config() {}
-MemoryDumpScheduler::Config::~Config() {}
+MemoryDumpScheduler::Config::Config() = default;
+MemoryDumpScheduler::Config::~Config() = default;
MemoryDumpScheduler::Config::Config(const MemoryDumpScheduler::Config&) =
default;
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index 9bc4447382c..60f08b52ccc 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -24,6 +24,7 @@ const char* const kDumpProviderWhitelist[] = {
"ClientDiscardableSharedMemoryManager",
"DOMStorage",
"DiscardableSharedMemoryManager",
+ "DnsConfigServicePosix::HostsReader",
"gpu::BufferManager",
"gpu::RenderbufferManager",
"gpu::TextureManager",
@@ -36,9 +37,11 @@ const char* const kDumpProviderWhitelist[] = {
"MemoryCache",
"MojoHandleTable",
"MojoLevelDB",
+ "OutOfProcessHeapProfilingDumpProvider",
"PartitionAlloc",
"ProcessMemoryMetrics",
"Skia",
+ "SharedMemoryTracker",
"Sql",
"URLRequestContext",
"V8Isolate",
@@ -57,6 +60,7 @@ const char* const kDumpProviderSummaryWhitelist[] = {
"Malloc",
"PartitionAlloc",
"ProcessMemoryMetrics",
+ "SharedMemoryTracker",
"V8Isolate",
nullptr // End of list marker.
};
@@ -79,10 +83,13 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"gpu/gl/textures/share_group_0x?",
"java_heap",
"java_heap/allocated_objects",
- "leveldatabase/0x?",
- "leveldb/leveldb_proto/0x?",
- "leveldb/mojo/0x?",
- "leveldb/mojo/0x?/block_cache",
+ "leveldatabase",
+ "leveldatabase/block_cache/browser",
+ "leveldatabase/block_cache/in_memory",
+ "leveldatabase/block_cache/unified",
+ "leveldatabase/block_cache/web",
+ "leveldatabase/db_0x?",
+ "leveldatabase/db_0x?/block_cache",
"malloc",
"malloc/allocated_objects",
"malloc/metadata_fragmentation_caches",
@@ -94,6 +101,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"mojo/shared_buffer",
"mojo/unknown",
"mojo/watcher",
+ "net/dns_config_service_posix_hosts_reader",
"net/http_network_session_0x?",
"net/http_network_session_0x?/quic_stream_factory",
"net/http_network_session_0x?/socket_pool",
@@ -262,12 +270,21 @@ bool IsMemoryDumpProviderWhitelistedForSummary(const char* mdp_name) {
bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
// Global dumps are explicitly whitelisted for background use.
if (base::StartsWith(name, "global/", CompareCase::SENSITIVE)) {
- for (size_t i = sizeof("global/"); i < name.size(); i++)
+ for (size_t i = strlen("global/"); i < name.size(); i++)
if (!base::IsHexDigit(name[i]))
return false;
return true;
}
+ // As are shared memory dumps. Note: we skip the first character after the
+ // slash and last character in the string as they are expected to be brackets.
+ if (base::StartsWith(name, "shared_memory/(", CompareCase::SENSITIVE)) {
+ for (size_t i = strlen("shared_memory/") + 1; i < name.size() - 1; i++)
+ if (!base::IsHexDigit(name[i]))
+ return false;
+ return name.back() == ')';
+ }
+
// Remove special characters, numbers (including hexadecimal which are marked
// by '0x') from the given string.
const size_t length = name.size();
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index e0d5a1dea4d..3d068afedbd 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -50,6 +50,11 @@ size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
}
#endif
+UnguessableToken GetTokenForCurrentProcess() {
+ static UnguessableToken instance = UnguessableToken::Create();
+ return instance;
+}
+
} // namespace
// static
@@ -188,10 +193,12 @@ ProcessMemoryDump::ProcessMemoryDump(
scoped_refptr<HeapProfilerSerializationState>
heap_profiler_serialization_state,
const MemoryDumpArgs& dump_args)
- : heap_profiler_serialization_state_(
+ : process_token_(GetTokenForCurrentProcess()),
+ heap_profiler_serialization_state_(
std::move(heap_profiler_serialization_state)),
dump_args_(dump_args) {}
-ProcessMemoryDump::~ProcessMemoryDump() {}
+
+ProcessMemoryDump::~ProcessMemoryDump() = default;
ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
default;
@@ -199,7 +206,7 @@ ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name) {
return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
- absolute_name, dump_args_.level_of_detail));
+ absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
}
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
@@ -377,11 +384,12 @@ void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
int importance) {
// This will either override an existing edge or create a new one.
auto it = allocator_dumps_edges_.find(source);
+ int max_importance = importance;
if (it != allocator_dumps_edges_.end()) {
- DCHECK_EQ(target.ToUint64(),
- allocator_dumps_edges_[source].target.ToUint64());
+ DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
+ max_importance = std::max(importance, it->second.importance);
}
- allocator_dumps_edges_[source] = {source, target, importance,
+ allocator_dumps_edges_[source] = {source, target, max_importance,
false /* overridable */};
}
@@ -434,8 +442,8 @@ void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
// The guid of the local dump created by SharedMemoryTracker for the memory
// segment.
- auto local_shm_guid = MemoryAllocatorDump::GetDumpIdFromName(
- SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));
+ auto local_shm_guid =
+ GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));
// The dump guid of the global dump created by the tracker for the memory
// segment.
@@ -468,12 +476,20 @@ void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
DCHECK(is_black_hole_non_fatal_for_testing_);
- if (!black_hole_mad_)
- black_hole_mad_.reset(
- new MemoryAllocatorDump("discarded", dump_args_.level_of_detail));
+ if (!black_hole_mad_) {
+ std::string name = "discarded";
+ black_hole_mad_.reset(new MemoryAllocatorDump(
+ name, dump_args_.level_of_detail, GetDumpId(name)));
+ }
return black_hole_mad_.get();
}
+MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
+ const std::string& absolute_name) {
+ return MemoryAllocatorDumpGuid(StringPrintf(
+ "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
+}
+
bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator==(
const MemoryAllocatorDumpEdge& other) const {
return source == other.source && target == other.target &&
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index 9ac8bb6e163..4085838f5f1 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -46,8 +46,8 @@ class BASE_EXPORT ProcessMemoryDump {
MemoryAllocatorDumpGuid source;
MemoryAllocatorDumpGuid target;
- int importance;
- bool overridable;
+ int importance = 0;
+ bool overridable = false;
};
// Maps allocator dumps absolute names (allocator_name/heap/subheap) to
@@ -159,6 +159,8 @@ class BASE_EXPORT ProcessMemoryDump {
// the memory usage of |target| to |source|. |importance| is optional and
// relevant only for the cases of co-ownership, where it acts as a z-index:
// the owner with the highest importance will be attributed |target|'s memory.
+ // If an edge is present, its importance will not be updated unless
+ // |importance| is larger.
void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target,
int importance);
@@ -235,10 +237,25 @@ class BASE_EXPORT ProcessMemoryDump {
private:
FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+ FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, SharedMemoryOwnershipTest);
+ FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, GuidsTest);
MemoryAllocatorDump* AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad);
+ // A per-process token, valid throughout all the lifetime of the current
+ // process, used to disambiguate dumps with the same name generated in
+ // different processes.
+ const UnguessableToken& process_token() const { return process_token_; }
+ void set_process_token_for_testing(UnguessableToken token) {
+ process_token_ = token;
+ };
+
+  // Returns the Guid of the dump for the given |absolute_name|
+ // for the given process' token. |process_token| is used to disambiguate GUIDs
+ // derived from the same name under different processes.
+ MemoryAllocatorDumpGuid GetDumpId(const std::string& absolute_name);
+
void CreateSharedMemoryOwnershipEdgeInternal(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
@@ -247,6 +264,7 @@ class BASE_EXPORT ProcessMemoryDump {
MemoryAllocatorDump* GetBlackHoleMad();
+ UnguessableToken process_token_;
AllocatorDumpsMap allocator_dumps_;
HeapDumpsMap heap_dumps_;
diff --git a/chromium/base/trace_event/process_memory_dump_unittest.cc b/chromium/base/trace_event/process_memory_dump_unittest.cc
index 12cfaf64067..0148588200e 100644
--- a/chromium/base/trace_event/process_memory_dump_unittest.cc
+++ b/chromium/base/trace_event/process_memory_dump_unittest.cc
@@ -13,7 +13,7 @@
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/trace_event_argument.h"
-#include "base/unguessable_token.h"
+#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -311,7 +311,7 @@ TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
EXPECT_EQ(2, edges.find(shm_dump3->guid())->second.importance);
EXPECT_FALSE(edges.find(shm_dump3->guid())->second.overridable);
EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
- EXPECT_EQ(0, edges.find(child4_dump->guid())->second.importance);
+ EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
EXPECT_FALSE(edges.find(child4_dump->guid())->second.overridable);
}
@@ -395,8 +395,7 @@ TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
auto* client_dump2 = pmd->CreateAllocatorDump("discardable/segment2");
auto shm_token2 = UnguessableToken::Create();
MemoryAllocatorDumpGuid shm_local_guid2 =
- MemoryAllocatorDump::GetDumpIdFromName(
- SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
+ pmd->GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
MemoryAllocatorDumpGuid shm_global_guid2 =
SharedMemoryTracker::GetGlobalDumpIdForTracing(shm_token2);
pmd->AddOverridableOwnershipEdge(shm_local_guid2, shm_global_guid2,
@@ -474,6 +473,39 @@ TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("Whitelisted/0xA1b2"));
}
+TEST(ProcessMemoryDumpTest, GuidsTest) {
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+
+ const auto process_token_one = UnguessableToken::Create();
+ const auto process_token_two = UnguessableToken::Create();
+
+ ProcessMemoryDump pmd1(nullptr, dump_args);
+ pmd1.set_process_token_for_testing(process_token_one);
+ MemoryAllocatorDump* mad1 = pmd1.CreateAllocatorDump("foo");
+
+ ProcessMemoryDump pmd2(nullptr, dump_args);
+ pmd2.set_process_token_for_testing(process_token_one);
+ MemoryAllocatorDump* mad2 = pmd2.CreateAllocatorDump("foo");
+
+ // If we don't pass the argument we get a random PMD:
+ ProcessMemoryDump pmd3(nullptr, dump_args);
+ MemoryAllocatorDump* mad3 = pmd3.CreateAllocatorDump("foo");
+
+ // PMD's for different processes produce different GUIDs even for the same
+ // names:
+ ProcessMemoryDump pmd4(nullptr, dump_args);
+ pmd4.set_process_token_for_testing(process_token_two);
+ MemoryAllocatorDump* mad4 = pmd4.CreateAllocatorDump("foo");
+
+ ASSERT_EQ(mad1->guid(), mad2->guid());
+
+ ASSERT_NE(mad2->guid(), mad3->guid());
+ ASSERT_NE(mad3->guid(), mad4->guid());
+ ASSERT_NE(mad4->guid(), mad2->guid());
+
+ ASSERT_EQ(mad1->guid(), pmd1.GetDumpId("foo"));
+}
+
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
TEST(ProcessMemoryDumpTest, CountResidentBytes) {
const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
diff --git a/chromium/base/trace_event/trace_buffer.cc b/chromium/base/trace_event/trace_buffer.cc
index 0a5770c3dbf..8de470f0fdd 100644
--- a/chromium/base/trace_event/trace_buffer.cc
+++ b/chromium/base/trace_event/trace_buffer.cc
@@ -46,7 +46,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
chunks_.resize(*index + 1);
TraceBufferChunk* chunk = chunks_[*index].release();
- chunks_[*index] = NULL; // Put NULL in the slot of a in-flight chunk.
+ chunks_[*index] = nullptr; // Put nullptr in the slot of a in-flight chunk.
if (chunk)
chunk->Reset(current_chunk_seq_++);
else
@@ -81,16 +81,16 @@ class TraceBufferRingBuffer : public TraceBuffer {
TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
- return NULL;
+ return nullptr;
TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
if (!chunk || chunk->seq() != handle.chunk_seq)
- return NULL;
+ return nullptr;
return chunk->GetEventAt(handle.event_index);
}
const TraceBufferChunk* NextChunk() override {
if (chunks_.empty())
- return NULL;
+ return nullptr;
while (current_iteration_index_ != queue_tail_) {
size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
@@ -100,7 +100,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
DCHECK(chunks_[chunk_index]);
return chunks_[chunk_index].get();
}
- return NULL;
+ return nullptr;
}
void EstimateTraceMemoryOverhead(
@@ -198,10 +198,10 @@ class TraceBufferVector : public TraceBuffer {
TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
- return NULL;
+ return nullptr;
TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
if (!chunk || chunk->seq() != handle.chunk_seq)
- return NULL;
+ return nullptr;
return chunk->GetEventAt(handle.event_index);
}
@@ -212,7 +212,7 @@ class TraceBufferVector : public TraceBuffer {
if (chunk)
return chunk;
}
- return NULL;
+ return nullptr;
}
void EstimateTraceMemoryOverhead(
@@ -246,7 +246,7 @@ class TraceBufferVector : public TraceBuffer {
TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}
-TraceBufferChunk::~TraceBufferChunk() {}
+TraceBufferChunk::~TraceBufferChunk() = default;
void TraceBufferChunk::Reset(uint32_t new_seq) {
for (size_t i = 0; i < next_free_; ++i)
@@ -312,7 +312,7 @@ void TraceResultBuffer::SimpleOutput::Append(
TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}
-TraceResultBuffer::~TraceResultBuffer() {}
+TraceResultBuffer::~TraceResultBuffer() = default;
void TraceResultBuffer::SetOutputCallback(
const OutputCallback& json_chunk_callback) {
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 38356d8dc00..5926c9541bd 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -57,7 +57,7 @@ class ConvertableTraceConfigToTraceFormat
explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
: trace_config_(trace_config) {}
- ~ConvertableTraceConfigToTraceFormat() override {}
+ ~ConvertableTraceConfigToTraceFormat() override = default;
void AppendAsTraceFormat(std::string* out) const override {
out->append(trace_config_.ToString());
@@ -91,12 +91,12 @@ void TraceConfig::ResetMemoryDumpConfig(
memory_dump_config_ = memory_dump_config;
}
-TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {}
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig() = default;
TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
const MemoryDumpConfig& other) = default;
-TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {}
+TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() = default;
void TraceConfig::MemoryDumpConfig::Clear() {
allowed_dump_modes.clear();
@@ -119,7 +119,7 @@ TraceConfig::EventFilterConfig::EventFilterConfig(
const std::string& predicate_name)
: predicate_name_(predicate_name) {}
-TraceConfig::EventFilterConfig::~EventFilterConfig() {}
+TraceConfig::EventFilterConfig::~EventFilterConfig() = default;
TraceConfig::EventFilterConfig::EventFilterConfig(const EventFilterConfig& tc) {
*this = tc;
@@ -224,16 +224,9 @@ TraceConfig::TraceConfig(StringPiece config_string) {
InitializeDefault();
}
-TraceConfig::TraceConfig(const TraceConfig& tc)
- : record_mode_(tc.record_mode_),
- enable_systrace_(tc.enable_systrace_),
- enable_argument_filter_(tc.enable_argument_filter_),
- category_filter_(tc.category_filter_),
- memory_dump_config_(tc.memory_dump_config_),
- event_filters_(tc.event_filters_) {}
+TraceConfig::TraceConfig(const TraceConfig& tc) = default;
-TraceConfig::~TraceConfig() {
-}
+TraceConfig::~TraceConfig() = default;
TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
if (this == &rhs)
diff --git a/chromium/base/trace_event/trace_config_category_filter.cc b/chromium/base/trace_event/trace_config_category_filter.cc
index de78c04e97f..d1884307a94 100644
--- a/chromium/base/trace_event/trace_config_category_filter.cc
+++ b/chromium/base/trace_event/trace_config_category_filter.cc
@@ -20,23 +20,15 @@ const char kIncludedCategoriesParam[] = "included_categories";
const char kExcludedCategoriesParam[] = "excluded_categories";
}
-TraceConfigCategoryFilter::TraceConfigCategoryFilter() {}
+TraceConfigCategoryFilter::TraceConfigCategoryFilter() = default;
TraceConfigCategoryFilter::TraceConfigCategoryFilter(
- const TraceConfigCategoryFilter& other)
- : included_categories_(other.included_categories_),
- disabled_categories_(other.disabled_categories_),
- excluded_categories_(other.excluded_categories_) {}
+ const TraceConfigCategoryFilter& other) = default;
-TraceConfigCategoryFilter::~TraceConfigCategoryFilter() {}
+TraceConfigCategoryFilter::~TraceConfigCategoryFilter() = default;
TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
- const TraceConfigCategoryFilter& rhs) {
- included_categories_ = rhs.included_categories_;
- disabled_categories_ = rhs.disabled_categories_;
- excluded_categories_ = rhs.excluded_categories_;
- return *this;
-}
+ const TraceConfigCategoryFilter& rhs) = default;
void TraceConfigCategoryFilter::InitializeFromString(
const StringPiece& category_filter_string) {
diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h
index 0087a012400..7a524af7658 100644
--- a/chromium/base/trace_event/trace_event.h
+++ b/chromium/base/trace_event/trace_event.h
@@ -433,21 +433,27 @@
// location where it was posted from.
//
// This implementation is for when location sources are available.
+// TODO(ssid): The program counter of the current task should be added here.
#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task) \
TRACE_EVENT2("toplevel", run_function, "src_file", \
(task).posted_from.file_name(), "src_func", \
(task).posted_from.function_name()); \
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
- task_event)((task).posted_from.file_name());
+ task_event)((task).posted_from.file_name()); \
+ TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
+ INTERNAL_TRACE_EVENT_UID(task_pc_event)((task).posted_from.program_counter());
#else
// TODO(http://crbug.com760702) remove file name and just pass the program
// counter to the heap profiler macro.
+// TODO(ssid): The program counter of the current task should be added here.
#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task) \
TRACE_EVENT1("toplevel", run_function, "src", (task).posted_from.ToString()) \
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
- task_event)((task).posted_from.file_name());
+ task_event)((task).posted_from.file_name()); \
+ TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
+ INTERNAL_TRACE_EVENT_UID(task_pc_event)((task).posted_from.program_counter());
#endif
diff --git a/chromium/base/trace_event/trace_event_argument.cc b/chromium/base/trace_event/trace_event_argument.cc
index 3bca7a9e311..e614b272d50 100644
--- a/chromium/base/trace_event/trace_event_argument.cc
+++ b/chromium/base/trace_event/trace_event_argument.cc
@@ -12,7 +12,7 @@
#include "base/containers/circular_deque.h"
#include "base/json/string_escape.h"
#include "base/memory/ptr_util.h"
-#include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "base/values.h"
@@ -353,9 +353,9 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
}
std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
- std::unique_ptr<DictionaryValue> root(new DictionaryValue);
- DictionaryValue* cur_dict = root.get();
- ListValue* cur_list = nullptr;
+ base::Value root(base::Value::Type::DICTIONARY);
+ Value* cur_dict = &root;
+ Value* cur_list = nullptr;
std::vector<Value*> stack;
PickleIterator it(pickle_);
const char* type;
@@ -364,16 +364,15 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
switch (*type) {
case kTypeStartDict: {
- auto new_dict = std::make_unique<DictionaryValue>();
+ base::Value new_dict(base::Value::Type::DICTIONARY);
if (cur_dict) {
stack.push_back(cur_dict);
- cur_dict = cur_dict->SetDictionaryWithoutPathExpansion(
- ReadKeyName(it), std::move(new_dict));
+ cur_dict = cur_dict->SetKey(ReadKeyName(it), std::move(new_dict));
} else {
- cur_list->Append(std::move(new_dict));
+ cur_list->GetList().push_back(std::move(new_dict));
// |new_dict| is invalidated at this point, so |cur_dict| needs to be
// reset.
- cur_list->GetDictionary(cur_list->GetSize() - 1, &cur_dict);
+ cur_dict = &cur_list->GetList().back();
stack.push_back(cur_list);
cur_list = nullptr;
}
@@ -381,67 +380,72 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
case kTypeEndArray:
case kTypeEndDict: {
- if (stack.back()->GetAsDictionary(&cur_dict)) {
+ if (stack.back()->is_dict()) {
+ cur_dict = stack.back();
cur_list = nullptr;
- } else if (stack.back()->GetAsList(&cur_list)) {
+ } else if (stack.back()->is_list()) {
+ cur_list = stack.back();
cur_dict = nullptr;
}
stack.pop_back();
} break;
case kTypeStartArray: {
- auto new_list = std::make_unique<ListValue>();
+ base::Value new_list(base::Value::Type::LIST);
if (cur_dict) {
stack.push_back(cur_dict);
- cur_list = cur_dict->SetListWithoutPathExpansion(ReadKeyName(it),
- std::move(new_list));
+ cur_list = cur_dict->SetKey(ReadKeyName(it), std::move(new_list));
cur_dict = nullptr;
} else {
- cur_list->Append(std::move(new_list));
+ cur_list->GetList().push_back(std::move(new_list));
stack.push_back(cur_list);
// |cur_list| is invalidated at this point by the Append, so it needs
// to be reset.
- cur_list->GetList(cur_list->GetSize() - 1, &cur_list);
+ cur_list = &cur_list->GetList().back();
}
} break;
case kTypeBool: {
bool value;
CHECK(it.ReadBool(&value));
+ base::Value new_bool(value);
if (cur_dict) {
- cur_dict->SetKey(ReadKeyName(it), Value(value));
+ cur_dict->SetKey(ReadKeyName(it), std::move(new_bool));
} else {
- cur_list->AppendBoolean(value);
+ cur_list->GetList().push_back(std::move(new_bool));
}
} break;
case kTypeInt: {
int value;
CHECK(it.ReadInt(&value));
+ base::Value new_int(value);
if (cur_dict) {
- cur_dict->SetKey(ReadKeyName(it), Value(value));
+ cur_dict->SetKey(ReadKeyName(it), std::move(new_int));
} else {
- cur_list->AppendInteger(value);
+ cur_list->GetList().push_back(std::move(new_int));
}
} break;
case kTypeDouble: {
double value;
CHECK(it.ReadDouble(&value));
+ base::Value new_double(value);
if (cur_dict) {
- cur_dict->SetKey(ReadKeyName(it), Value(value));
+ cur_dict->SetKey(ReadKeyName(it), std::move(new_double));
} else {
- cur_list->AppendDouble(value);
+ cur_list->GetList().push_back(std::move(new_double));
}
} break;
case kTypeString: {
std::string value;
CHECK(it.ReadString(&value));
+ base::Value new_str(std::move(value));
if (cur_dict) {
- cur_dict->SetKey(ReadKeyName(it), Value(value));
+ cur_dict->SetKey(ReadKeyName(it), std::move(new_str));
} else {
- cur_list->AppendString(value);
+ cur_list->GetList().push_back(std::move(new_str));
}
} break;
@@ -450,7 +454,7 @@ std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
}
}
DCHECK(stack.empty());
- return std::move(root);
+ return base::Value::ToUniquePtrValue(std::move(root));
}
void TracedValue::AppendAsTraceFormat(std::string* out) const {
diff --git a/chromium/base/trace_event/trace_event_filter.cc b/chromium/base/trace_event/trace_event_filter.cc
index 62652958647..d0b116ee044 100644
--- a/chromium/base/trace_event/trace_event_filter.cc
+++ b/chromium/base/trace_event/trace_event_filter.cc
@@ -7,8 +7,8 @@
namespace base {
namespace trace_event {
-TraceEventFilter::TraceEventFilter() {}
-TraceEventFilter::~TraceEventFilter() {}
+TraceEventFilter::TraceEventFilter() = default;
+TraceEventFilter::~TraceEventFilter() = default;
void TraceEventFilter::EndEvent(const char* category_name,
const char* event_name) const {}
diff --git a/chromium/base/trace_event/trace_event_filter_test_utils.cc b/chromium/base/trace_event/trace_event_filter_test_utils.cc
index 06548b049a2..85b4cfa2768 100644
--- a/chromium/base/trace_event/trace_event_filter_test_utils.cc
+++ b/chromium/base/trace_event/trace_event_filter_test_utils.cc
@@ -26,8 +26,8 @@ std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
return res;
}
-TestEventFilter::TestEventFilter() {}
-TestEventFilter::~TestEventFilter() {}
+TestEventFilter::TestEventFilter() = default;
+TestEventFilter::~TestEventFilter() = default;
bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
if (g_hits_counter)
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index a53d9b49bac..649b2af2f81 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -45,18 +45,17 @@ TraceEvent::TraceEvent()
: duration_(TimeDelta::FromInternalValue(-1)),
scope_(trace_event_internal::kGlobalScope),
id_(0u),
- category_group_enabled_(NULL),
- name_(NULL),
+ category_group_enabled_(nullptr),
+ name_(nullptr),
thread_id_(0),
flags_(0),
phase_(TRACE_EVENT_PHASE_BEGIN) {
for (int i = 0; i < kTraceMaxNumArgs; ++i)
- arg_names_[i] = NULL;
+ arg_names_[i] = nullptr;
memset(arg_values_, 0, sizeof(arg_values_));
}
-TraceEvent::~TraceEvent() {
-}
+TraceEvent::~TraceEvent() = default;
void TraceEvent::MoveFrom(std::unique_ptr<TraceEvent> other) {
timestamp_ = other->timestamp_;
@@ -125,7 +124,7 @@ void TraceEvent::Initialize(
}
}
for (; i < kTraceMaxNumArgs; ++i) {
- arg_names_[i] = NULL;
+ arg_names_[i] = nullptr;
arg_values_[i].as_uint = 0u;
convertable_values_[i].reset();
arg_types_[i] = TRACE_VALUE_TYPE_UINT;
@@ -229,7 +228,7 @@ void TraceEvent::AppendValueAsJSON(unsigned char type,
std::string real;
double val = value.as_double;
if (std::isfinite(val)) {
- real = DoubleToString(val);
+ real = NumberToString(val);
// Ensure that the number has a .0 if there's no decimal or 'e'. This
// makes sure that when we read the JSON back, it's interpreted as a
// real rather than an int.
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index bb6edfba703..d5875f8b4f8 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -58,7 +58,7 @@ const char* ObjectTypeToString(TraceEventMemoryOverhead::ObjectType type) {
TraceEventMemoryOverhead::TraceEventMemoryOverhead() : allocated_objects_() {}
-TraceEventMemoryOverhead::~TraceEventMemoryOverhead() {}
+TraceEventMemoryOverhead::~TraceEventMemoryOverhead() = default;
void TraceEventMemoryOverhead::AddInternal(ObjectType object_type,
size_t count,
diff --git a/chromium/base/trace_event/trace_event_system_stats_monitor.cc b/chromium/base/trace_event/trace_event_system_stats_monitor.cc
index 52e1cdcc3d9..7e082f348ec 100644
--- a/chromium/base/trace_event/trace_event_system_stats_monitor.cc
+++ b/chromium/base/trace_event/trace_event_system_stats_monitor.cc
@@ -27,8 +27,8 @@ namespace {
// Holds profiled system stats until the tracing system needs to serialize it.
class SystemStatsHolder : public base::trace_event::ConvertableToTraceFormat {
public:
- SystemStatsHolder() { }
- ~SystemStatsHolder() override {}
+ SystemStatsHolder() = default;
+ ~SystemStatsHolder() override = default;
// Fills system_metrics_ with profiled system memory and disk stats.
// Uses the previous stats to compute rates if this is not the first profile.
diff --git a/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc b/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
index cf9bdb7c9d4..52a05ba9cd0 100644
--- a/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
+++ b/chromium/base/trace_event/trace_event_system_stats_monitor_unittest.cc
@@ -22,8 +22,8 @@ namespace trace_event {
// Exists as a class so it can be a friend of TraceEventSystemStatsMonitor.
class TraceSystemStatsMonitorTest : public testing::Test {
public:
- TraceSystemStatsMonitorTest() {}
- ~TraceSystemStatsMonitorTest() override {}
+ TraceSystemStatsMonitorTest() = default;
+ ~TraceSystemStatsMonitorTest() override = default;
private:
DISALLOW_COPY_AND_ASSIGN(TraceSystemStatsMonitorTest);
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index 5ef9a3b5aa9..625168f6417 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -147,7 +147,7 @@ class TraceEventTestFixture : public testing::Test {
void SetUp() override {
const char* name = PlatformThread::GetName();
- old_thread_name_ = name ? strdup(name) : NULL;
+ old_thread_name_ = name ? strdup(name) : nullptr;
TraceLog::DeleteForTesting();
TraceLog* tracelog = TraceLog::GetInstance();
@@ -161,7 +161,7 @@ class TraceEventTestFixture : public testing::Test {
EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
PlatformThread::SetName(old_thread_name_ ? old_thread_name_ : "");
free(old_thread_name_);
- old_thread_name_ = NULL;
+ old_thread_name_ = nullptr;
// We want our singleton torn down after each test.
TraceLog::DeleteForTesting();
}
@@ -199,7 +199,7 @@ void TraceEventTestFixture::OnTraceDataCollected(
LOG(ERROR) << json_output_.json_output;
}
- ListValue* root_list = NULL;
+ ListValue* root_list = nullptr;
ASSERT_TRUE(root.get());
ASSERT_TRUE(root->GetAsList(&root_list));
@@ -230,7 +230,7 @@ static bool CompareJsonValues(const std::string& lhs,
static bool IsKeyValueInDict(const JsonKeyValue* key_value,
DictionaryValue* dict) {
- Value* value = NULL;
+ Value* value = nullptr;
std::string value_str;
if (dict->Get(key_value->key, &value) &&
value->GetAsString(&value_str) &&
@@ -238,7 +238,7 @@ static bool IsKeyValueInDict(const JsonKeyValue* key_value,
return true;
// Recurse to test arguments
- DictionaryValue* args_dict = NULL;
+ DictionaryValue* args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
if (args_dict)
return IsKeyValueInDict(key_value, args_dict);
@@ -262,7 +262,7 @@ DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
// Scan all items
size_t trace_parsed_count = trace_parsed_.GetSize();
for (size_t i = 0; i < trace_parsed_count; i++) {
- Value* value = NULL;
+ Value* value = nullptr;
trace_parsed_.Get(i, &value);
if (!value || value->type() != Value::Type::DICTIONARY)
continue;
@@ -271,7 +271,7 @@ DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
if (IsAllKeyValueInDict(key_values, dict))
return dict;
}
- return NULL;
+ return nullptr;
}
void TraceEventTestFixture::DropTracedMetadataRecords() {
@@ -297,11 +297,9 @@ void TraceEventTestFixture::DropTracedMetadataRecords() {
DictionaryValue* TraceEventTestFixture::FindNamePhase(const char* name,
const char* phase) {
- JsonKeyValue key_values[] = {
- {"name", name, IS_EQUAL},
- {"ph", phase, IS_EQUAL},
- {0, 0, IS_EQUAL}
- };
+ JsonKeyValue key_values[] = {{"name", name, IS_EQUAL},
+ {"ph", phase, IS_EQUAL},
+ {nullptr, nullptr, IS_EQUAL}};
return FindMatchingTraceEntry(key_values);
}
@@ -310,30 +308,24 @@ DictionaryValue* TraceEventTestFixture::FindNamePhaseKeyValue(
const char* phase,
const char* key,
const char* value) {
- JsonKeyValue key_values[] = {
- {"name", name, IS_EQUAL},
- {"ph", phase, IS_EQUAL},
- {key, value, IS_EQUAL},
- {0, 0, IS_EQUAL}
- };
+ JsonKeyValue key_values[] = {{"name", name, IS_EQUAL},
+ {"ph", phase, IS_EQUAL},
+ {key, value, IS_EQUAL},
+ {nullptr, nullptr, IS_EQUAL}};
return FindMatchingTraceEntry(key_values);
}
bool TraceEventTestFixture::FindMatchingValue(const char* key,
const char* value) {
- JsonKeyValue key_values[] = {
- {key, value, IS_EQUAL},
- {0, 0, IS_EQUAL}
- };
+ JsonKeyValue key_values[] = {{key, value, IS_EQUAL},
+ {nullptr, nullptr, IS_EQUAL}};
return FindMatchingTraceEntry(key_values);
}
bool TraceEventTestFixture::FindNonMatchingValue(const char* key,
const char* value) {
- JsonKeyValue key_values[] = {
- {key, value, IS_NOT_EQUAL},
- {0, 0, IS_EQUAL}
- };
+ JsonKeyValue key_values[] = {{key, value, IS_NOT_EQUAL},
+ {nullptr, nullptr, IS_EQUAL}};
return FindMatchingTraceEntry(key_values);
}
@@ -349,7 +341,7 @@ bool IsStringInDict(const char* string_to_match, const DictionaryValue* dict) {
}
// Recurse to test arguments
- const DictionaryValue* args_dict = NULL;
+ const DictionaryValue* args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
if (args_dict)
return IsStringInDict(string_to_match, args_dict);
@@ -360,15 +352,15 @@ bool IsStringInDict(const char* string_to_match, const DictionaryValue* dict) {
const DictionaryValue* FindTraceEntry(
const ListValue& trace_parsed,
const char* string_to_match,
- const DictionaryValue* match_after_this_item = NULL) {
+ const DictionaryValue* match_after_this_item = nullptr) {
// Scan all items
size_t trace_parsed_count = trace_parsed.GetSize();
for (size_t i = 0; i < trace_parsed_count; i++) {
- const Value* value = NULL;
+ const Value* value = nullptr;
trace_parsed.Get(i, &value);
if (match_after_this_item) {
if (value == match_after_this_item)
- match_after_this_item = NULL;
+ match_after_this_item = nullptr;
continue;
}
if (!value || value->type() != Value::Type::DICTIONARY)
@@ -378,7 +370,7 @@ const DictionaryValue* FindTraceEntry(
if (IsStringInDict(string_to_match, dict))
return dict;
}
- return NULL;
+ return nullptr;
}
std::vector<const DictionaryValue*> FindTraceEntries(
@@ -387,7 +379,7 @@ std::vector<const DictionaryValue*> FindTraceEntries(
std::vector<const DictionaryValue*> hits;
size_t trace_parsed_count = trace_parsed.GetSize();
for (size_t i = 0; i < trace_parsed_count; i++) {
- const Value* value = NULL;
+ const Value* value = nullptr;
trace_parsed.Get(i, &value);
if (!value || value->type() != Value::Type::DICTIONARY)
continue;
@@ -544,7 +536,7 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
}
void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
- const DictionaryValue* item = NULL;
+ const DictionaryValue* item = nullptr;
#define EXPECT_FIND_(string) \
item = FindTraceEntry(trace_parsed, string); \
@@ -1137,7 +1129,7 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
size_t trace_parsed_count = trace_parsed.GetSize();
for (size_t i = 0; i < trace_parsed_count; i++) {
- const Value* value = NULL;
+ const Value* value = nullptr;
trace_parsed.Get(i, &value);
if (!value || value->type() != Value::Type::DICTIONARY)
continue;
@@ -1180,7 +1172,7 @@ TEST_F(TraceEventTestFixture, DataCaptured) {
TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
TraceLog::RECORDING_MODE);
- TraceWithAllMacroVariants(NULL);
+ TraceWithAllMacroVariants(nullptr);
EndTraceAndFlush();
@@ -1193,7 +1185,7 @@ TEST_F(TraceEventTestFixture, DataDiscarded) {
TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
TraceLog::RECORDING_MODE);
- TraceWithAllMacroVariants(NULL);
+ TraceWithAllMacroVariants(nullptr);
CancelTrace();
@@ -1285,8 +1277,8 @@ TEST_F(TraceEventTestFixture, EnabledObserverFiresOnDisable) {
class AfterStateChangeEnabledStateObserver
: public TraceLog::EnabledStateObserver {
public:
- AfterStateChangeEnabledStateObserver() {}
- ~AfterStateChangeEnabledStateObserver() override {}
+ AfterStateChangeEnabledStateObserver() = default;
+ ~AfterStateChangeEnabledStateObserver() override = default;
// TraceLog::EnabledStateObserver overrides:
void OnTraceLogEnabled() override {
@@ -1316,8 +1308,8 @@ TEST_F(TraceEventTestFixture, ObserversFireAfterStateChange) {
class SelfRemovingEnabledStateObserver
: public TraceLog::EnabledStateObserver {
public:
- SelfRemovingEnabledStateObserver() {}
- ~SelfRemovingEnabledStateObserver() override {}
+ SelfRemovingEnabledStateObserver() = default;
+ ~SelfRemovingEnabledStateObserver() override = default;
// TraceLog::EnabledStateObserver overrides:
void OnTraceLogEnabled() override {}
@@ -1414,7 +1406,7 @@ TEST_F(TraceEventTestFixture, AddMetadataEvent) {
class Convertable : public ConvertableToTraceFormat {
public:
explicit Convertable(int* num_calls) : num_calls_(num_calls) {}
- ~Convertable() override {}
+ ~Convertable() override = default;
void AppendAsTraceFormat(std::string* out) const override {
(*num_calls_)++;
out->append("\"metadata_value\"");
@@ -1641,7 +1633,7 @@ TEST_F(TraceEventTestFixture, AsyncBeginEndPointerMangling) {
EXPECT_TRUE(async_begin2);
EXPECT_TRUE(async_end);
- Value* value = NULL;
+ Value* value = nullptr;
std::string async_begin_id_str;
std::string async_begin2_id_str;
std::string async_end_id_str;
@@ -1688,8 +1680,8 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
ASSERT_TRUE(event2);
EXPECT_STREQ("name1", event1->name());
EXPECT_STREQ("name2", event2->name());
- EXPECT_TRUE(event1->parameter_copy_storage() != NULL);
- EXPECT_TRUE(event2->parameter_copy_storage() != NULL);
+ EXPECT_TRUE(event1->parameter_copy_storage() != nullptr);
+ EXPECT_TRUE(event2->parameter_copy_storage() != nullptr);
EXPECT_GT(event1->parameter_copy_storage()->size(), 0u);
EXPECT_GT(event2->parameter_copy_storage()->size(), 0u);
EndTraceAndFlush();
@@ -1705,8 +1697,8 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
0, trace_event_internal::kNoId,
"arg1", "argval", "arg2", "argval");
// Test that static TRACE_STR_COPY NULL string arguments are not copied.
- const char* str1 = NULL;
- const char* str2 = NULL;
+ const char* str1 = nullptr;
+ const char* str2 = nullptr;
TraceEventHandle handle2 =
trace_event_internal::AddTraceEvent(
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
@@ -1721,8 +1713,8 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
ASSERT_TRUE(event2);
EXPECT_STREQ("name1", event1->name());
EXPECT_STREQ("name2", event2->name());
- EXPECT_TRUE(event1->parameter_copy_storage() == NULL);
- EXPECT_TRUE(event2->parameter_copy_storage() == NULL);
+ EXPECT_TRUE(event1->parameter_copy_storage() == nullptr);
+ EXPECT_TRUE(event2->parameter_copy_storage() == nullptr);
EndTraceAndFlush();
}
}
@@ -1904,7 +1896,7 @@ TEST_F(TraceEventTestFixture, DisabledCategories) {
TRACE_EVENT_INSTANT0("included", "first", TRACE_EVENT_SCOPE_THREAD);
EndTraceAndFlush();
{
- const DictionaryValue* item = NULL;
+ const DictionaryValue* item = nullptr;
ListValue& trace_parsed = trace_parsed_;
EXPECT_NOT_FIND_("disabled-by-default-cc");
EXPECT_FIND_("included");
@@ -1918,7 +1910,7 @@ TEST_F(TraceEventTestFixture, DisabledCategories) {
EndTraceAndFlush();
{
- const DictionaryValue* item = NULL;
+ const DictionaryValue* item = nullptr;
ListValue& trace_parsed = trace_parsed_;
EXPECT_FIND_("disabled-by-default-cc");
EXPECT_FIND_("other_included");
@@ -1934,7 +1926,7 @@ TEST_F(TraceEventTestFixture, DisabledCategories) {
EndTraceAndFlush();
{
- const DictionaryValue* item = NULL;
+ const DictionaryValue* item = nullptr;
ListValue& trace_parsed = trace_parsed_;
EXPECT_FIND_("disabled-by-default-cc,other_included");
EXPECT_FIND_("other_included,disabled-by-default-cc");
@@ -2161,8 +2153,8 @@ TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
class MyData : public ConvertableToTraceFormat {
public:
- MyData() {}
- ~MyData() override {}
+ MyData() = default;
+ ~MyData() override = default;
void AppendAsTraceFormat(std::string* out) const override {
out->append("{\"foo\":1}");
@@ -2202,12 +2194,12 @@ TEST_F(TraceEventTestFixture, ConvertableTypes) {
DictionaryValue* dict = FindNamePhase("bar", "X");
ASSERT_TRUE(dict);
- const DictionaryValue* args_dict = NULL;
+ const DictionaryValue* args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
- const Value* value = NULL;
- const DictionaryValue* convertable_dict = NULL;
+ const Value* value = nullptr;
+ const DictionaryValue* convertable_dict = nullptr;
EXPECT_TRUE(args_dict->Get("data", &value));
ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
@@ -2219,17 +2211,17 @@ TEST_F(TraceEventTestFixture, ConvertableTypes) {
dict = FindNamePhase("baz", "X");
ASSERT_TRUE(dict);
- args_dict = NULL;
+ args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
- value = NULL;
- convertable_dict = NULL;
+ value = nullptr;
+ convertable_dict = nullptr;
EXPECT_TRUE(args_dict->Get("data1", &value));
ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
- value = NULL;
- convertable_dict = NULL;
+ value = nullptr;
+ convertable_dict = nullptr;
EXPECT_TRUE(args_dict->Get("data2", &value));
ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
@@ -2237,7 +2229,7 @@ TEST_F(TraceEventTestFixture, ConvertableTypes) {
dict = FindNamePhase("string_first", "X");
ASSERT_TRUE(dict);
- args_dict = NULL;
+ args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
@@ -2245,8 +2237,8 @@ TEST_F(TraceEventTestFixture, ConvertableTypes) {
EXPECT_TRUE(args_dict->GetString("str", &str_value));
EXPECT_STREQ("string value 1", str_value.c_str());
- value = NULL;
- convertable_dict = NULL;
+ value = nullptr;
+ convertable_dict = nullptr;
foo_val = 0;
EXPECT_TRUE(args_dict->Get("convert", &value));
ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
@@ -2256,15 +2248,15 @@ TEST_F(TraceEventTestFixture, ConvertableTypes) {
dict = FindNamePhase("string_second", "X");
ASSERT_TRUE(dict);
- args_dict = NULL;
+ args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->GetString("str", &str_value));
EXPECT_STREQ("string value 2", str_value.c_str());
- value = NULL;
- convertable_dict = NULL;
+ value = nullptr;
+ convertable_dict = nullptr;
foo_val = 0;
EXPECT_TRUE(args_dict->Get("convert", &value));
ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
@@ -2274,12 +2266,12 @@ TEST_F(TraceEventTestFixture, ConvertableTypes) {
dict = FindNamePhase("both_conv", "X");
ASSERT_TRUE(dict);
- args_dict = NULL;
+ args_dict = nullptr;
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
- value = NULL;
- convertable_dict = NULL;
+ value = nullptr;
+ convertable_dict = nullptr;
foo_val = 0;
EXPECT_TRUE(args_dict->Get("convert1", &value));
ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
@@ -2302,7 +2294,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
-std::numeric_limits<float>::infinity());
TRACE_EVENT1("foo", "event7", "double_nan",
std::numeric_limits<double>::quiet_NaN());
- void* p = 0;
+ void* p = nullptr;
TRACE_EVENT1("foo", "event8", "pointer_null", p);
p = reinterpret_cast<void*>(0xbadf00d);
TRACE_EVENT1("foo", "event9", "pointer_badf00d", p);
@@ -2318,9 +2310,9 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
base::TimeTicks::FromInternalValue(1));
EndTraceAndFlush();
- const DictionaryValue* args_dict = NULL;
- DictionaryValue* dict = NULL;
- const Value* value = NULL;
+ const DictionaryValue* args_dict = nullptr;
+ DictionaryValue* dict = nullptr;
+ const Value* value = nullptr;
std::string str_value;
int int_value;
double double_value;
@@ -2503,8 +2495,8 @@ TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
EndTraceAndFlush();
- const DictionaryValue* args_dict = NULL;
- DictionaryValue* dict = NULL;
+ const DictionaryValue* args_dict = nullptr;
+ DictionaryValue* dict = nullptr;
int int_value;
dict = FindNamePhase("event1", "X");
@@ -2551,7 +2543,7 @@ TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
EndTraceAndFlush();
- const DictionaryValue* trace_full_metadata = NULL;
+ const DictionaryValue* trace_full_metadata = nullptr;
trace_full_metadata = FindTraceEntry(trace_parsed_,
"overflowed_at_ts");
@@ -2571,7 +2563,7 @@ TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
// Test that buffer_limit_reached_timestamp's value is between the timestamp
// of the last trace event and current time.
DropTracedMetadataRecords();
- const DictionaryValue* last_trace_event = NULL;
+ const DictionaryValue* last_trace_event = nullptr;
double last_trace_event_timestamp = 0;
EXPECT_TRUE(trace_parsed_.GetDictionary(trace_parsed_.GetSize() - 1,
&last_trace_event));
@@ -2844,7 +2836,7 @@ TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
ValidateAllTraceMacrosCreatedData(trace_parsed_);
}
-std::string* g_log_buffer = NULL;
+std::string* g_log_buffer = nullptr;
bool MockLogMessageHandler(int, const char*, int, size_t,
const std::string& str) {
if (!g_log_buffer)
@@ -2880,7 +2872,7 @@ TEST_F(TraceEventTestFixture, EchoToConsole) {
EndTraceAndFlush();
delete g_log_buffer;
logging::SetLogMessageHandler(old_log_message_handler);
- g_log_buffer = NULL;
+ g_log_buffer = nullptr;
}
bool LogMessageHandlerWithTraceEvent(int, const char*, int, size_t,
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index 81dc4b77b71..92fccb93601 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -55,7 +55,7 @@ class DeleteTraceLogForTesting {
public:
static void Delete() {
Singleton<trace_event::TraceLog,
- LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
+ LeakySingletonTraits<trace_event::TraceLog>>::OnExit(nullptr);
}
};
@@ -267,7 +267,7 @@ TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
FlushWhileLocked();
trace_log_->thread_message_loops_.erase(MessageLoop::current());
}
- trace_log_->thread_local_event_buffer_.Set(NULL);
+ trace_log_->thread_local_event_buffer_.Set(nullptr);
}
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
@@ -285,7 +285,7 @@ TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
trace_log_->CheckIfBufferIsFullWhileLocked();
}
if (!chunk_)
- return NULL;
+ return nullptr;
size_t event_index;
TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
@@ -327,7 +327,7 @@ void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
struct TraceLog::RegisteredAsyncObserver {
explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
: observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
- ~RegisteredAsyncObserver() {}
+ ~RegisteredAsyncObserver() = default;
WeakPtr<AsyncEnabledStateObserver> observer;
scoped_refptr<SequencedTaskRunner> task_runner;
@@ -335,7 +335,7 @@ struct TraceLog::RegisteredAsyncObserver {
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
-TraceLogStatus::~TraceLogStatus() {}
+TraceLogStatus::~TraceLogStatus() = default;
// static
TraceLog* TraceLog::GetInstance() {
@@ -369,7 +369,7 @@ TraceLog::TraceLog()
nullptr);
}
-TraceLog::~TraceLog() {}
+TraceLog::~TraceLog() = default;
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
// A ThreadLocalEventBuffer needs the message loop
@@ -384,7 +384,7 @@ void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
if (thread_local_event_buffer &&
!CheckGeneration(thread_local_event_buffer->generation())) {
delete thread_local_event_buffer;
- thread_local_event_buffer = NULL;
+ thread_local_event_buffer = nullptr;
}
if (!thread_local_event_buffer) {
thread_local_event_buffer = new ThreadLocalEventBuffer(this);
@@ -773,7 +773,7 @@ TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
CheckIfBufferIsFullWhileLocked();
}
if (!thread_shared_chunk_)
- return NULL;
+ return nullptr;
size_t event_index;
TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
@@ -915,7 +915,7 @@ void TraceLog::FinishFlush(int generation, bool discard_events) {
UseNextTraceBuffer();
thread_message_loops_.clear();
- flush_task_runner_ = NULL;
+ flush_task_runner_ = nullptr;
flush_output_callback = flush_output_callback_;
flush_output_callback_.Reset();
@@ -1248,7 +1248,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
!disabled_by_filters) {
OptionalAutoLock lock(&lock_);
- TraceEvent* trace_event = NULL;
+ TraceEvent* trace_event = nullptr;
if (thread_local_event_buffer) {
trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
} else {
@@ -1457,22 +1457,23 @@ void TraceLog::AddMetadataEventsWhileLocked() {
}
#if !defined(OS_NACL) // NaCl shouldn't expose the process id.
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- 0, "num_cpus", "number",
- base::SysInfo::NumberOfProcessors());
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false), 0, "num_cpus",
+ "number", base::SysInfo::NumberOfProcessors());
#endif
int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
if (process_sort_index_ != 0) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id, "process_sort_index",
- "sort_index", process_sort_index_);
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+ current_thread_id, "process_sort_index", "sort_index",
+ process_sort_index_);
}
if (!process_name_.empty()) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id, "process_name", "name",
- process_name_);
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+ current_thread_id, "process_name", "name", process_name_);
}
// See https://crbug.com/726484 for Fuchsia.
@@ -1480,9 +1481,10 @@ void TraceLog::AddMetadataEventsWhileLocked() {
Time process_creation_time = CurrentProcessInfo::CreationTime();
if (!process_creation_time.is_null()) {
TimeDelta process_uptime = Time::Now() - process_creation_time;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id, "process_uptime_seconds",
- "uptime", process_uptime.InSeconds());
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+ current_thread_id, "process_uptime_seconds", "uptime",
+ process_uptime.InSeconds());
}
#endif // !defined(OS_NACL) && !defined(OS_IOS) && !defined(OS_FUCHSIA)
@@ -1490,18 +1492,19 @@ void TraceLog::AddMetadataEventsWhileLocked() {
std::vector<base::StringPiece> labels;
for (const auto& it : process_labels_)
labels.push_back(it.second);
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id, "process_labels", "labels",
- base::JoinString(labels, ","));
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+ current_thread_id, "process_labels", "labels",
+ base::JoinString(labels, ","));
}
// Thread sort indices.
for (const auto& it : thread_sort_indices_) {
if (it.second == 0)
continue;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it.first, "thread_sort_index", "sort_index",
- it.second);
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false), it.first,
+ "thread_sort_index", "sort_index", it.second);
}
// Thread names.
@@ -1509,32 +1512,33 @@ void TraceLog::AddMetadataEventsWhileLocked() {
for (const auto& it : thread_names_) {
if (it.second.empty())
continue;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it.first, "thread_name", "name", it.second);
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false), it.first,
+ "thread_name", "name", it.second);
}
// If buffer is full, add a metadata record to report this.
if (!buffer_limit_reached_timestamp_.is_null()) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id, "trace_buffer_overflowed",
- "overflowed_at_ts",
- buffer_limit_reached_timestamp_);
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+ current_thread_id, "trace_buffer_overflowed", "overflowed_at_ts",
+ buffer_limit_reached_timestamp_);
}
}
void TraceLog::DeleteForTesting() {
- internal::DeleteTraceLogForTesting::Delete();
+ base::internal::DeleteTraceLogForTesting::Delete();
CategoryRegistry::ResetForTesting();
}
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
- return GetEventByHandleInternal(handle, NULL);
+ return GetEventByHandleInternal(handle, nullptr);
}
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
OptionalAutoLock* lock) {
if (!handle.chunk_seq)
- return NULL;
+ return nullptr;
DCHECK(handle.chunk_seq);
DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
@@ -1556,7 +1560,7 @@ TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
handle.chunk_index == thread_shared_chunk_index_) {
return handle.chunk_seq == thread_shared_chunk_->seq()
? thread_shared_chunk_->GetEventAt(handle.event_index)
- : NULL;
+ : nullptr;
}
return logged_events_->GetEventByHandle(handle);
@@ -1577,11 +1581,6 @@ void TraceLog::SetProcessSortIndex(int sort_index) {
process_sort_index_ = sort_index;
}
-void TraceLog::SetProcessName(const char* process_name) {
- AutoLock lock(lock_);
- process_name_ = process_name;
-}
-
void TraceLog::UpdateProcessLabel(int label_id,
const std::string& current_label) {
if (!current_label.length())
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
index abb472155b7..284298ccf6e 100644
--- a/chromium/base/trace_event/trace_log.h
+++ b/chromium/base/trace_event/trace_log.h
@@ -306,9 +306,13 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// on their sort index, ascending, then by their name, and then tid.
void SetProcessSortIndex(int sort_index);
- // Sets the name of the process. |process_name| should be a string literal
- // since it is a whitelisted argument for background field trials.
- void SetProcessName(const char* process_name);
+ // Sets the name of the process.
+ void set_process_name(const std::string& process_name) {
+ AutoLock lock(lock_);
+ process_name_ = process_name;
+ }
+
+ bool IsProcessNameEmpty() const { return process_name_.empty(); }
// Processes can have labels in addition to their names. Use labels, for
// instance, to list out the web page titles that a process is handling.
diff --git a/chromium/base/trace_event/tracing_agent.cc b/chromium/base/trace_event/tracing_agent.cc
index 0af90f3c724..cfbaad9f3b3 100644
--- a/chromium/base/trace_event/tracing_agent.cc
+++ b/chromium/base/trace_event/tracing_agent.cc
@@ -7,7 +7,7 @@
namespace base {
namespace trace_event {
-TracingAgent::~TracingAgent() {}
+TracingAgent::~TracingAgent() = default;
bool TracingAgent::SupportsExplicitClockSync() {
return false;
diff --git a/chromium/base/tuple_unittest.cc b/chromium/base/tuple_unittest.cc
index 815b43bb1b3..321c549bfcc 100644
--- a/chromium/base/tuple_unittest.cc
+++ b/chromium/base/tuple_unittest.cc
@@ -16,14 +16,14 @@ void DoAdd(int a, int b, int c, int* res) {
}
struct Addy {
- Addy() { }
+ Addy() = default;
void DoAdd(int a, int b, int c, int d, int* res) {
*res = a + b + c + d;
}
};
struct Addz {
- Addz() { }
+ Addz() = default;
void DoAdd(int a, int b, int c, int d, int e, int* res) {
*res = a + b + c + d + e;
}
@@ -39,6 +39,7 @@ TEST(TupleTest, Basic) {
std::make_tuple(1, static_cast<const char*>("wee"));
ALLOW_UNUSED_LOCAL(t2);
std::tuple<int, int, int> t3(1, 2, 3);
+ ALLOW_UNUSED_LOCAL(t3);
std::tuple<int, int, int, int*> t4(1, 2, 3, &std::get<0>(t1));
std::tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &std::get<0>(t4));
std::tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &std::get<0>(t4));
@@ -67,7 +68,7 @@ namespace {
struct CopyLogger {
CopyLogger() { ++TimesConstructed; }
CopyLogger(const CopyLogger& tocopy) { ++TimesConstructed; ++TimesCopied; }
- ~CopyLogger() { }
+ ~CopyLogger() = default;
static int TimesCopied;
static int TimesConstructed;
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 8d321619e12..76e973221b1 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -17,6 +17,7 @@
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/trace_event/memory_usage_estimator.h"
namespace base {
@@ -80,6 +81,16 @@ std::unique_ptr<Value> Value::CreateWithCopiedBuffer(const char* buffer,
return std::make_unique<Value>(BlobStorage(buffer, buffer + size));
}
+// static
+Value Value::FromUniquePtrValue(std::unique_ptr<Value> val) {
+ return std::move(*val);
+}
+
+// static
+std::unique_ptr<Value> Value::ToUniquePtrValue(Value val) {
+ return std::make_unique<Value>(std::move(val));
+}
+
Value::Value(Value&& that) noexcept {
InternalMoveConstructFrom(std::move(that));
}
@@ -312,6 +323,7 @@ Value* Value::FindPath(span<const StringPiece> path) {
}
const Value* Value::FindPath(std::initializer_list<StringPiece> path) const {
+ DCHECK_GE(path.size(), 2u) << "Use FindKey() for a path of length 1.";
return FindPath(make_span(path.begin(), path.size()));
}
@@ -337,6 +349,7 @@ Value* Value::FindPathOfType(span<const StringPiece> path, Type type) {
const Value* Value::FindPathOfType(std::initializer_list<StringPiece> path,
Type type) const {
+ DCHECK_GE(path.size(), 2u) << "Use FindKeyOfType() for a path of length 1.";
return FindPathOfType(make_span(path.begin(), path.size()), type);
}
@@ -349,6 +362,7 @@ const Value* Value::FindPathOfType(span<const StringPiece> path,
}
Value* Value::SetPath(std::initializer_list<StringPiece> path, Value value) {
+ DCHECK_GE(path.size(), 2u) << "Use SetKey() for a path of length 1.";
return SetPath(make_span(path.begin(), path.size()), std::move(value));
}
@@ -383,6 +397,7 @@ Value* Value::SetPath(span<const StringPiece> path, Value value) {
}
bool Value::RemovePath(std::initializer_list<StringPiece> path) {
+ DCHECK_GE(path.size(), 2u) << "Use RemoveKey() for a path of length 1.";
return RemovePath(make_span(path.begin(), path.size()));
}
@@ -607,6 +622,21 @@ bool Value::Equals(const Value* other) const {
return *this == *other;
}
+size_t Value::EstimateMemoryUsage() const {
+ switch (type_) {
+ case Type::STRING:
+ return base::trace_event::EstimateMemoryUsage(string_value_);
+ case Type::BINARY:
+ return base::trace_event::EstimateMemoryUsage(binary_value_);
+ case Type::DICTIONARY:
+ return base::trace_event::EstimateMemoryUsage(dict_);
+ case Type::LIST:
+ return base::trace_event::EstimateMemoryUsage(list_);
+ default:
+ return 0;
+ }
+}
+
void Value::InternalMoveConstructFrom(Value&& that) {
type_ = that.type_;
@@ -695,24 +725,25 @@ Value* DictionaryValue::Set(StringPiece path, std::unique_ptr<Value> in_value) {
DCHECK(in_value);
StringPiece current_path(path);
- DictionaryValue* current_dictionary = this;
+ Value* current_dictionary = this;
for (size_t delimiter_position = current_path.find('.');
delimiter_position != StringPiece::npos;
delimiter_position = current_path.find('.')) {
// Assume that we're indexing into a dictionary.
StringPiece key = current_path.substr(0, delimiter_position);
- DictionaryValue* child_dictionary = nullptr;
- if (!current_dictionary->GetDictionary(key, &child_dictionary)) {
- child_dictionary = current_dictionary->SetDictionaryWithoutPathExpansion(
- key, std::make_unique<DictionaryValue>());
+ Value* child_dictionary =
+ current_dictionary->FindKeyOfType(key, Type::DICTIONARY);
+ if (!child_dictionary) {
+ child_dictionary =
+ current_dictionary->SetKey(key, Value(Type::DICTIONARY));
}
current_dictionary = child_dictionary;
current_path = current_path.substr(delimiter_position + 1);
}
- return current_dictionary->SetWithoutPathExpansion(current_path,
- std::move(in_value));
+ return static_cast<DictionaryValue*>(current_dictionary)
+ ->SetWithoutPathExpansion(current_path, std::move(in_value));
}
Value* DictionaryValue::SetBoolean(StringPiece path, bool in_value) {
@@ -759,20 +790,6 @@ Value* DictionaryValue::SetWithoutPathExpansion(
return result.first->second.get();
}
-DictionaryValue* DictionaryValue::SetDictionaryWithoutPathExpansion(
- StringPiece path,
- std::unique_ptr<DictionaryValue> in_value) {
- return static_cast<DictionaryValue*>(
- SetWithoutPathExpansion(path, std::move(in_value)));
-}
-
-ListValue* DictionaryValue::SetListWithoutPathExpansion(
- StringPiece path,
- std::unique_ptr<ListValue> in_value) {
- return static_cast<ListValue*>(
- SetWithoutPathExpansion(path, std::move(in_value)));
-}
-
bool DictionaryValue::Get(StringPiece path,
const Value** out_value) const {
DCHECK(IsStringUTF8(path));
@@ -781,7 +798,7 @@ bool DictionaryValue::Get(StringPiece path,
for (size_t delimiter_position = current_path.find('.');
delimiter_position != std::string::npos;
delimiter_position = current_path.find('.')) {
- const DictionaryValue* child_dictionary = NULL;
+ const DictionaryValue* child_dictionary = nullptr;
if (!current_dictionary->GetDictionaryWithoutPathExpansion(
current_path.substr(0, delimiter_position), &child_dictionary)) {
return false;
@@ -1064,13 +1081,13 @@ bool DictionaryValue::RemovePath(StringPiece path,
return RemoveWithoutPathExpansion(path, out_value);
StringPiece subdict_path = path.substr(0, delimiter_position);
- DictionaryValue* subdict = NULL;
+ DictionaryValue* subdict = nullptr;
if (!GetDictionary(subdict_path, &subdict))
return false;
result = subdict->RemovePath(path.substr(delimiter_position + 1),
out_value);
if (result && subdict->empty())
- RemoveWithoutPathExpansion(subdict_path, NULL);
+ RemoveWithoutPathExpansion(subdict_path, nullptr);
return result;
}
@@ -1112,7 +1129,7 @@ DictionaryValue::Iterator::Iterator(const DictionaryValue& target)
DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
-DictionaryValue::Iterator::~Iterator() {}
+DictionaryValue::Iterator::~Iterator() = default;
DictionaryValue* DictionaryValue::DeepCopy() const {
return new DictionaryValue(dict_);
@@ -1214,23 +1231,6 @@ bool ListValue::GetString(size_t index, string16* out_value) const {
return value->GetAsString(out_value);
}
-bool ListValue::GetBinary(size_t index, const Value** out_value) const {
- const Value* value;
- bool result = Get(index, &value);
- if (!result || !value->IsType(Type::BINARY))
- return false;
-
- if (out_value)
- *out_value = value;
-
- return true;
-}
-
-bool ListValue::GetBinary(size_t index, Value** out_value) {
- return static_cast<const ListValue&>(*this).GetBinary(
- index, const_cast<const Value**>(out_value));
-}
-
bool ListValue::GetDictionary(size_t index,
const DictionaryValue** out_value) const {
const Value* value;
@@ -1371,11 +1371,9 @@ std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
return std::make_unique<ListValue>(list_);
}
-ValueSerializer::~ValueSerializer() {
-}
+ValueSerializer::~ValueSerializer() = default;
-ValueDeserializer::~ValueDeserializer() {
-}
+ValueDeserializer::~ValueDeserializer() = default;
std::ostream& operator<<(std::ostream& out, const Value& value) {
std::string json;
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 10967aaa635..4caa440b438 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -32,7 +32,6 @@
#include <vector>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
#include "base/containers/flat_map.h"
#include "base/containers/span.h"
#include "base/macros.h"
@@ -82,7 +81,7 @@ class Value;
class BASE_EXPORT Value {
public:
using BlobStorage = std::vector<char>;
- using DictStorage = base::flat_map<std::string, std::unique_ptr<Value>>;
+ using DictStorage = flat_map<std::string, std::unique_ptr<Value>>;
using ListStorage = std::vector<Value>;
enum class Type {
@@ -105,6 +104,10 @@ class BASE_EXPORT Value {
static std::unique_ptr<Value> CreateWithCopiedBuffer(const char* buffer,
size_t size);
+ // Adaptors for converting from the old way to the new way and vice versa.
+ static Value FromUniquePtrValue(std::unique_ptr<Value> val);
+ static std::unique_ptr<Value> ToUniquePtrValue(Value val);
+
Value(Value&& that) noexcept;
Value() noexcept; // A null value.
@@ -144,7 +147,6 @@ class BASE_EXPORT Value {
static const char* GetTypeName(Type type);
// Returns the type of the value stored by the current Value object.
- Type GetType() const { return type_; } // DEPRECATED, use type().
Type type() const { return type_; }
// Returns true if the current object represents a given type.
@@ -230,13 +232,18 @@ class BASE_EXPORT Value {
//
// std::vector<StringPiece> components = ...
// auto* found = FindPath(components);
+ //
+ // Note: If there is only one component in the path, use FindKey() instead.
Value* FindPath(std::initializer_list<StringPiece> path);
Value* FindPath(span<const StringPiece> path);
const Value* FindPath(std::initializer_list<StringPiece> path) const;
const Value* FindPath(span<const StringPiece> path) const;
- // Like FindPath but will only return the value if the leaf Value type
+ // Like FindPath() but will only return the value if the leaf Value type
// matches the given type. Will return nullptr otherwise.
+ //
+ // Note: If there is only one component in the path, use FindKeyOfType()
+ // instead.
Value* FindPathOfType(std::initializer_list<StringPiece> path, Type type);
Value* FindPathOfType(span<const StringPiece> path, Type type);
const Value* FindPathOfType(std::initializer_list<StringPiece> path,
@@ -257,6 +264,8 @@ class BASE_EXPORT Value {
//
// std::vector<StringPiece> components = ...
// value.SetPath(components, std::move(myvalue));
+ //
+ // Note: If there is only one component in the path, use SetKey() instead.
Value* SetPath(std::initializer_list<StringPiece> path, Value value);
Value* SetPath(span<const StringPiece> path, Value value);
@@ -272,6 +281,8 @@ class BASE_EXPORT Value {
//
// std::vector<StringPiece> components = ...
// bool success = value.RemovePath(components);
+ //
+ // Note: If there is only one component in the path, use RemoveKey() instead.
bool RemovePath(std::initializer_list<StringPiece> path);
bool RemovePath(span<const StringPiece> path);
@@ -335,6 +346,10 @@ class BASE_EXPORT Value {
// TODO(crbug.com/646113): Delete this and migrate callsites.
bool Equals(const Value* other) const;
+ // Estimates dynamic memory usage.
+ // See base/trace_event/memory_usage_estimator.h for more info.
+ size_t EstimateMemoryUsage() const;
+
protected:
// TODO(crbug.com/646113): Make these private once DictionaryValue and
// ListValue are properly inlined.
@@ -420,15 +435,6 @@ class BASE_EXPORT DictionaryValue : public Value {
Value* SetWithoutPathExpansion(StringPiece key,
std::unique_ptr<Value> in_value);
- // Convenience forms of SetWithoutPathExpansion().
- // DEPRECATED, use Value::SetKey(key, Value(Type::DICTIONARY)) instead.
- DictionaryValue* SetDictionaryWithoutPathExpansion(
- StringPiece path,
- std::unique_ptr<DictionaryValue> in_value);
- // DEPRECATED, use Value::SetKey(key, Value(Type::LIST)) instead.
- ListValue* SetListWithoutPathExpansion(StringPiece path,
- std::unique_ptr<ListValue> in_value);
-
// Gets the Value associated with the given path starting from this object.
// A path has the form "<key>" or "<key>.<key>.[...]", where "." indexes
// into the next DictionaryValue down. If the path can be resolved
@@ -599,10 +605,6 @@ class BASE_EXPORT ListValue : public Value {
// DEPRECATED, use GetList()::size() instead.
size_t GetSize() const { return list_.size(); }
- // Returns the capacity of storage for Values in this list.
- // DEPRECATED, use GetList()::capacity() instead.
- size_t capacity() const { return list_.capacity(); }
-
// Returns whether the list is empty.
// DEPRECATED, use GetList()::empty() instead.
bool empty() const { return list_.empty(); }
@@ -642,9 +644,6 @@ class BASE_EXPORT ListValue : public Value {
// DEPRECATED, use GetList()::operator[]::GetString() instead.
bool GetString(size_t index, std::string* out_value) const;
bool GetString(size_t index, string16* out_value) const;
- // DEPRECATED, use GetList()::operator[]::GetBlob() instead.
- bool GetBinary(size_t index, const Value** out_value) const;
- bool GetBinary(size_t index, Value** out_value);
bool GetDictionary(size_t index, const DictionaryValue** out_value) const;
bool GetDictionary(size_t index, DictionaryValue** out_value);
diff --git a/chromium/base/values_unittest.cc b/chromium/base/values_unittest.cc
index 0816e82e4a3..4dc6bf0e0c0 100644
--- a/chromium/base/values_unittest.cc
+++ b/chromium/base/values_unittest.cc
@@ -378,6 +378,10 @@ TEST(ValuesTest, FindKey) {
Value dict(std::move(storage));
EXPECT_NE(nullptr, dict.FindKey("foo"));
EXPECT_EQ(nullptr, dict.FindKey("baz"));
+
+ // Single not found key.
+ bool found = dict.FindKey("notfound");
+ EXPECT_FALSE(found);
}
TEST(ValuesTest, FindKeyChangeValue) {
@@ -603,18 +607,9 @@ TEST(ValuesTest, FindPath) {
root.SetKey("foo", std::move(foo));
// No key (stupid but well-defined and takes work to prevent).
- Value* found = root.FindPath({});
+ Value* found = root.FindPath(std::vector<StringPiece>{});
EXPECT_EQ(&root, found);
- // Single not found key.
- found = root.FindPath({"notfound"});
- EXPECT_FALSE(found);
-
- // Single found key.
- found = root.FindPath({"foo"});
- ASSERT_TRUE(found);
- EXPECT_TRUE(found->is_dict());
-
// Double key, second not found.
found = root.FindPath(std::vector<StringPiece>{"foo", "notfound"});
EXPECT_FALSE(found);
@@ -629,8 +624,8 @@ TEST(ValuesTest, FindPath) {
TEST(ValuesTest, SetPath) {
Value root(Value::Type::DICTIONARY);
- Value* inserted = root.SetPath({"one"}, Value(123));
- Value* found = root.FindPathOfType({"one"}, Value::Type::INTEGER);
+ Value* inserted = root.SetPath({"one", "two"}, Value(123));
+ Value* found = root.FindPathOfType({"one", "two"}, Value::Type::INTEGER);
ASSERT_TRUE(found);
EXPECT_EQ(inserted, found);
EXPECT_EQ(123, found->GetInt());
@@ -682,14 +677,14 @@ TEST(ValuesTest, RemovePath) {
EXPECT_FALSE(root.RemovePath({"one", "two", "three"}));
// Intermediate empty dictionaries should be cleared.
- EXPECT_FALSE(root.FindPath({"one"}));
+ EXPECT_FALSE(root.FindKey("one"));
root.SetPath({"one", "two", "three"}, Value(123));
root.SetPath({"one", "two", "four"}, Value(124));
EXPECT_TRUE(root.RemovePath(std::vector<StringPiece>{"one", "two", "three"}));
// Intermediate non-empty dictionaries should be kept.
- EXPECT_TRUE(root.FindPath({"one"}));
+ EXPECT_TRUE(root.FindKey("one"));
EXPECT_TRUE(root.FindPath({"one", "two"}));
EXPECT_TRUE(root.FindPath({"one", "two", "four"}));
}
@@ -701,11 +696,11 @@ TEST(ValuesTest, Basic) {
ASSERT_FALSE(settings.GetString("global.homepage", &homepage));
ASSERT_EQ(std::string("http://google.com"), homepage);
- ASSERT_FALSE(settings.Get("global", NULL));
+ ASSERT_FALSE(settings.Get("global", nullptr));
settings.SetBoolean("global", true);
- ASSERT_TRUE(settings.Get("global", NULL));
+ ASSERT_TRUE(settings.Get("global", nullptr));
settings.SetString("global.homepage", "http://scurvy.com");
- ASSERT_TRUE(settings.Get("global", NULL));
+ ASSERT_TRUE(settings.Get("global", nullptr));
homepage = "http://google.com";
ASSERT_TRUE(settings.GetString("global.homepage", &homepage));
ASSERT_EQ(std::string("http://scurvy.com"), homepage);
@@ -745,7 +740,7 @@ TEST(ValuesTest, List) {
mixed_list->Set(3, std::make_unique<Value>("foo"));
ASSERT_EQ(4u, mixed_list->GetSize());
- Value *value = NULL;
+ Value* value = nullptr;
bool bool_value = false;
int int_value = 0;
double double_value = 0.0;
@@ -824,7 +819,7 @@ TEST(ValuesTest, StringValue) {
// Test overloaded GetAsString.
std::string narrow = "http://google.com";
string16 utf16 = ASCIIToUTF16("http://google.com");
- const Value* string_value = NULL;
+ const Value* string_value = nullptr;
ASSERT_TRUE(narrow_value->GetAsString(&narrow));
ASSERT_TRUE(narrow_value->GetAsString(&utf16));
ASSERT_TRUE(narrow_value->GetAsString(&string_value));
@@ -840,9 +835,9 @@ TEST(ValuesTest, StringValue) {
ASSERT_EQ(string_value->GetString(), narrow);
// Don't choke on NULL values.
- ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(NULL)));
- ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(NULL)));
- ASSERT_TRUE(narrow_value->GetAsString(static_cast<const Value**>(NULL)));
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(nullptr)));
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(nullptr)));
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<const Value**>(nullptr)));
}
TEST(ValuesTest, ListDeletion) {
@@ -872,7 +867,7 @@ TEST(ValuesTest, ListRemoval) {
{
ListValue list;
list.Append(std::make_unique<Value>());
- EXPECT_TRUE(list.Remove(0, NULL));
+ EXPECT_TRUE(list.Remove(0, nullptr));
EXPECT_EQ(0U, list.GetSize());
}
@@ -948,24 +943,10 @@ TEST(ValuesTest, DictionarySetReturnsPointer) {
{
DictionaryValue dict;
- DictionaryValue* dict_ptr = dict.SetDictionaryWithoutPathExpansion(
- "foo.bar", std::make_unique<base::DictionaryValue>());
- EXPECT_EQ(Value::Type::DICTIONARY, dict_ptr->type());
- }
-
- {
- DictionaryValue dict;
ListValue* list_ptr =
dict.SetList("foo.bar", std::make_unique<base::ListValue>());
EXPECT_EQ(Value::Type::LIST, list_ptr->type());
}
-
- {
- DictionaryValue dict;
- ListValue* list_ptr = dict.SetListWithoutPathExpansion(
- "foo.bar", std::make_unique<base::ListValue>());
- EXPECT_EQ(Value::Type::LIST, list_ptr->type());
- }
}
TEST(ValuesTest, DictionaryRemoval) {
@@ -986,7 +967,7 @@ TEST(ValuesTest, DictionaryRemoval) {
DictionaryValue dict;
dict.Set(key, std::make_unique<Value>());
EXPECT_TRUE(dict.HasKey(key));
- EXPECT_TRUE(dict.Remove(key, NULL));
+ EXPECT_TRUE(dict.Remove(key, nullptr));
EXPECT_FALSE(dict.HasKey(key));
}
}
@@ -1048,12 +1029,12 @@ TEST(ValuesTest, DictionaryRemovePath) {
EXPECT_TRUE(removed_item->IsType(base::Value::Type::INTEGER));
EXPECT_FALSE(dict.HasKey("a.long.way.down"));
EXPECT_FALSE(dict.HasKey("a.long.way"));
- EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
+ EXPECT_TRUE(dict.Get("a.long.key.path", nullptr));
removed_item.reset();
EXPECT_FALSE(dict.RemovePath("a.long.way.down", &removed_item));
EXPECT_FALSE(removed_item);
- EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
+ EXPECT_TRUE(dict.Get("a.long.key.path", nullptr));
removed_item.reset();
EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
@@ -1093,13 +1074,13 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict.get());
ASSERT_NE(copy_dict.get(), &original_dict);
- Value* copy_null = NULL;
+ Value* copy_null = nullptr;
ASSERT_TRUE(copy_dict->Get("null", &copy_null));
ASSERT_TRUE(copy_null);
ASSERT_NE(copy_null, null_weak);
ASSERT_TRUE(copy_null->IsType(Value::Type::NONE));
- Value* copy_bool = NULL;
+ Value* copy_bool = nullptr;
ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
ASSERT_TRUE(copy_bool);
ASSERT_NE(copy_bool, bool_weak);
@@ -1108,7 +1089,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
ASSERT_TRUE(copy_bool_value);
- Value* copy_int = NULL;
+ Value* copy_int = nullptr;
ASSERT_TRUE(copy_dict->Get("int", &copy_int));
ASSERT_TRUE(copy_int);
ASSERT_NE(copy_int, int_weak);
@@ -1117,7 +1098,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
ASSERT_EQ(42, copy_int_value);
- Value* copy_double = NULL;
+ Value* copy_double = nullptr;
ASSERT_TRUE(copy_dict->Get("double", &copy_double));
ASSERT_TRUE(copy_double);
ASSERT_NE(copy_double, double_weak);
@@ -1126,7 +1107,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
ASSERT_EQ(3.14, copy_double_value);
- Value* copy_string = NULL;
+ Value* copy_string = nullptr;
ASSERT_TRUE(copy_dict->Get("string", &copy_string));
ASSERT_TRUE(copy_string);
ASSERT_NE(copy_string, string_weak);
@@ -1138,7 +1119,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_EQ(std::string("hello"), copy_string_value);
ASSERT_EQ(ASCIIToUTF16("hello"), copy_string16_value);
- Value* copy_string16 = NULL;
+ Value* copy_string16 = nullptr;
ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
ASSERT_TRUE(copy_string16);
ASSERT_NE(copy_string16, string16_weak);
@@ -1148,7 +1129,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_EQ(std::string("hello16"), copy_string_value);
ASSERT_EQ(ASCIIToUTF16("hello16"), copy_string16_value);
- Value* copy_binary = NULL;
+ Value* copy_binary = nullptr;
ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
ASSERT_TRUE(copy_binary);
ASSERT_NE(copy_binary, binary_weak);
@@ -1156,12 +1137,12 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_NE(binary_weak->GetBlob().data(), copy_binary->GetBlob().data());
ASSERT_EQ(binary_weak->GetBlob(), copy_binary->GetBlob());
- Value* copy_value = NULL;
+ Value* copy_value = nullptr;
ASSERT_TRUE(copy_dict->Get("list", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, list_weak);
ASSERT_TRUE(copy_value->IsType(Value::Type::LIST));
- ListValue* copy_list = NULL;
+ ListValue* copy_list = nullptr;
ASSERT_TRUE(copy_value->GetAsList(&copy_list));
ASSERT_TRUE(copy_list);
ASSERT_EQ(2U, copy_list->GetSize());
@@ -1182,12 +1163,12 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_list_element_1->GetAsInteger(&copy_list_element_1_value));
ASSERT_EQ(1, copy_list_element_1_value);
- copy_value = NULL;
+ copy_value = nullptr;
ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, dict_weak);
ASSERT_TRUE(copy_value->IsType(Value::Type::DICTIONARY));
- DictionaryValue* copy_nested_dictionary = NULL;
+ DictionaryValue* copy_nested_dictionary = nullptr;
ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
ASSERT_TRUE(copy_nested_dictionary);
EXPECT_TRUE(copy_nested_dictionary->HasKey("key"));
@@ -1229,7 +1210,7 @@ TEST(ValuesTest, Equals) {
// Check if Equals detects differences in only the keys.
copy = dv.CreateDeepCopy();
EXPECT_EQ(dv, *copy);
- copy->Remove("a", NULL);
+ copy->Remove("a", nullptr);
copy->SetBoolean("aa", false);
EXPECT_NE(dv, *copy);
}
@@ -1658,265 +1639,273 @@ TEST(ValuesTest, GetWithNullOutValue) {
main_list.Append(std::make_unique<Value>(dict_value.Clone()));
main_list.Append(std::make_unique<Value>(list_value.Clone()));
- EXPECT_TRUE(main_dict.Get("bool", NULL));
- EXPECT_TRUE(main_dict.Get("int", NULL));
- EXPECT_TRUE(main_dict.Get("double", NULL));
- EXPECT_TRUE(main_dict.Get("string", NULL));
- EXPECT_TRUE(main_dict.Get("binary", NULL));
- EXPECT_TRUE(main_dict.Get("dict", NULL));
- EXPECT_TRUE(main_dict.Get("list", NULL));
- EXPECT_FALSE(main_dict.Get("DNE", NULL));
-
- EXPECT_TRUE(main_dict.GetBoolean("bool", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("int", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("double", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("string", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("binary", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("dict", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("list", NULL));
- EXPECT_FALSE(main_dict.GetBoolean("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetInteger("bool", NULL));
- EXPECT_TRUE(main_dict.GetInteger("int", NULL));
- EXPECT_FALSE(main_dict.GetInteger("double", NULL));
- EXPECT_FALSE(main_dict.GetInteger("string", NULL));
- EXPECT_FALSE(main_dict.GetInteger("binary", NULL));
- EXPECT_FALSE(main_dict.GetInteger("dict", NULL));
- EXPECT_FALSE(main_dict.GetInteger("list", NULL));
- EXPECT_FALSE(main_dict.GetInteger("DNE", NULL));
+ EXPECT_TRUE(main_dict.Get("bool", nullptr));
+ EXPECT_TRUE(main_dict.Get("int", nullptr));
+ EXPECT_TRUE(main_dict.Get("double", nullptr));
+ EXPECT_TRUE(main_dict.Get("string", nullptr));
+ EXPECT_TRUE(main_dict.Get("binary", nullptr));
+ EXPECT_TRUE(main_dict.Get("dict", nullptr));
+ EXPECT_TRUE(main_dict.Get("list", nullptr));
+ EXPECT_FALSE(main_dict.Get("DNE", nullptr));
+
+ EXPECT_TRUE(main_dict.GetBoolean("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("int", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("double", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("string", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("list", nullptr));
+ EXPECT_FALSE(main_dict.GetBoolean("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetInteger("bool", nullptr));
+ EXPECT_TRUE(main_dict.GetInteger("int", nullptr));
+ EXPECT_FALSE(main_dict.GetInteger("double", nullptr));
+ EXPECT_FALSE(main_dict.GetInteger("string", nullptr));
+ EXPECT_FALSE(main_dict.GetInteger("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetInteger("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetInteger("list", nullptr));
+ EXPECT_FALSE(main_dict.GetInteger("DNE", nullptr));
// Both int and double values can be obtained from GetDouble.
- EXPECT_FALSE(main_dict.GetDouble("bool", NULL));
- EXPECT_TRUE(main_dict.GetDouble("int", NULL));
- EXPECT_TRUE(main_dict.GetDouble("double", NULL));
- EXPECT_FALSE(main_dict.GetDouble("string", NULL));
- EXPECT_FALSE(main_dict.GetDouble("binary", NULL));
- EXPECT_FALSE(main_dict.GetDouble("dict", NULL));
- EXPECT_FALSE(main_dict.GetDouble("list", NULL));
- EXPECT_FALSE(main_dict.GetDouble("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetString("bool", static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("int", static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("double", static_cast<std::string*>(NULL)));
- EXPECT_TRUE(main_dict.GetString("string", static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("binary", static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("dict", static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("list", static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("DNE", static_cast<std::string*>(NULL)));
-
- EXPECT_FALSE(main_dict.GetString("bool", static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("int", static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("double", static_cast<string16*>(NULL)));
- EXPECT_TRUE(main_dict.GetString("string", static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("binary", static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("dict", static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("list", static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_dict.GetString("DNE", static_cast<string16*>(NULL)));
-
- EXPECT_FALSE(main_dict.GetBinary("bool", NULL));
- EXPECT_FALSE(main_dict.GetBinary("int", NULL));
- EXPECT_FALSE(main_dict.GetBinary("double", NULL));
- EXPECT_FALSE(main_dict.GetBinary("string", NULL));
- EXPECT_TRUE(main_dict.GetBinary("binary", NULL));
- EXPECT_FALSE(main_dict.GetBinary("dict", NULL));
- EXPECT_FALSE(main_dict.GetBinary("list", NULL));
- EXPECT_FALSE(main_dict.GetBinary("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetDictionary("bool", NULL));
- EXPECT_FALSE(main_dict.GetDictionary("int", NULL));
- EXPECT_FALSE(main_dict.GetDictionary("double", NULL));
- EXPECT_FALSE(main_dict.GetDictionary("string", NULL));
- EXPECT_FALSE(main_dict.GetDictionary("binary", NULL));
- EXPECT_TRUE(main_dict.GetDictionary("dict", NULL));
- EXPECT_FALSE(main_dict.GetDictionary("list", NULL));
- EXPECT_FALSE(main_dict.GetDictionary("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetList("bool", NULL));
- EXPECT_FALSE(main_dict.GetList("int", NULL));
- EXPECT_FALSE(main_dict.GetList("double", NULL));
- EXPECT_FALSE(main_dict.GetList("string", NULL));
- EXPECT_FALSE(main_dict.GetList("binary", NULL));
- EXPECT_FALSE(main_dict.GetList("dict", NULL));
- EXPECT_TRUE(main_dict.GetList("list", NULL));
- EXPECT_FALSE(main_dict.GetList("DNE", NULL));
-
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("bool", NULL));
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("int", NULL));
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("double", NULL));
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("string", NULL));
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("binary", NULL));
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("dict", NULL));
- EXPECT_TRUE(main_dict.GetWithoutPathExpansion("list", NULL));
- EXPECT_FALSE(main_dict.GetWithoutPathExpansion("DNE", NULL));
-
- EXPECT_TRUE(main_dict.GetBooleanWithoutPathExpansion("bool", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("int", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("double", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("string", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("binary", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("dict", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("list", NULL));
- EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("bool", NULL));
- EXPECT_TRUE(main_dict.GetIntegerWithoutPathExpansion("int", NULL));
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("double", NULL));
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("string", NULL));
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("binary", NULL));
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("dict", NULL));
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("list", NULL));
- EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("bool", NULL));
- EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("int", NULL));
- EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("double", NULL));
- EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("string", NULL));
- EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("binary", NULL));
- EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("dict", NULL));
- EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("list", NULL));
- EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("DNE", NULL));
+ EXPECT_FALSE(main_dict.GetDouble("bool", nullptr));
+ EXPECT_TRUE(main_dict.GetDouble("int", nullptr));
+ EXPECT_TRUE(main_dict.GetDouble("double", nullptr));
+ EXPECT_FALSE(main_dict.GetDouble("string", nullptr));
+ EXPECT_FALSE(main_dict.GetDouble("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetDouble("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetDouble("list", nullptr));
+ EXPECT_FALSE(main_dict.GetDouble("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetString("bool", static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("int", static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(
+ main_dict.GetString("double", static_cast<std::string*>(nullptr)));
+ EXPECT_TRUE(
+ main_dict.GetString("string", static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(
+ main_dict.GetString("binary", static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("dict", static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("list", static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("DNE", static_cast<std::string*>(nullptr)));
+
+ EXPECT_FALSE(main_dict.GetString("bool", static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("int", static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("double", static_cast<string16*>(nullptr)));
+ EXPECT_TRUE(main_dict.GetString("string", static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("binary", static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("dict", static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("list", static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_dict.GetString("DNE", static_cast<string16*>(nullptr)));
+
+ EXPECT_FALSE(main_dict.GetBinary("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetBinary("int", nullptr));
+ EXPECT_FALSE(main_dict.GetBinary("double", nullptr));
+ EXPECT_FALSE(main_dict.GetBinary("string", nullptr));
+ EXPECT_TRUE(main_dict.GetBinary("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetBinary("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetBinary("list", nullptr));
+ EXPECT_FALSE(main_dict.GetBinary("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetDictionary("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionary("int", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionary("double", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionary("string", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionary("binary", nullptr));
+ EXPECT_TRUE(main_dict.GetDictionary("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionary("list", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionary("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetList("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetList("int", nullptr));
+ EXPECT_FALSE(main_dict.GetList("double", nullptr));
+ EXPECT_FALSE(main_dict.GetList("string", nullptr));
+ EXPECT_FALSE(main_dict.GetList("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetList("dict", nullptr));
+ EXPECT_TRUE(main_dict.GetList("list", nullptr));
+ EXPECT_FALSE(main_dict.GetList("DNE", nullptr));
+
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("bool", nullptr));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("int", nullptr));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("double", nullptr));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("string", nullptr));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("binary", nullptr));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("dict", nullptr));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("list", nullptr));
+ EXPECT_FALSE(main_dict.GetWithoutPathExpansion("DNE", nullptr));
+
+ EXPECT_TRUE(main_dict.GetBooleanWithoutPathExpansion("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("int", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("double", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("string", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("list", nullptr));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("bool", nullptr));
+ EXPECT_TRUE(main_dict.GetIntegerWithoutPathExpansion("int", nullptr));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("double", nullptr));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("string", nullptr));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("list", nullptr));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("bool", nullptr));
+ EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("int", nullptr));
+ EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("double", nullptr));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("string", nullptr));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("list", nullptr));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("DNE", nullptr));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "bool", static_cast<std::string*>(NULL)));
+ "bool", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "int", static_cast<std::string*>(NULL)));
+ "int", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "double", static_cast<std::string*>(NULL)));
+ "double", static_cast<std::string*>(nullptr)));
EXPECT_TRUE(main_dict.GetStringWithoutPathExpansion(
- "string", static_cast<std::string*>(NULL)));
+ "string", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "binary", static_cast<std::string*>(NULL)));
+ "binary", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "dict", static_cast<std::string*>(NULL)));
+ "dict", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "list", static_cast<std::string*>(NULL)));
+ "list", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "DNE", static_cast<std::string*>(NULL)));
+ "DNE", static_cast<std::string*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "bool", static_cast<string16*>(NULL)));
+ "bool", static_cast<string16*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "int", static_cast<string16*>(NULL)));
+ "int", static_cast<string16*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "double", static_cast<string16*>(NULL)));
+ "double", static_cast<string16*>(nullptr)));
EXPECT_TRUE(main_dict.GetStringWithoutPathExpansion(
- "string", static_cast<string16*>(NULL)));
+ "string", static_cast<string16*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "binary", static_cast<string16*>(NULL)));
+ "binary", static_cast<string16*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "dict", static_cast<string16*>(NULL)));
+ "dict", static_cast<string16*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "list", static_cast<string16*>(NULL)));
+ "list", static_cast<string16*>(nullptr)));
EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
- "DNE", static_cast<string16*>(NULL)));
+ "DNE", static_cast<string16*>(nullptr)));
// There is no GetBinaryWithoutPathExpansion for some reason, but if there
// were it should be tested here...
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("bool", NULL));
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("int", NULL));
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("double", NULL));
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("string", NULL));
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("binary", NULL));
- EXPECT_TRUE(main_dict.GetDictionaryWithoutPathExpansion("dict", NULL));
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("list", NULL));
- EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("DNE", NULL));
-
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("bool", NULL));
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("int", NULL));
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("double", NULL));
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("string", NULL));
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("binary", NULL));
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("dict", NULL));
- EXPECT_TRUE(main_dict.GetListWithoutPathExpansion("list", NULL));
- EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("DNE", NULL));
-
- EXPECT_TRUE(main_list.Get(0, NULL));
- EXPECT_TRUE(main_list.Get(1, NULL));
- EXPECT_TRUE(main_list.Get(2, NULL));
- EXPECT_TRUE(main_list.Get(3, NULL));
- EXPECT_TRUE(main_list.Get(4, NULL));
- EXPECT_TRUE(main_list.Get(5, NULL));
- EXPECT_TRUE(main_list.Get(6, NULL));
- EXPECT_FALSE(main_list.Get(7, NULL));
-
- EXPECT_TRUE(main_list.GetBoolean(0, NULL));
- EXPECT_FALSE(main_list.GetBoolean(1, NULL));
- EXPECT_FALSE(main_list.GetBoolean(2, NULL));
- EXPECT_FALSE(main_list.GetBoolean(3, NULL));
- EXPECT_FALSE(main_list.GetBoolean(4, NULL));
- EXPECT_FALSE(main_list.GetBoolean(5, NULL));
- EXPECT_FALSE(main_list.GetBoolean(6, NULL));
- EXPECT_FALSE(main_list.GetBoolean(7, NULL));
-
- EXPECT_FALSE(main_list.GetInteger(0, NULL));
- EXPECT_TRUE(main_list.GetInteger(1, NULL));
- EXPECT_FALSE(main_list.GetInteger(2, NULL));
- EXPECT_FALSE(main_list.GetInteger(3, NULL));
- EXPECT_FALSE(main_list.GetInteger(4, NULL));
- EXPECT_FALSE(main_list.GetInteger(5, NULL));
- EXPECT_FALSE(main_list.GetInteger(6, NULL));
- EXPECT_FALSE(main_list.GetInteger(7, NULL));
-
- EXPECT_FALSE(main_list.GetDouble(0, NULL));
- EXPECT_TRUE(main_list.GetDouble(1, NULL));
- EXPECT_TRUE(main_list.GetDouble(2, NULL));
- EXPECT_FALSE(main_list.GetDouble(3, NULL));
- EXPECT_FALSE(main_list.GetDouble(4, NULL));
- EXPECT_FALSE(main_list.GetDouble(5, NULL));
- EXPECT_FALSE(main_list.GetDouble(6, NULL));
- EXPECT_FALSE(main_list.GetDouble(7, NULL));
-
- EXPECT_FALSE(main_list.GetString(0, static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_list.GetString(1, static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_list.GetString(2, static_cast<std::string*>(NULL)));
- EXPECT_TRUE(main_list.GetString(3, static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_list.GetString(4, static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_list.GetString(5, static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_list.GetString(6, static_cast<std::string*>(NULL)));
- EXPECT_FALSE(main_list.GetString(7, static_cast<std::string*>(NULL)));
-
- EXPECT_FALSE(main_list.GetString(0, static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_list.GetString(1, static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_list.GetString(2, static_cast<string16*>(NULL)));
- EXPECT_TRUE(main_list.GetString(3, static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_list.GetString(4, static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_list.GetString(5, static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_list.GetString(6, static_cast<string16*>(NULL)));
- EXPECT_FALSE(main_list.GetString(7, static_cast<string16*>(NULL)));
-
- EXPECT_FALSE(main_list.GetBinary(0, NULL));
- EXPECT_FALSE(main_list.GetBinary(1, NULL));
- EXPECT_FALSE(main_list.GetBinary(2, NULL));
- EXPECT_FALSE(main_list.GetBinary(3, NULL));
- EXPECT_TRUE(main_list.GetBinary(4, NULL));
- EXPECT_FALSE(main_list.GetBinary(5, NULL));
- EXPECT_FALSE(main_list.GetBinary(6, NULL));
- EXPECT_FALSE(main_list.GetBinary(7, NULL));
-
- EXPECT_FALSE(main_list.GetDictionary(0, NULL));
- EXPECT_FALSE(main_list.GetDictionary(1, NULL));
- EXPECT_FALSE(main_list.GetDictionary(2, NULL));
- EXPECT_FALSE(main_list.GetDictionary(3, NULL));
- EXPECT_FALSE(main_list.GetDictionary(4, NULL));
- EXPECT_TRUE(main_list.GetDictionary(5, NULL));
- EXPECT_FALSE(main_list.GetDictionary(6, NULL));
- EXPECT_FALSE(main_list.GetDictionary(7, NULL));
-
- EXPECT_FALSE(main_list.GetList(0, NULL));
- EXPECT_FALSE(main_list.GetList(1, NULL));
- EXPECT_FALSE(main_list.GetList(2, NULL));
- EXPECT_FALSE(main_list.GetList(3, NULL));
- EXPECT_FALSE(main_list.GetList(4, NULL));
- EXPECT_FALSE(main_list.GetList(5, NULL));
- EXPECT_TRUE(main_list.GetList(6, NULL));
- EXPECT_FALSE(main_list.GetList(7, NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("int", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("double", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("string", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("binary", nullptr));
+ EXPECT_TRUE(main_dict.GetDictionaryWithoutPathExpansion("dict", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("list", nullptr));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("DNE", nullptr));
+
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("bool", nullptr));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("int", nullptr));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("double", nullptr));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("string", nullptr));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("binary", nullptr));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("dict", nullptr));
+ EXPECT_TRUE(main_dict.GetListWithoutPathExpansion("list", nullptr));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("DNE", nullptr));
+
+ EXPECT_TRUE(main_list.Get(0, nullptr));
+ EXPECT_TRUE(main_list.Get(1, nullptr));
+ EXPECT_TRUE(main_list.Get(2, nullptr));
+ EXPECT_TRUE(main_list.Get(3, nullptr));
+ EXPECT_TRUE(main_list.Get(4, nullptr));
+ EXPECT_TRUE(main_list.Get(5, nullptr));
+ EXPECT_TRUE(main_list.Get(6, nullptr));
+ EXPECT_FALSE(main_list.Get(7, nullptr));
+
+ EXPECT_TRUE(main_list.GetBoolean(0, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(1, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(2, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(3, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(4, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(5, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(6, nullptr));
+ EXPECT_FALSE(main_list.GetBoolean(7, nullptr));
+
+ EXPECT_FALSE(main_list.GetInteger(0, nullptr));
+ EXPECT_TRUE(main_list.GetInteger(1, nullptr));
+ EXPECT_FALSE(main_list.GetInteger(2, nullptr));
+ EXPECT_FALSE(main_list.GetInteger(3, nullptr));
+ EXPECT_FALSE(main_list.GetInteger(4, nullptr));
+ EXPECT_FALSE(main_list.GetInteger(5, nullptr));
+ EXPECT_FALSE(main_list.GetInteger(6, nullptr));
+ EXPECT_FALSE(main_list.GetInteger(7, nullptr));
+
+ EXPECT_FALSE(main_list.GetDouble(0, nullptr));
+ EXPECT_TRUE(main_list.GetDouble(1, nullptr));
+ EXPECT_TRUE(main_list.GetDouble(2, nullptr));
+ EXPECT_FALSE(main_list.GetDouble(3, nullptr));
+ EXPECT_FALSE(main_list.GetDouble(4, nullptr));
+ EXPECT_FALSE(main_list.GetDouble(5, nullptr));
+ EXPECT_FALSE(main_list.GetDouble(6, nullptr));
+ EXPECT_FALSE(main_list.GetDouble(7, nullptr));
+
+ EXPECT_FALSE(main_list.GetString(0, static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(1, static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(2, static_cast<std::string*>(nullptr)));
+ EXPECT_TRUE(main_list.GetString(3, static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(4, static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(5, static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(6, static_cast<std::string*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(7, static_cast<std::string*>(nullptr)));
+
+ EXPECT_FALSE(main_list.GetString(0, static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(1, static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(2, static_cast<string16*>(nullptr)));
+ EXPECT_TRUE(main_list.GetString(3, static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(4, static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(5, static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(6, static_cast<string16*>(nullptr)));
+ EXPECT_FALSE(main_list.GetString(7, static_cast<string16*>(nullptr)));
+
+ EXPECT_FALSE(main_list.GetDictionary(0, nullptr));
+ EXPECT_FALSE(main_list.GetDictionary(1, nullptr));
+ EXPECT_FALSE(main_list.GetDictionary(2, nullptr));
+ EXPECT_FALSE(main_list.GetDictionary(3, nullptr));
+ EXPECT_FALSE(main_list.GetDictionary(4, nullptr));
+ EXPECT_TRUE(main_list.GetDictionary(5, nullptr));
+ EXPECT_FALSE(main_list.GetDictionary(6, nullptr));
+ EXPECT_FALSE(main_list.GetDictionary(7, nullptr));
+
+ EXPECT_FALSE(main_list.GetList(0, nullptr));
+ EXPECT_FALSE(main_list.GetList(1, nullptr));
+ EXPECT_FALSE(main_list.GetList(2, nullptr));
+ EXPECT_FALSE(main_list.GetList(3, nullptr));
+ EXPECT_FALSE(main_list.GetList(4, nullptr));
+ EXPECT_FALSE(main_list.GetList(5, nullptr));
+ EXPECT_TRUE(main_list.GetList(6, nullptr));
+ EXPECT_FALSE(main_list.GetList(7, nullptr));
}
TEST(ValuesTest, SelfSwap) {
base::Value test(1);
std::swap(test, test);
- EXPECT_TRUE(test.GetInt() == 1);
+ EXPECT_EQ(1, test.GetInt());
+}
+
+TEST(ValuesTest, FromToUniquePtrValue) {
+ std::unique_ptr<DictionaryValue> dict = std::make_unique<DictionaryValue>();
+ dict->SetString("name", "Froogle");
+ dict->SetString("url", "http://froogle.com");
+ Value dict_copy = dict->Clone();
+
+ Value dict_converted = Value::FromUniquePtrValue(std::move(dict));
+ EXPECT_EQ(dict_copy, dict_converted);
+
+ std::unique_ptr<Value> val =
+ Value::ToUniquePtrValue(std::move(dict_converted));
+ EXPECT_EQ(dict_copy, *val);
}
} // namespace base
diff --git a/chromium/base/version.cc b/chromium/base/version.cc
index ca97a842226..7e897b238aa 100644
--- a/chromium/base/version.cc
+++ b/chromium/base/version.cc
@@ -77,13 +77,11 @@ int CompareVersionComponents(const std::vector<uint32_t>& components1,
} // namespace
-Version::Version() {
-}
+Version::Version() = default;
Version::Version(const Version& other) = default;
-Version::~Version() {
-}
+Version::~Version() = default;
Version::Version(const std::string& version_str) {
std::vector<uint32_t> parsed;
diff --git a/chromium/base/vlog.cc b/chromium/base/vlog.cc
index c00e63185a3..fbe18976fa6 100644
--- a/chromium/base/vlog.cc
+++ b/chromium/base/vlog.cc
@@ -49,7 +49,7 @@ VlogInfo::VlogInfo(const std::string& v_switch,
const std::string& vmodule_switch,
int* min_log_level)
: min_log_level_(min_log_level) {
- DCHECK(min_log_level != NULL);
+ DCHECK_NE(min_log_level, nullptr);
int vlog_level = 0;
if (!v_switch.empty()) {
@@ -78,7 +78,7 @@ VlogInfo::VlogInfo(const std::string& v_switch,
}
}
-VlogInfo::~VlogInfo() {}
+VlogInfo::~VlogInfo() = default;
namespace {
diff --git a/chromium/base/win/OWNERS b/chromium/base/win/OWNERS
index 69c8613e25e..082fae6a95e 100644
--- a/chromium/base/win/OWNERS
+++ b/chromium/base/win/OWNERS
@@ -1,6 +1,6 @@
-cpu@chromium.org
grt@chromium.org
jschuh@chromium.org
+robliao@chromium.org
scottmg@chromium.org
# COMPONENT: Internals>PlatformIntegration
diff --git a/chromium/base/win/com_init_check_hook.cc b/chromium/base/win/com_init_check_hook.cc
index 8c320e8ba47..be40598b792 100644
--- a/chromium/base/win/com_init_check_hook.cc
+++ b/chromium/base/win/com_init_check_hook.cc
@@ -149,11 +149,11 @@ class HookManager {
co_create_instance_padded_address_);
return;
} else if (format == HotpatchPlaceholderFormat::EXTERNALLY_PATCHED) {
- // TODO(robliao): Make this crash after resolving http://crbug.com/737090.
hotpatch_placeholder_format_ = format;
- DLOG(WARNING)
- << "CoCreateInstance appears to be previously patched. Skipping. ("
- << FirstSevenBytesToString(co_create_instance_padded_address_) << ")";
+ NOTREACHED() << "CoCreateInstance appears to be previously patched. ("
+ << FirstSevenBytesToString(
+ co_create_instance_padded_address_)
+ << ")";
return;
}
diff --git a/chromium/base/win/com_init_check_hook.h b/chromium/base/win/com_init_check_hook.h
index c064eea1e93..c99823356c5 100644
--- a/chromium/base/win/com_init_check_hook.h
+++ b/chromium/base/win/com_init_check_hook.h
@@ -19,7 +19,8 @@ namespace win {
#if DCHECK_IS_ON() && defined(ARCH_CPU_X86_FAMILY) && \
defined(ARCH_CPU_32_BITS) && !defined(GOOGLE_CHROME_BUILD) && \
- !defined(OFFICIAL_BUILD)
+ !defined(OFFICIAL_BUILD) && \
+ !defined(COM_INIT_CHECK_HOOK_DISABLED) // See crbug/737090 for details.
#define COM_INIT_CHECK_HOOK_ENABLED
#endif
diff --git a/chromium/base/win/com_init_check_hook_unittest.cc b/chromium/base/win/com_init_check_hook_unittest.cc
index 00677a7d633..32aede46a00 100644
--- a/chromium/base/win/com_init_check_hook_unittest.cc
+++ b/chromium/base/win/com_init_check_hook_unittest.cc
@@ -114,9 +114,8 @@ TEST(ComInitCheckHook, ExternallyHooked) {
reinterpret_cast<void*>(co_create_instance_address),
reinterpret_cast<const void*>(&jmp_byte), sizeof(jmp_byte)));
- // This line shouldn't crash if a hook is already in place.
- // TODO(robliao): Make it crash after resolving http://crbug.com/737090.
- { ComInitCheckHook com_check_hook; }
+ // Externally patched instances should crash so we catch these cases on bots.
+ EXPECT_DCHECK_DEATH({ ComInitCheckHook com_check_hook; });
// If this call fails, really bad things are going to happen to other tests
// so CHECK here.
diff --git a/chromium/base/win/com_init_util_unittest.cc b/chromium/base/win/com_init_util_unittest.cc
index f5387d0ce1b..fb897dd916c 100644
--- a/chromium/base/win/com_init_util_unittest.cc
+++ b/chromium/base/win/com_init_util_unittest.cc
@@ -20,21 +20,21 @@ TEST(ComInitUtil, AssertUninitialized) {
// status will be updated. This covers that case.
{
ScopedCOMInitializer com_initializer;
- ASSERT_TRUE(com_initializer.succeeded());
+ ASSERT_TRUE(com_initializer.Succeeded());
}
EXPECT_DCHECK_DEATH(AssertComInitialized());
}
TEST(ComInitUtil, AssertSTAInitialized) {
ScopedCOMInitializer com_initializer;
- ASSERT_TRUE(com_initializer.succeeded());
+ ASSERT_TRUE(com_initializer.Succeeded());
AssertComInitialized();
}
TEST(ComInitUtil, AssertMTAInitialized) {
ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
- ASSERT_TRUE(com_initializer.succeeded());
+ ASSERT_TRUE(com_initializer.Succeeded());
AssertComInitialized();
}
@@ -50,7 +50,7 @@ TEST(ComInitUtil, AssertNoneApartmentTypeUninitialized) {
// status will be updated. This covers that case.
{
ScopedCOMInitializer com_initializer;
- ASSERT_TRUE(com_initializer.succeeded());
+ ASSERT_TRUE(com_initializer.Succeeded());
}
AssertComApartmentType(ComApartmentType::NONE);
EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::STA));
diff --git a/chromium/base/win/core_winrt_util.cc b/chromium/base/win/core_winrt_util.cc
index aad9d477ee3..9ae2d352164 100644
--- a/chromium/base/win/core_winrt_util.cc
+++ b/chromium/base/win/core_winrt_util.cc
@@ -4,8 +4,6 @@
#include "base/win/core_winrt_util.h"
-#include <roapi.h>
-
namespace {
void* LoadComBaseFunction(const char* function_name) {
@@ -13,6 +11,20 @@ void* LoadComBaseFunction(const char* function_name) {
return handle ? ::GetProcAddress(handle, function_name) : nullptr;
}
+decltype(&::RoInitialize) GetRoInitializeFunction() {
+ static decltype(&::RoInitialize) const function =
+ reinterpret_cast<decltype(&::RoInitialize)>(
+ LoadComBaseFunction("RoInitialize"));
+ return function;
+}
+
+decltype(&::RoUninitialize) GetRoUninitializeFunction() {
+ static decltype(&::RoUninitialize) const function =
+ reinterpret_cast<decltype(&::RoUninitialize)>(
+ LoadComBaseFunction("RoUninitialize"));
+ return function;
+}
+
decltype(&::RoActivateInstance) GetRoActivateInstanceFunction() {
static decltype(&::RoActivateInstance) const function =
reinterpret_cast<decltype(&::RoActivateInstance)>(
@@ -34,23 +46,34 @@ namespace win {
bool ResolveCoreWinRTDelayload() {
// TODO(finnur): Add AssertIOAllowed once crbug.com/770193 is fixed.
+ return GetRoInitializeFunction() && GetRoUninitializeFunction() &&
+ GetRoActivateInstanceFunction() && GetRoGetActivationFactoryFunction();
+}
+
+HRESULT RoInitialize(RO_INIT_TYPE init_type) {
+ auto ro_initialize_func = GetRoInitializeFunction();
+ if (!ro_initialize_func)
+ return E_FAIL;
+ return ro_initialize_func(init_type);
+}
- return GetRoActivateInstanceFunction() && GetRoGetActivationFactoryFunction();
+void RoUninitialize() {
+ auto ro_uninitialize_func = GetRoUninitializeFunction();
+ if (ro_uninitialize_func)
+ ro_uninitialize_func();
}
HRESULT RoGetActivationFactory(HSTRING class_id,
const IID& iid,
void** out_factory) {
- decltype(&::RoGetActivationFactory) get_factory_func =
- GetRoGetActivationFactoryFunction();
+ auto get_factory_func = GetRoGetActivationFactoryFunction();
if (!get_factory_func)
return E_FAIL;
return get_factory_func(class_id, iid, out_factory);
}
HRESULT RoActivateInstance(HSTRING class_id, IInspectable** instance) {
- decltype(&::RoActivateInstance) activate_instance_func =
- GetRoActivateInstanceFunction();
+ auto activate_instance_func = GetRoActivateInstanceFunction();
if (!activate_instance_func)
return E_FAIL;
return activate_instance_func(class_id, instance);
diff --git a/chromium/base/win/core_winrt_util.h b/chromium/base/win/core_winrt_util.h
index 4a613c0cca8..c86aed672de 100644
--- a/chromium/base/win/core_winrt_util.h
+++ b/chromium/base/win/core_winrt_util.h
@@ -7,9 +7,12 @@
#include <hstring.h>
#include <inspectable.h>
+#include <roapi.h>
#include <windef.h>
#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/win/scoped_hstring.h"
namespace base {
namespace win {
@@ -21,7 +24,11 @@ namespace win {
BASE_EXPORT bool ResolveCoreWinRTDelayload();
// The following stubs are provided for when component build is enabled, in
-// order to avoid the propogation of delay-loading CoreWinRT to other modules.
+// order to avoid the propagation of delay-loading CoreWinRT to other modules.
+
+BASE_EXPORT HRESULT RoInitialize(RO_INIT_TYPE init_type);
+
+BASE_EXPORT void RoUninitialize();
BASE_EXPORT HRESULT RoGetActivationFactory(HSTRING class_id,
const IID& iid,
@@ -30,6 +37,17 @@ BASE_EXPORT HRESULT RoGetActivationFactory(HSTRING class_id,
BASE_EXPORT HRESULT RoActivateInstance(HSTRING class_id,
IInspectable** instance);
+// Retrieves an activation factory for the type specified.
+template <typename InterfaceType, char16 const* runtime_class_id>
+HRESULT GetActivationFactory(InterfaceType** factory) {
+ ScopedHString class_id_hstring = ScopedHString::Create(runtime_class_id);
+ if (!class_id_hstring.is_valid())
+ return E_FAIL;
+
+ return base::win::RoGetActivationFactory(class_id_hstring.get(),
+ IID_PPV_ARGS(factory));
+}
+
} // namespace win
} // namespace base
diff --git a/chromium/base/win/core_winrt_util_unittest.cc b/chromium/base/win/core_winrt_util_unittest.cc
index 883077d0909..11d08b87597 100644
--- a/chromium/base/win/core_winrt_util_unittest.cc
+++ b/chromium/base/win/core_winrt_util_unittest.cc
@@ -4,6 +4,7 @@
#include "base/win/core_winrt_util.h"
+#include "base/win/com_init_util.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/windows_version.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -12,13 +13,22 @@ namespace base {
namespace win {
TEST(CoreWinrtUtilTest, PreloadFunctions) {
- ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
-
if (GetVersion() < VERSION_WIN8)
EXPECT_FALSE(ResolveCoreWinRTDelayload());
else
EXPECT_TRUE(ResolveCoreWinRTDelayload());
}
+TEST(CoreWinrtUtilTest, RoInitializeAndUninitialize) {
+ if (GetVersion() < VERSION_WIN8)
+ return;
+
+ ASSERT_TRUE(ResolveCoreWinRTDelayload());
+ ASSERT_HRESULT_SUCCEEDED(base::win::RoInitialize(RO_INIT_MULTITHREADED));
+ AssertComApartmentType(ComApartmentType::MTA);
+ base::win::RoUninitialize();
+ AssertComApartmentType(ComApartmentType::NONE);
+}
+
} // namespace win
} // namespace base
diff --git a/chromium/base/win/scoped_com_initializer.cc b/chromium/base/win/scoped_com_initializer.cc
new file mode 100644
index 00000000000..73e1b5cf28d
--- /dev/null
+++ b/chromium/base/win/scoped_com_initializer.cc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_com_initializer.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+ScopedCOMInitializer::ScopedCOMInitializer() {
+ Initialize(COINIT_APARTMENTTHREADED);
+}
+
+ScopedCOMInitializer::ScopedCOMInitializer(SelectMTA mta) {
+ Initialize(COINIT_MULTITHREADED);
+}
+
+ScopedCOMInitializer::~ScopedCOMInitializer() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ if (Succeeded())
+ CoUninitialize();
+}
+
+bool ScopedCOMInitializer::Succeeded() const {
+ return SUCCEEDED(hr_);
+}
+
+void ScopedCOMInitializer::Initialize(COINIT init) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ hr_ = CoInitializeEx(NULL, init);
+ DCHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/scoped_com_initializer.h b/chromium/base/win/scoped_com_initializer.h
index 70deba49031..3bb57954939 100644
--- a/chromium/base/win/scoped_com_initializer.h
+++ b/chromium/base/win/scoped_com_initializer.h
@@ -7,10 +7,10 @@
#include <objbase.h>
-#include "base/logging.h"
+#include "base/base_export.h"
#include "base/macros.h"
#include "base/threading/thread_checker.h"
-#include "build/build_config.h"
+#include "base/win/scoped_windows_thread_environment.h"
namespace base {
namespace win {
@@ -22,35 +22,24 @@ namespace win {
// similar lifetime as the thread itself. You should not be using this in
// random utility functions that make COM calls -- instead ensure these
// functions are running on a COM-supporting thread!
-class ScopedCOMInitializer {
+class BASE_EXPORT ScopedCOMInitializer : public ScopedWindowsThreadEnvironment {
public:
// Enum value provided to initialize the thread as an MTA instead of STA.
enum SelectMTA { kMTA };
// Constructor for STA initialization.
- ScopedCOMInitializer() {
- Initialize(COINIT_APARTMENTTHREADED);
- }
+ ScopedCOMInitializer();
// Constructor for MTA initialization.
- explicit ScopedCOMInitializer(SelectMTA mta) {
- Initialize(COINIT_MULTITHREADED);
- }
+ explicit ScopedCOMInitializer(SelectMTA mta);
- ~ScopedCOMInitializer() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- if (succeeded())
- CoUninitialize();
- }
+ ~ScopedCOMInitializer() override;
- bool succeeded() const { return SUCCEEDED(hr_); }
+ // ScopedWindowsThreadEnvironment:
+ bool Succeeded() const override;
private:
- void Initialize(COINIT init) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- hr_ = CoInitializeEx(NULL, init);
- DCHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
- }
+ void Initialize(COINIT init);
HRESULT hr_;
THREAD_CHECKER(thread_checker_);
diff --git a/chromium/base/win/scoped_comptr.h b/chromium/base/win/scoped_comptr.h
deleted file mode 100644
index 6f40050c5e5..00000000000
--- a/chromium/base/win/scoped_comptr.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_WIN_SCOPED_COMPTR_H_
-#define BASE_WIN_SCOPED_COMPTR_H_
-
-#include <wrl/client.h>
-
-namespace base {
-namespace win {
-
-template <typename T>
-using ScopedComPtr = Microsoft::WRL::ComPtr<T>;
-
-} // namespace win
-} // namespace base
-
-#endif // BASE_WIN_SCOPED_COMPTR_H_
diff --git a/chromium/base/win/scoped_handle.cc b/chromium/base/win/scoped_handle.cc
index b2d7d595ca0..d8c92124dfa 100644
--- a/chromium/base/win/scoped_handle.cc
+++ b/chromium/base/win/scoped_handle.cc
@@ -196,7 +196,8 @@ void ActiveVerifier::StartTracking(HANDLE handle, const void* owner,
if (!result.second) {
Info other = result.first->second;
base::debug::Alias(&other);
- base::debug::Alias(&creation_stack_);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
CHECK(false); // Attempt to start tracking already tracked handle.
}
}
@@ -209,14 +210,16 @@ void ActiveVerifier::StopTracking(HANDLE handle, const void* owner,
AutoNativeLock lock(*lock_);
HandleMap::iterator i = map_.find(handle);
if (i == map_.end()) {
- base::debug::Alias(&creation_stack_);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
CHECK(false); // Attempting to close an untracked handle.
}
Info other = i->second;
if (other.owner != owner) {
base::debug::Alias(&other);
- base::debug::Alias(&creation_stack_);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
CHECK(false); // Attempting to close a handle not owned by opener.
}
@@ -241,7 +244,8 @@ void ActiveVerifier::OnHandleBeingClosed(HANDLE handle) {
Info other = i->second;
base::debug::Alias(&other);
- base::debug::Alias(&creation_stack_);
+ auto creation_stack = creation_stack_;
+ base::debug::Alias(&creation_stack);
CHECK(false); // CloseHandle called on tracked handle.
}
diff --git a/chromium/base/win/scoped_hstring.h b/chromium/base/win/scoped_hstring.h
index 5d60c111346..6ba1dab6b3e 100644
--- a/chromium/base/win/scoped_hstring.h
+++ b/chromium/base/win/scoped_hstring.h
@@ -52,7 +52,7 @@ namespace win {
// ScopedHString string(win_string);
//
class BASE_EXPORT ScopedHString
- : public ScopedGeneric<HSTRING, internal::ScopedHStringTraits> {
+ : public ScopedGeneric<HSTRING, base::internal::ScopedHStringTraits> {
public:
// Constructs a ScopedHString from an HSTRING, and takes ownership of |hstr|.
explicit ScopedHString(HSTRING hstr);
diff --git a/chromium/base/win/scoped_variant.h b/chromium/base/win/scoped_variant.h
index 16d43f99b6c..81f4b2b15f6 100644
--- a/chromium/base/win/scoped_variant.h
+++ b/chromium/base/win/scoped_variant.h
@@ -125,8 +125,9 @@ class BASE_EXPORT ScopedVariant {
// over that.
const VARIANT* ptr() const { return &var_; }
- // Like other scoped classes (e.g scoped_refptr, ScopedComPtr, ScopedBstr)
- // we support the assignment operator for the type we wrap.
+ // Like other scoped classes (e.g. scoped_refptr, ScopedBstr,
+ // Microsoft::WRL::ComPtr) we support the assignment operator for the type we
+ // wrap.
ScopedVariant& operator=(const VARIANT& var);
// A hack to pass a pointer to the variant where the accepting
diff --git a/chromium/base/win/scoped_windows_thread_environment.h b/chromium/base/win/scoped_windows_thread_environment.h
new file mode 100644
index 00000000000..51f2a0d981a
--- /dev/null
+++ b/chromium/base/win/scoped_windows_thread_environment.h
@@ -0,0 +1,28 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_WINDOWS_THREAD_ENVIRONMENT_H_
+#define BASE_WIN_SCOPED_WINDOWS_THREAD_ENVIRONMENT_H_
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Serves as a root class for ScopedCOMInitializer and ScopedWinrtInitializer.
+class ScopedWindowsThreadEnvironment {
+ public:
+ ScopedWindowsThreadEnvironment() {}
+ virtual ~ScopedWindowsThreadEnvironment() {}
+
+ virtual bool Succeeded() const = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedWindowsThreadEnvironment);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_WINDOWS_THREAD_ENVIRONMENT_H_
diff --git a/chromium/base/win/scoped_winrt_initializer.cc b/chromium/base/win/scoped_winrt_initializer.cc
new file mode 100644
index 00000000000..e05679ab33c
--- /dev/null
+++ b/chromium/base/win/scoped_winrt_initializer.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_winrt_initializer.h"
+
+#include "base/logging.h"
+#include "base/win/com_init_util.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+
+ScopedWinrtInitializer::ScopedWinrtInitializer()
+ : hr_(base::win::RoInitialize(RO_INIT_MULTITHREADED)) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_GE(GetVersion(), VERSION_WIN8);
+#if DCHECK_IS_ON()
+ if (SUCCEEDED(hr_))
+ AssertComApartmentType(ComApartmentType::MTA);
+ else
+ DCHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
+#endif
+}
+
+ScopedWinrtInitializer::~ScopedWinrtInitializer() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ if (SUCCEEDED(hr_))
+ base::win::RoUninitialize();
+}
+
+bool ScopedWinrtInitializer::Succeeded() const {
+ return SUCCEEDED(hr_);
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/scoped_winrt_initializer.h b/chromium/base/win/scoped_winrt_initializer.h
new file mode 100644
index 00000000000..7c765150890
--- /dev/null
+++ b/chromium/base/win/scoped_winrt_initializer.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_WINRT_INITIALIZER_H_
+#define BASE_WIN_SCOPED_WINRT_INITIALIZER_H_
+
+#include <objbase.h>
+
+#include "base/base_export.h"
+#include "base/threading/thread_checker.h"
+#include "base/win/scoped_windows_thread_environment.h"
+
+namespace base {
+namespace win {
+
+// Initializes the Windows Runtime in the constructor and uninitializes the
+// Windows Runtime in the destructor. As a side effect, COM is also initialized
+// as an MTA in the constructor and correspondingly uninitialized in the
+// destructor.
+//
+// Generally, you should only use this on Windows 8 or above. It is redundant
+// to use ScopedCOMInitializer in conjunction with ScopedWinrtInitializer.
+//
+// WARNING: This should only be used once per thread, ideally scoped to a
+// similar lifetime as the thread itself. You should not be using this in random
+// utility functions that make Windows Runtime calls -- instead ensure these
+// functions are running on a Windows Runtime supporting thread!
+class BASE_EXPORT ScopedWinrtInitializer
+ : public ScopedWindowsThreadEnvironment {
+ public:
+ ScopedWinrtInitializer();
+ ~ScopedWinrtInitializer() override;
+
+ // ScopedWindowsThreadEnvironment:
+ bool Succeeded() const override;
+
+ private:
+ const HRESULT hr_;
+ THREAD_CHECKER(thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedWinrtInitializer);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_WINRT_INITIALIZER_H_
diff --git a/chromium/base/win/scoped_winrt_initializer_unittest.cc b/chromium/base/win/scoped_winrt_initializer_unittest.cc
new file mode 100644
index 00000000000..9df11872168
--- /dev/null
+++ b/chromium/base/win/scoped_winrt_initializer_unittest.cc
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_winrt_initializer.h"
+
+#include "base/test/gtest_util.h"
+#include "base/win/com_init_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(ScopedWinrtInitializer, BasicFunctionality) {
+ if (GetVersion() < VERSION_WIN8)
+ return;
+
+ AssertComApartmentType(ComApartmentType::NONE);
+ {
+ ScopedWinrtInitializer scoped_winrt_initializer;
+ AssertComApartmentType(ComApartmentType::MTA);
+ }
+ AssertComApartmentType(ComApartmentType::NONE);
+}
+
+TEST(ScopedWinrtInitializer, ApartmentChangeCheck) {
+ if (GetVersion() < VERSION_WIN8)
+ return;
+
+ ScopedCOMInitializer com_initializer;
+ // ScopedCOMInitializer initialized an STA and the following should be a
+ // failed request for an MTA.
+ EXPECT_DCHECK_DEATH({ ScopedWinrtInitializer scoped_winrt_initializer; });
+}
+
+TEST(ScopedWinrtInitializer, VersionCheck) {
+ if (GetVersion() >= VERSION_WIN8)
+ return;
+
+ // ScopedWinrtInitializer is unsupported on versions prior to Windows 8.
+ EXPECT_DCHECK_DEATH({ ScopedWinrtInitializer scoped_winrt_initializer; });
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/shortcut.cc b/chromium/base/win/shortcut.cc
index 95adfce06d2..56634527e2a 100644
--- a/chromium/base/win/shortcut.cc
+++ b/chromium/base/win/shortcut.cc
@@ -8,10 +8,10 @@
#include <shellapi.h>
#include <shlobj.h>
#include <propkey.h>
+#include <wrl/client.h>
#include "base/files/file_util.h"
#include "base/threading/thread_restrictions.h"
-#include "base/win/scoped_comptr.h"
#include "base/win/scoped_propvariant.h"
#include "base/win/win_util.h"
#include "base/win/windows_version.h"
@@ -21,15 +21,16 @@ namespace win {
namespace {
+using Microsoft::WRL::ComPtr;
+
// Initializes |i_shell_link| and |i_persist_file| (releasing them first if they
// are already initialized).
// If |shortcut| is not NULL, loads |shortcut| into |i_persist_file|.
// If any of the above steps fail, both |i_shell_link| and |i_persist_file| will
// be released.
-void InitializeShortcutInterfaces(
- const wchar_t* shortcut,
- ScopedComPtr<IShellLink>* i_shell_link,
- ScopedComPtr<IPersistFile>* i_persist_file) {
+void InitializeShortcutInterfaces(const wchar_t* shortcut,
+ ComPtr<IShellLink>* i_shell_link,
+ ComPtr<IPersistFile>* i_persist_file) {
i_shell_link->Reset();
i_persist_file->Reset();
if (FAILED(::CoCreateInstance(CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER,
@@ -56,7 +57,7 @@ ShortcutProperties::~ShortcutProperties() {
bool CreateOrUpdateShortcutLink(const FilePath& shortcut_path,
const ShortcutProperties& properties,
ShortcutOperation operation) {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
// A target is required unless |operation| is SHORTCUT_UPDATE_EXISTING.
if (operation != SHORTCUT_UPDATE_EXISTING &&
@@ -68,12 +69,12 @@ bool CreateOrUpdateShortcutLink(const FilePath& shortcut_path,
bool shortcut_existed = PathExists(shortcut_path);
// Interfaces to the old shortcut when replacing an existing shortcut.
- ScopedComPtr<IShellLink> old_i_shell_link;
- ScopedComPtr<IPersistFile> old_i_persist_file;
+ ComPtr<IShellLink> old_i_shell_link;
+ ComPtr<IPersistFile> old_i_persist_file;
// Interfaces to the shortcut being created/updated.
- ScopedComPtr<IShellLink> i_shell_link;
- ScopedComPtr<IPersistFile> i_persist_file;
+ ComPtr<IShellLink> i_shell_link;
+ ComPtr<IPersistFile> i_persist_file;
switch (operation) {
case SHORTCUT_CREATE_ALWAYS:
InitializeShortcutInterfaces(NULL, &i_shell_link, &i_persist_file);
@@ -137,8 +138,11 @@ bool CreateOrUpdateShortcutLink(const FilePath& shortcut_path,
(properties.options & ShortcutProperties::PROPERTIES_APP_ID) != 0;
bool has_dual_mode =
(properties.options & ShortcutProperties::PROPERTIES_DUAL_MODE) != 0;
- if (has_app_id || has_dual_mode) {
- ScopedComPtr<IPropertyStore> property_store;
+ bool has_toast_activator_clsid =
+ (properties.options &
+ ShortcutProperties::PROPERTIES_TOAST_ACTIVATOR_CLSID) != 0;
+ if (has_app_id || has_dual_mode || has_toast_activator_clsid) {
+ ComPtr<IPropertyStore> property_store;
if (FAILED(i_shell_link.CopyTo(property_store.GetAddressOf())) ||
!property_store.Get())
return false;
@@ -154,6 +158,12 @@ bool CreateOrUpdateShortcutLink(const FilePath& shortcut_path,
properties.dual_mode)) {
return false;
}
+ if (has_toast_activator_clsid &&
+ !SetClsidForPropertyStore(property_store.Get(),
+ PKEY_AppUserModel_ToastActivatorCLSID,
+ properties.toast_activator_clsid)) {
+ return false;
+ }
}
// Release the interfaces to the old shortcut to make sure it doesn't prevent
@@ -189,12 +199,12 @@ bool ResolveShortcutProperties(const FilePath& shortcut_path,
uint32_t options,
ShortcutProperties* properties) {
DCHECK(options && properties);
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
if (options & ~ShortcutProperties::PROPERTIES_ALL)
NOTREACHED() << "Unhandled property is used.";
- ScopedComPtr<IShellLink> i_shell_link;
+ ComPtr<IShellLink> i_shell_link;
// Get pointer to the IShellLink interface.
if (FAILED(::CoCreateInstance(CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER,
@@ -202,7 +212,7 @@ bool ResolveShortcutProperties(const FilePath& shortcut_path,
return false;
}
- ScopedComPtr<IPersistFile> persist;
+ ComPtr<IPersistFile> persist;
// Query IShellLink for the IPersistFile interface.
if (FAILED(i_shell_link.CopyTo(persist.GetAddressOf())))
return false;
@@ -247,8 +257,10 @@ bool ResolveShortcutProperties(const FilePath& shortcut_path,
properties->set_icon(FilePath(temp), temp_index);
}
- if (options & ShortcutProperties::PROPERTIES_WIN7) {
- ScopedComPtr<IPropertyStore> property_store;
+ if (options & (ShortcutProperties::PROPERTIES_APP_ID |
+ ShortcutProperties::PROPERTIES_DUAL_MODE |
+ ShortcutProperties::PROPERTIES_TOAST_ACTIVATOR_CLSID)) {
+ ComPtr<IPropertyStore> property_store;
if (FAILED(i_shell_link.CopyTo(property_store.GetAddressOf())))
return false;
@@ -289,6 +301,28 @@ bool ResolveShortcutProperties(const FilePath& shortcut_path,
return false;
}
}
+
+ if (options & ShortcutProperties::PROPERTIES_TOAST_ACTIVATOR_CLSID) {
+ ScopedPropVariant pv_toast_activator_clsid;
+ if (property_store->GetValue(PKEY_AppUserModel_ToastActivatorCLSID,
+ pv_toast_activator_clsid.Receive()) !=
+ S_OK) {
+ return false;
+ }
+ switch (pv_toast_activator_clsid.get().vt) {
+ case VT_EMPTY:
+ properties->set_toast_activator_clsid(CLSID_NULL);
+ break;
+ case VT_CLSID:
+ properties->set_toast_activator_clsid(
+ *(pv_toast_activator_clsid.get().puuid));
+ break;
+ default:
+ NOTREACHED() << "Unexpected variant type: "
+ << pv_toast_activator_clsid.get().vt;
+ return false;
+ }
+ }
}
return true;
@@ -321,7 +355,7 @@ bool CanPinShortcutToTaskbar() {
}
bool PinShortcutToTaskbar(const FilePath& shortcut) {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
DCHECK(CanPinShortcutToTaskbar());
intptr_t result = reinterpret_cast<intptr_t>(ShellExecute(
@@ -330,7 +364,7 @@ bool PinShortcutToTaskbar(const FilePath& shortcut) {
}
bool UnpinShortcutFromTaskbar(const FilePath& shortcut) {
- base::ThreadRestrictions::AssertIOAllowed();
+ AssertBlockingAllowed();
intptr_t result = reinterpret_cast<intptr_t>(ShellExecute(
NULL, L"taskbarunpin", shortcut.value().c_str(), NULL, NULL, 0));
diff --git a/chromium/base/win/shortcut.h b/chromium/base/win/shortcut.h
index 3cd70fc9ef0..38c12b77f4d 100644
--- a/chromium/base/win/shortcut.h
+++ b/chromium/base/win/shortcut.h
@@ -40,16 +40,12 @@ struct BASE_EXPORT ShortcutProperties {
PROPERTIES_ICON = 1U << 4,
PROPERTIES_APP_ID = 1U << 5,
PROPERTIES_DUAL_MODE = 1U << 6,
+ PROPERTIES_TOAST_ACTIVATOR_CLSID = 1U << 7,
// Be sure to update the values below when adding a new property.
- PROPERTIES_BASIC = PROPERTIES_TARGET |
- PROPERTIES_WORKING_DIR |
- PROPERTIES_ARGUMENTS |
- PROPERTIES_DESCRIPTION |
- PROPERTIES_ICON,
- // TODO(pmonette): Get rid of PROPERTIES_WIN7 now that Windows 7 is the last
- // supported Windows version.
- PROPERTIES_WIN7 = PROPERTIES_APP_ID | PROPERTIES_DUAL_MODE,
- PROPERTIES_ALL = PROPERTIES_BASIC | PROPERTIES_WIN7
+ PROPERTIES_ALL = PROPERTIES_TARGET | PROPERTIES_WORKING_DIR |
+ PROPERTIES_ARGUMENTS | PROPERTIES_DESCRIPTION |
+ PROPERTIES_ICON | PROPERTIES_APP_ID |
+ PROPERTIES_DUAL_MODE | PROPERTIES_TOAST_ACTIVATOR_CLSID
};
ShortcutProperties();
@@ -96,6 +92,11 @@ struct BASE_EXPORT ShortcutProperties {
options |= PROPERTIES_DUAL_MODE;
}
+ void set_toast_activator_clsid(const CLSID& toast_activator_clsid_in) {
+ toast_activator_clsid = toast_activator_clsid_in;
+ options |= PROPERTIES_TOAST_ACTIVATOR_CLSID;
+ }
+
// The target to launch from this shortcut. This is mandatory when creating
// a shortcut.
FilePath target;
@@ -111,10 +112,14 @@ struct BASE_EXPORT ShortcutProperties {
// the resource id).
FilePath icon;
int icon_index;
- // The app model id for the shortcut (Win7+).
+ // The app model id for the shortcut.
string16 app_id;
// Whether this is a dual mode shortcut (Win8+).
bool dual_mode;
+ // The CLSID of the COM object registered with the OS via the shortcut. This
+ // is for app activation via user interaction with a toast notification in the
+ // Action Center. (Win10 version 1607, build 14393, and beyond).
+ CLSID toast_activator_clsid;
// Bitfield made of IndividualProperties. Properties set in |options| will be
// set on the shortcut, others will be ignored.
uint32_t options;
diff --git a/chromium/base/win/shortcut_unittest.cc b/chromium/base/win/shortcut_unittest.cc
index b1d9345e871..f16791572dc 100644
--- a/chromium/base/win/shortcut_unittest.cc
+++ b/chromium/base/win/shortcut_unittest.cc
@@ -45,6 +45,14 @@ class ShortcutTest : public testing::Test {
link_properties_.set_icon(link_properties_.target, 4);
link_properties_.set_app_id(L"Chrome");
link_properties_.set_dual_mode(false);
+
+ // The CLSID below was randomly selected.
+ static constexpr CLSID toast_activator_clsid = {
+ 0x08d401c2,
+ 0x3f79,
+ 0x41d8,
+ {0x89, 0xd0, 0x99, 0x25, 0xee, 0x16, 0x28, 0x63}};
+ link_properties_.set_toast_activator_clsid(toast_activator_clsid);
}
// Shortcut 2's properties (all different from properties of shortcut 1).
@@ -62,6 +70,7 @@ class ShortcutTest : public testing::Test {
link_properties_2_.set_icon(icon_path_2, 0);
link_properties_2_.set_app_id(L"Chrome.UserLevelCrazySuffix");
link_properties_2_.set_dual_mode(true);
+ link_properties_2_.set_toast_activator_clsid(CLSID_NULL);
}
}
@@ -82,9 +91,6 @@ class ShortcutTest : public testing::Test {
} // namespace
TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
- uint32_t valid_properties = ShortcutProperties::PROPERTIES_BASIC;
- valid_properties |= ShortcutProperties::PROPERTIES_WIN7;
-
// Test all properties.
FilePath file_1(temp_dir_.GetPath().Append(L"Link1.lnk"));
ASSERT_TRUE(CreateOrUpdateShortcutLink(
@@ -93,7 +99,7 @@ TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
ShortcutProperties properties_read_1;
ASSERT_TRUE(ResolveShortcutProperties(
file_1, ShortcutProperties::PROPERTIES_ALL, &properties_read_1));
- EXPECT_EQ(valid_properties, properties_read_1.options);
+ EXPECT_EQ(ShortcutProperties::PROPERTIES_ALL, properties_read_1.options);
ValidatePathsAreEqual(link_properties_.target, properties_read_1.target);
ValidatePathsAreEqual(link_properties_.working_dir,
properties_read_1.working_dir);
@@ -103,6 +109,8 @@ TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
EXPECT_EQ(link_properties_.icon_index, properties_read_1.icon_index);
EXPECT_EQ(link_properties_.app_id, properties_read_1.app_id);
EXPECT_EQ(link_properties_.dual_mode, properties_read_1.dual_mode);
+ EXPECT_EQ(link_properties_.toast_activator_clsid,
+ properties_read_1.toast_activator_clsid);
// Test simple shortcut with no special properties set.
FilePath file_2(temp_dir_.GetPath().Append(L"Link2.lnk"));
@@ -114,7 +122,7 @@ TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
ShortcutProperties properties_read_2;
ASSERT_TRUE(ResolveShortcutProperties(
file_2, ShortcutProperties::PROPERTIES_ALL, &properties_read_2));
- EXPECT_EQ(valid_properties, properties_read_2.options);
+ EXPECT_EQ(ShortcutProperties::PROPERTIES_ALL, properties_read_2.options);
ValidatePathsAreEqual(only_target_properties.target,
properties_read_2.target);
ValidatePathsAreEqual(FilePath(), properties_read_2.working_dir);
@@ -124,6 +132,7 @@ TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
EXPECT_EQ(0, properties_read_2.icon_index);
EXPECT_EQ(L"", properties_read_2.app_id);
EXPECT_FALSE(properties_read_2.dual_mode);
+ EXPECT_EQ(CLSID_NULL, properties_read_2.toast_activator_clsid);
}
TEST_F(ShortcutTest, CreateAndResolveShortcut) {
diff --git a/chromium/base/win/win_util.cc b/chromium/base/win/win_util.cc
index 7de52ae7fb2..fccafc901fe 100644
--- a/chromium/base/win/win_util.cc
+++ b/chromium/base/win/win_util.cc
@@ -28,6 +28,7 @@
#include <uiviewsettingsinterop.h>
#include <windows.ui.viewmanagement.h>
#include <winstring.h>
+#include <wrl/client.h>
#include <wrl/wrappers/corewrappers.h>
#include <memory>
@@ -45,7 +46,6 @@
#include "base/win/core_winrt_util.h"
#include "base/win/registry.h"
#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_comptr.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_hstring.h"
#include "base/win/scoped_propvariant.h"
@@ -137,13 +137,14 @@ bool IsWindows10TabletMode(HWND hwnd) {
ScopedHString view_settings_guid = ScopedHString::Create(
RuntimeClass_Windows_UI_ViewManagement_UIViewSettings);
- ScopedComPtr<IUIViewSettingsInterop> view_settings_interop;
+ Microsoft::WRL::ComPtr<IUIViewSettingsInterop> view_settings_interop;
HRESULT hr = base::win::RoGetActivationFactory(
view_settings_guid.get(), IID_PPV_ARGS(&view_settings_interop));
if (FAILED(hr))
return false;
- ScopedComPtr<ABI::Windows::UI::ViewManagement::IUIViewSettings> view_settings;
+ Microsoft::WRL::ComPtr<ABI::Windows::UI::ViewManagement::IUIViewSettings>
+ view_settings;
hr = view_settings_interop->GetForWindow(hwnd, IID_PPV_ARGS(&view_settings));
if (FAILED(hr))
return false;
@@ -375,6 +376,19 @@ bool SetStringValueForPropertyStore(IPropertyStore* property_store,
property_value);
}
+bool SetClsidForPropertyStore(IPropertyStore* property_store,
+ const PROPERTYKEY& property_key,
+ const CLSID& property_clsid_value) {
+ ScopedPropVariant property_value;
+ if (FAILED(InitPropVariantFromCLSID(property_clsid_value,
+ property_value.Receive()))) {
+ return false;
+ }
+
+ return SetPropVariantValueForPropertyStore(property_store, property_key,
+ property_value);
+}
+
bool SetAppIdForPropertyStore(IPropertyStore* property_store,
const wchar_t* app_id) {
// App id should be less than 64 chars and contain no space. And recommended
diff --git a/chromium/base/win/win_util.h b/chromium/base/win/win_util.h
index ca784482a63..570dad4ce8f 100644
--- a/chromium/base/win/win_util.h
+++ b/chromium/base/win/win_util.h
@@ -96,6 +96,11 @@ BASE_EXPORT bool SetStringValueForPropertyStore(
const PROPERTYKEY& property_key,
const wchar_t* property_string_value);
+// Sets the CLSID value for a given key in a given IPropertyStore.
+BASE_EXPORT bool SetClsidForPropertyStore(IPropertyStore* property_store,
+ const PROPERTYKEY& property_key,
+ const CLSID& property_clsid_value);
+
// Sets the application id in given IPropertyStore. The function is intended
// for tagging application/chromium shortcut, browser window and jump list for
// Win7.
diff --git a/chromium/base/win/winrt_storage_util.cc b/chromium/base/win/winrt_storage_util.cc
new file mode 100644
index 00000000000..262d8171965
--- /dev/null
+++ b/chromium/base/win/winrt_storage_util.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/winrt_storage_util.h"
+
+#include <robuffer.h>
+#include <string.h>
+#include <wrl/client.h>
+
+#include "base/strings/string_util.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/scoped_hstring.h"
+
+namespace base {
+namespace win {
+
+using IBuffer = ABI::Windows::Storage::Streams::IBuffer;
+
+HRESULT GetPointerToBufferData(IBuffer* buffer, uint8_t** out, UINT32* length) {
+ *out = nullptr;
+
+ Microsoft::WRL::ComPtr<Windows::Storage::Streams::IBufferByteAccess>
+ buffer_byte_access;
+ HRESULT hr = buffer->QueryInterface(IID_PPV_ARGS(&buffer_byte_access));
+ if (FAILED(hr))
+ return hr;
+
+ hr = buffer->get_Length(length);
+ if (FAILED(hr))
+ return hr;
+
+  // The lifetime of the returned pointer is controlled by the buffer object.
+ return buffer_byte_access->Buffer(out);
+}
+
+HRESULT CreateIBufferFromData(const uint8_t* data,
+ UINT32 length,
+ Microsoft::WRL::ComPtr<IBuffer>* buffer) {
+ *buffer = nullptr;
+
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBufferFactory>
+ buffer_factory;
+ HRESULT hr = base::win::GetActivationFactory<
+ ABI::Windows::Storage::Streams::IBufferFactory,
+ RuntimeClass_Windows_Storage_Streams_Buffer>(&buffer_factory);
+ if (FAILED(hr))
+ return hr;
+
+ Microsoft::WRL::ComPtr<IBuffer> internal_buffer;
+ hr = buffer_factory->Create(length, internal_buffer.GetAddressOf());
+ if (FAILED(hr))
+ return hr;
+
+ hr = internal_buffer->put_Length(length);
+ if (FAILED(hr))
+ return hr;
+
+ uint8_t* p_buffer_data;
+ hr = GetPointerToBufferData(internal_buffer.Get(), &p_buffer_data, &length);
+ if (FAILED(hr))
+ return hr;
+
+ memcpy(p_buffer_data, data, length);
+
+ *buffer = std::move(internal_buffer);
+
+ return S_OK;
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/winrt_storage_util.h b/chromium/base/win/winrt_storage_util.h
new file mode 100644
index 00000000000..e24336cb3c4
--- /dev/null
+++ b/chromium/base/win/winrt_storage_util.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WINRT_STORAGE_UTIL_H_
+#define BASE_WIN_WINRT_STORAGE_UTIL_H_
+
+#include <stdint.h>
+#include <windows.storage.streams.h>
+#include <wrl/client.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace win {
+
+// Gets a pointer to the array of bytes in |buffer|; |out| receives a pointer
+// to the bytes used by byte-stream reads and writes.
+BASE_EXPORT HRESULT
+GetPointerToBufferData(ABI::Windows::Storage::Streams::IBuffer* buffer,
+ uint8_t** out,
+ UINT32* length);
+
+// Creates the stream |buffer| from |data|, which represents an array of
+// bytes of the given |length|.
+BASE_EXPORT HRESULT CreateIBufferFromData(
+ const uint8_t* data,
+ UINT32 length,
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer>* buffer);
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_WINRT_STORAGE_UTIL_H_
diff --git a/chromium/base/win/winrt_storage_util_unittest.cc b/chromium/base/win/winrt_storage_util_unittest.cc
new file mode 100644
index 00000000000..530ab23e0a8
--- /dev/null
+++ b/chromium/base/win/winrt_storage_util_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/winrt_storage_util.h"
+
+#include <string.h>
+#include <wrl/client.h>
+
+#include "base/strings/string_util.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_hstring.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(WinrtStorageUtilTest, CreateBufferFromData) {
+ ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+
+ if (!ResolveCoreWinRTDelayload() ||
+ !ScopedHString::ResolveCoreWinRTStringDelayload()) {
+ return;
+ }
+
+ const std::vector<uint8_t> data = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer;
+ ASSERT_HRESULT_SUCCEEDED(
+ CreateIBufferFromData(data.data(), data.size(), &buffer));
+
+ uint8_t* p_buffer_data;
+ uint32_t length;
+ ASSERT_HRESULT_SUCCEEDED(
+ GetPointerToBufferData(buffer.Get(), &p_buffer_data, &length));
+
+ ASSERT_EQ(data.size(), length);
+ EXPECT_EQ(0, memcmp(p_buffer_data, data.data(), data.size()));
+}
+
+} // namespace win
+} // namespace base