author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-12-10 16:19:40 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-12-10 16:01:50 +0000
commit     51f6c2793adab2d864b3d2b360000ef8db1d3e92 (patch)
tree       835b3b4446b012c75e80177cef9fbe6972cc7dbe /chromium/cc
parent     6036726eb981b6c4b42047513b9d3f4ac865daac (diff)
BASELINE: Update Chromium to 71.0.3578.93
Change-Id: I6a32086c33670e1b033f8b10e6bf1fd4da1d105d
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/cc')
-rw-r--r--  chromium/cc/BUILD.gn  17
-rw-r--r--  chromium/cc/DEPS  6
-rw-r--r--  chromium/cc/animation/animation.cc  2
-rw-r--r--  chromium/cc/animation/animation_host.cc  2
-rw-r--r--  chromium/cc/animation/animation_host_unittest.cc  8
-rw-r--r--  chromium/cc/animation/element_animations.cc  7
-rw-r--r--  chromium/cc/base/histograms.h  74
-rw-r--r--  chromium/cc/base/list_container_unittest.cc  5
-rw-r--r--  chromium/cc/base/rolling_time_delta_history.cc  6
-rw-r--r--  chromium/cc/base/unique_notifier.cc  3
-rw-r--r--  chromium/cc/base/unique_notifier.h  2
-rw-r--r--  chromium/cc/base/unique_notifier_unittest.cc  21
-rw-r--r--  chromium/cc/cc.gni  26
-rw-r--r--  chromium/cc/input/main_thread_scrolling_reason.cc  4
-rw-r--r--  chromium/cc/input/main_thread_scrolling_reason.h  7
-rw-r--r--  chromium/cc/input/main_thread_scrolling_reason_unittest.cc  4
-rw-r--r--  chromium/cc/input/scroll_snap_data.h  16
-rw-r--r--  chromium/cc/input/scroll_snap_data_unittest.cc  24
-rw-r--r--  chromium/cc/layers/append_quads_data.h  6
-rw-r--r--  chromium/cc/layers/deadline_policy.h  1
-rw-r--r--  chromium/cc/layers/heads_up_display_layer.cc  24
-rw-r--r--  chromium/cc/layers/heads_up_display_layer.h  3
-rw-r--r--  chromium/cc/layers/heads_up_display_layer_impl.cc  64
-rw-r--r--  chromium/cc/layers/layer.cc  204
-rw-r--r--  chromium/cc/layers/layer.h  22
-rw-r--r--  chromium/cc/layers/layer_impl.cc  40
-rw-r--r--  chromium/cc/layers/layer_impl.h  30
-rw-r--r--  chromium/cc/layers/layer_impl_test_properties.h  1
-rw-r--r--  chromium/cc/layers/layer_impl_unittest.cc  101
-rw-r--r--  chromium/cc/layers/layer_unittest.cc  124
-rw-r--r--  chromium/cc/layers/painted_overlay_scrollbar_layer.cc  1
-rw-r--r--  chromium/cc/layers/painted_scrollbar_layer.cc  4
-rw-r--r--  chromium/cc/layers/picture_layer_impl.cc  81
-rw-r--r--  chromium/cc/layers/picture_layer_impl_unittest.cc  16
-rw-r--r--  chromium/cc/layers/picture_layer_unittest.cc  16
-rw-r--r--  chromium/cc/layers/render_surface_impl.cc  22
-rw-r--r--  chromium/cc/layers/scrollbar_layer_impl_base.cc  4
-rw-r--r--  chromium/cc/layers/scrollbar_layer_unittest.cc  10
-rw-r--r--  chromium/cc/layers/solid_color_layer_impl.cc  34
-rw-r--r--  chromium/cc/layers/solid_color_layer_impl_unittest.cc  8
-rw-r--r--  chromium/cc/layers/solid_color_scrollbar_layer.cc  1
-rw-r--r--  chromium/cc/layers/surface_layer.cc  13
-rw-r--r--  chromium/cc/layers/surface_layer.h  9
-rw-r--r--  chromium/cc/layers/surface_layer_impl.cc  9
-rw-r--r--  chromium/cc/layers/surface_layer_impl.h  8
-rw-r--r--  chromium/cc/layers/texture_layer_impl.cc  10
-rw-r--r--  chromium/cc/layers/texture_layer_unittest.cc  13
-rw-r--r--  chromium/cc/mojo_embedder/async_layer_tree_frame_sink.cc  112
-rw-r--r--  chromium/cc/mojo_embedder/async_layer_tree_frame_sink.h  40
-rw-r--r--  chromium/cc/paint/BUILD.gn  4
-rw-r--r--  chromium/cc/paint/discardable_image_map.cc  5
-rw-r--r--  chromium/cc/paint/oop_pixeltest.cc  23
-rw-r--r--  chromium/cc/paint/paint_font.cc  5
-rw-r--r--  chromium/cc/paint/paint_font.h  9
-rw-r--r--  chromium/cc/paint/paint_image.cc  38
-rw-r--r--  chromium/cc/paint/paint_image.h  7
-rw-r--r--  chromium/cc/paint/paint_image_builder.h  4
-rw-r--r--  chromium/cc/paint/paint_op_buffer.h  6
-rw-r--r--  chromium/cc/paint/paint_op_buffer_serializer.cc  7
-rw-r--r--  chromium/cc/paint/paint_op_buffer_unittest.cc  14
-rw-r--r--  chromium/cc/paint/paint_op_perftest.cc  6
-rw-r--r--  chromium/cc/paint/paint_op_reader.cc  7
-rw-r--r--  chromium/cc/paint/paint_op_writer.cc  1
-rw-r--r--  chromium/cc/paint/paint_text_blob.cc  3
-rw-r--r--  chromium/cc/paint/paint_text_blob.h  10
-rw-r--r--  chromium/cc/paint/paint_text_blob_builder.h  2
-rw-r--r--  chromium/cc/paint/paint_typeface.cc  157
-rw-r--r--  chromium/cc/paint/paint_typeface.h  79
-rw-r--r--  chromium/cc/paint/paint_typeface_transfer_cache_entry.cc  231
-rw-r--r--  chromium/cc/paint/paint_typeface_transfer_cache_entry.h  61
-rw-r--r--  chromium/cc/paint/solid_color_analyzer.cc  1
-rw-r--r--  chromium/cc/paint/transfer_cache_entry.cc  19
-rw-r--r--  chromium/cc/paint/transfer_cache_entry.h  4
-rw-r--r--  chromium/cc/raster/bitmap_raster_buffer_provider.cc  4
-rw-r--r--  chromium/cc/raster/bitmap_raster_buffer_provider.h  1
-rw-r--r--  chromium/cc/raster/gpu_raster_buffer_provider.cc  285
-rw-r--r--  chromium/cc/raster/gpu_raster_buffer_provider.h  56
-rw-r--r--  chromium/cc/raster/one_copy_raster_buffer_provider.cc  211
-rw-r--r--  chromium/cc/raster/one_copy_raster_buffer_provider.h  11
-rw-r--r--  chromium/cc/raster/raster_buffer_provider.cc  30
-rw-r--r--  chromium/cc/raster/raster_buffer_provider.h  8
-rw-r--r--  chromium/cc/raster/raster_buffer_provider_perftest.cc  8
-rw-r--r--  chromium/cc/raster/raster_buffer_provider_unittest.cc  34
-rw-r--r--  chromium/cc/raster/raster_source.cc  10
-rw-r--r--  chromium/cc/raster/staging_buffer_pool.cc  46
-rw-r--r--  chromium/cc/raster/staging_buffer_pool.h  35
-rw-r--r--  chromium/cc/raster/staging_buffer_pool_unittest.cc  7
-rw-r--r--  chromium/cc/raster/task_graph_work_queue.cc  25
-rw-r--r--  chromium/cc/raster/task_graph_work_queue_unittest.cc  157
-rw-r--r--  chromium/cc/raster/texture_compressor.cc  35
-rw-r--r--  chromium/cc/raster/texture_compressor.h  47
-rw-r--r--  chromium/cc/raster/texture_compressor_etc1.cc  333
-rw-r--r--  chromium/cc/raster/texture_compressor_etc1.h  204
-rw-r--r--  chromium/cc/raster/texture_compressor_etc1_sse.cc  818
-rw-r--r--  chromium/cc/raster/texture_compressor_etc1_sse.h  34
-rw-r--r--  chromium/cc/raster/texture_compressor_etc1_unittest.cc  57
-rw-r--r--  chromium/cc/raster/texture_compressor_perftest.cc  123
-rw-r--r--  chromium/cc/raster/zero_copy_raster_buffer_provider.cc  4
-rw-r--r--  chromium/cc/raster/zero_copy_raster_buffer_provider.h  1
-rw-r--r--  chromium/cc/resources/resource_pool.cc  41
-rw-r--r--  chromium/cc/resources/resource_pool.h  19
-rw-r--r--  chromium/cc/resources/resource_pool_unittest.cc  67
-rw-r--r--  chromium/cc/scheduler/scheduler.cc  83
-rw-r--r--  chromium/cc/scheduler/scheduler.h  9
-rw-r--r--  chromium/cc/scheduler/scheduler_state_machine.cc  8
-rw-r--r--  chromium/cc/scheduler/scheduler_unittest.cc  37
-rw-r--r--  chromium/cc/tiles/gpu_image_decode_cache.cc  78
-rw-r--r--  chromium/cc/tiles/gpu_image_decode_cache.h  9
-rw-r--r--  chromium/cc/tiles/gpu_image_decode_cache_unittest.cc  257
-rw-r--r--  chromium/cc/tiles/image_controller.cc  4
-rw-r--r--  chromium/cc/tiles/image_controller_unittest.cc  104
-rw-r--r--  chromium/cc/tiles/image_decode_cache.h  1
-rw-r--r--  chromium/cc/tiles/image_decode_cache_utils.cc  52
-rw-r--r--  chromium/cc/tiles/image_decode_cache_utils.h  42
-rw-r--r--  chromium/cc/tiles/picture_layer_tiling.cc  6
-rw-r--r--  chromium/cc/tiles/picture_layer_tiling_set.cc  4
-rw-r--r--  chromium/cc/tiles/picture_layer_tiling_set.h  6
-rw-r--r--  chromium/cc/tiles/picture_layer_tiling_unittest.cc  4
-rw-r--r--  chromium/cc/tiles/software_image_decode_cache.cc  38
-rw-r--r--  chromium/cc/tiles/software_image_decode_cache.h  10
-rw-r--r--  chromium/cc/tiles/software_image_decode_cache_unittest_combinations.cc  96
-rw-r--r--  chromium/cc/tiles/software_image_decode_cache_utils.cc  17
-rw-r--r--  chromium/cc/tiles/software_image_decode_cache_utils.h  1
-rw-r--r--  chromium/cc/tiles/tile_draw_info.h  4
-rw-r--r--  chromium/cc/tiles/tile_manager.cc  85
-rw-r--r--  chromium/cc/tiles/tile_manager.h  9
-rw-r--r--  chromium/cc/tiles/tile_manager_unittest.cc  29
-rw-r--r--  chromium/cc/trees/damage_tracker.cc  19
-rw-r--r--  chromium/cc/trees/draw_property_utils.cc  21
-rw-r--r--  chromium/cc/trees/effect_node.cc  4
-rw-r--r--  chromium/cc/trees/effect_node.h  1
-rw-r--r--  chromium/cc/trees/layer_tree_host.cc  353
-rw-r--r--  chromium/cc/trees/layer_tree_host.h  138
-rw-r--r--  chromium/cc/trees/layer_tree_host_client.h  40
-rw-r--r--  chromium/cc/trees/layer_tree_host_common_unittest.cc  10
-rw-r--r--  chromium/cc/trees/layer_tree_host_impl.cc  270
-rw-r--r--  chromium/cc/trees/layer_tree_host_impl.h  16
-rw-r--r--  chromium/cc/trees/layer_tree_host_impl_unittest.cc  94
-rw-r--r--  chromium/cc/trees/layer_tree_host_perftest.cc  3
-rw-r--r--  chromium/cc/trees/layer_tree_host_unittest.cc  654
-rw-r--r--  chromium/cc/trees/layer_tree_host_unittest_animation.cc  9
-rw-r--r--  chromium/cc/trees/layer_tree_host_unittest_context.cc  5
-rw-r--r--  chromium/cc/trees/layer_tree_host_unittest_scroll.cc  39
-rw-r--r--  chromium/cc/trees/layer_tree_impl.cc  66
-rw-r--r--  chromium/cc/trees/layer_tree_impl.h  22
-rw-r--r--  chromium/cc/trees/layer_tree_mutator.cc  11
-rw-r--r--  chromium/cc/trees/layer_tree_mutator.h  9
-rw-r--r--  chromium/cc/trees/layer_tree_settings.h  5
-rw-r--r--  chromium/cc/trees/property_tree.cc  2
-rw-r--r--  chromium/cc/trees/property_tree.h  4
-rw-r--r--  chromium/cc/trees/property_tree_builder.cc  12
-rw-r--r--  chromium/cc/trees/proxy_impl.cc  9
-rw-r--r--  chromium/cc/trees/proxy_impl.h  2
-rw-r--r--  chromium/cc/trees/proxy_main.cc  18
-rw-r--r--  chromium/cc/trees/render_frame_metadata.cc  2
-rw-r--r--  chromium/cc/trees/render_frame_metadata.h  6
-rw-r--r--  chromium/cc/trees/scroll_node.cc  1
-rw-r--r--  chromium/cc/trees/scroll_node.h  2
-rw-r--r--  chromium/cc/trees/single_thread_proxy.cc  19
-rw-r--r--  chromium/cc/trees/target_property.h  3
-rw-r--r--  chromium/cc/trees/transform_node.cc  2
-rw-r--r--  chromium/cc/trees/tree_synchronizer.cc  156
-rw-r--r--  chromium/cc/trees/tree_synchronizer_unittest.cc  92
163 files changed, 3524 insertions, 4267 deletions
diff --git a/chromium/cc/BUILD.gn b/chromium/cc/BUILD.gn
index 793b33e1f25..b5a24d07eca 100644
--- a/chromium/cc/BUILD.gn
+++ b/chromium/cc/BUILD.gn
@@ -171,10 +171,6 @@ cc_component("cc") {
"raster/task_graph_runner.h",
"raster/task_graph_work_queue.cc",
"raster/task_graph_work_queue.h",
- "raster/texture_compressor.cc",
- "raster/texture_compressor.h",
- "raster/texture_compressor_etc1.cc",
- "raster/texture_compressor_etc1.h",
"raster/tile_task.cc",
"raster/tile_task.h",
"raster/zero_copy_raster_buffer_provider.cc",
@@ -223,6 +219,8 @@ cc_component("cc") {
"tiles/image_controller.h",
"tiles/image_decode_cache.cc",
"tiles/image_decode_cache.h",
+ "tiles/image_decode_cache_utils.cc",
+ "tiles/image_decode_cache_utils.h",
"tiles/mipmap_util.cc",
"tiles/mipmap_util.h",
"tiles/picture_layer_tiling.cc",
@@ -345,13 +343,6 @@ cc_component("cc") {
"trees/ukm_manager.h",
]
- if (current_cpu == "x86" || current_cpu == "x64") {
- sources += [
- "raster/texture_compressor_etc1_sse.cc",
- "raster/texture_compressor_etc1_sse.h",
- ]
- }
-
# TODO(khushalsagar): Remove once crbug.com/683263 is fixed.
configs = [ "//build/config/compiler:no_size_t_to_int_warning" ]
@@ -385,7 +376,7 @@ cc_component("cc") {
defines = [ "CC_IMPLEMENTATION=1" ]
}
-cc_static_library("test_support") {
+cc_test_static_library("test_support") {
testonly = true
sources = [
"test/animation_test_common.cc",
@@ -637,7 +628,6 @@ cc_test("cc_unittests") {
"raster/staging_buffer_pool_unittest.cc",
"raster/synchronous_task_graph_runner_unittest.cc",
"raster/task_graph_work_queue_unittest.cc",
- "raster/texture_compressor_etc1_unittest.cc",
"resources/resource_pool_unittest.cc",
"scheduler/compositor_timing_history_unittest.cc",
"scheduler/scheduler_state_machine_unittest.cc",
@@ -766,7 +756,6 @@ cc_test("cc_perftests") {
"paint/paint_op_perftest.cc",
"raster/raster_buffer_provider_perftest.cc",
"raster/task_graph_runner_perftest.cc",
- "raster/texture_compressor_perftest.cc",
"test/cc_test_suite.cc",
"test/cc_test_suite.h",
"test/run_all_perftests.cc",
diff --git a/chromium/cc/DEPS b/chromium/cc/DEPS
index 7b0e07a0097..6f352e8c510 100644
--- a/chromium/cc/DEPS
+++ b/chromium/cc/DEPS
@@ -8,13 +8,17 @@ include_rules = [
"+gpu/command_buffer/client/gles2_interface_stub.h", # for tests
"+gpu/command_buffer/client/gpu_memory_buffer_manager.h",
"+gpu/command_buffer/client/raster_implementation_gles.h",
+ "+gpu/command_buffer/client/raster_interface.h",
+ "+gpu/command_buffer/client/shared_image_interface.h",
"+gpu/command_buffer/common/capabilities.h",
"+gpu/command_buffer/common/discardable_handle.h",
+ "+gpu/command_buffer/common/gl2_types.h",
"+gpu/command_buffer/common/gpu_memory_allocation.h",
"+gpu/command_buffer/common/gpu_memory_buffer_support.h",
"+gpu/command_buffer/common/mailbox.h",
"+gpu/command_buffer/common/mailbox_holder.h",
- "+gpu/command_buffer/client/raster_interface.h",
+ "+gpu/command_buffer/common/shared_image_trace_utils.h",
+ "+gpu/command_buffer/common/shared_image_usage.h",
"+gpu/command_buffer/common/sync_token.h",
"+gpu/command_buffer/common/texture_in_use_response.h",
"+gpu/config/gpu_feature_info.h",
diff --git a/chromium/cc/animation/animation.cc b/chromium/cc/animation/animation.cc
index 8da03807dcf..644de3770cb 100644
--- a/chromium/cc/animation/animation.cc
+++ b/chromium/cc/animation/animation.cc
@@ -48,7 +48,7 @@ ElementId Animation::element_id_of_keyframe_effect(
}
bool Animation::IsElementAttached(ElementId id) const {
- return !!element_to_keyframe_effect_id_map_.count(id);
+ return base::ContainsKey(element_to_keyframe_effect_id_map_, id);
}
void Animation::SetAnimationHost(AnimationHost* animation_host) {
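The hunk above swaps an explicit count() check for base::ContainsKey. A minimal
sketch of the equivalence, assuming base/stl_util.h as of this Chromium
revision (the map type and function below are illustrative, not from the diff):

  #include <map>

  #include "base/stl_util.h"

  bool IsAttached(const std::map<int, int>& element_map, int id) {
    // Reads as a membership test instead of coercing a count to bool.
    return base::ContainsKey(element_map, id);  // same as !!element_map.count(id)
  }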
diff --git a/chromium/cc/animation/animation_host.cc b/chromium/cc/animation/animation_host.cc
index 0cac7a4ba12..696e69e8e9b 100644
--- a/chromium/cc/animation/animation_host.cc
+++ b/chromium/cc/animation/animation_host.cc
@@ -279,7 +279,7 @@ bool AnimationHost::NeedsTickAnimations() const {
bool AnimationHost::TickMutator(base::TimeTicks monotonic_time,
const ScrollTree& scroll_tree,
bool is_active_tree) {
- if (!mutator_ || !mutator_->HasAnimators())
+ if (!mutator_ || !mutator_->HasMutators())
return false;
std::unique_ptr<MutatorInputState> state = CollectWorkletAnimationsState(
diff --git a/chromium/cc/animation/animation_host_unittest.cc b/chromium/cc/animation/animation_host_unittest.cc
index a100d702987..104b16c7843 100644
--- a/chromium/cc/animation/animation_host_unittest.cc
+++ b/chromium/cc/animation/animation_host_unittest.cc
@@ -165,7 +165,7 @@ TEST_F(AnimationHostTest, LayerTreeMutatorUpdateTakesEffectInSameFrame) {
MockLayerTreeMutator* mock_mutator = new NiceMock<MockLayerTreeMutator>();
host_impl_->SetLayerTreeMutator(
base::WrapUnique<LayerTreeMutator>(mock_mutator));
- ON_CALL(*mock_mutator, HasAnimators()).WillByDefault(Return(true));
+ ON_CALL(*mock_mutator, HasMutators()).WillByDefault(Return(true));
ON_CALL(*mock_mutator, MutateRef(_))
.WillByDefault(InvokeWithoutArgs(
[this, local_time]() { this->SetOutputState(local_time); }));
@@ -191,7 +191,7 @@ TEST_F(AnimationHostTest, LayerTreeMutatorsIsMutatedWithCorrectInputState) {
MockLayerTreeMutator* mock_mutator = new NiceMock<MockLayerTreeMutator>();
host_impl_->SetLayerTreeMutator(
base::WrapUnique<LayerTreeMutator>(mock_mutator));
- ON_CALL(*mock_mutator, HasAnimators()).WillByDefault(Return(true));
+ ON_CALL(*mock_mutator, HasMutators()).WillByDefault(Return(true));
const float start_opacity = .7f;
const float end_opacity = .3f;
@@ -216,7 +216,7 @@ TEST_F(AnimationHostTest, LayerTreeMutatorsIsMutatedOnlyWhenInputChanges) {
MockLayerTreeMutator* mock_mutator = new NiceMock<MockLayerTreeMutator>();
host_impl_->SetLayerTreeMutator(
base::WrapUnique<LayerTreeMutator>(mock_mutator));
- ON_CALL(*mock_mutator, HasAnimators()).WillByDefault(Return(true));
+ ON_CALL(*mock_mutator, HasMutators()).WillByDefault(Return(true));
const float start_opacity = .7f;
const float end_opacity = .3f;
@@ -342,7 +342,7 @@ TEST_F(AnimationHostTest, LayerTreeMutatorUpdateReflectsScrollAnimations) {
MockLayerTreeMutator* mock_mutator = new NiceMock<MockLayerTreeMutator>();
host_impl_->SetLayerTreeMutator(
base::WrapUnique<LayerTreeMutator>(mock_mutator));
- ON_CALL(*mock_mutator, HasAnimators()).WillByDefault(Return(true));
+ ON_CALL(*mock_mutator, HasMutators()).WillByDefault(Return(true));
EXPECT_CALL(*mock_mutator,
MutateRef(::testing::Truly(Animation1TimeEquals20)))
.Times(1);
diff --git a/chromium/cc/animation/element_animations.cc b/chromium/cc/animation/element_animations.cc
index cea0c141feb..2af7060976c 100644
--- a/chromium/cc/animation/element_animations.cc
+++ b/chromium/cc/animation/element_animations.cc
@@ -76,14 +76,17 @@ void ElementAnimations::ClearAffectedElementTypes() {
disabled_state_mask.currently_running = disable_properties;
disabled_state_mask.potentially_animating = disable_properties;
- if (has_element_in_active_list()) {
+ // This method may get called from AnimationHost dtor so it is possible for
+ // mutator_host_client() to be null.
+ if (has_element_in_active_list() && animation_host()->mutator_host_client()) {
animation_host()->mutator_host_client()->ElementIsAnimatingChanged(
element_id(), ElementListType::ACTIVE, disabled_state_mask,
disabled_state);
}
set_has_element_in_active_list(false);
- if (has_element_in_pending_list()) {
+ if (has_element_in_pending_list() &&
+ animation_host()->mutator_host_client()) {
animation_host()->mutator_host_client()->ElementIsAnimatingChanged(
element_id(), ElementListType::PENDING, disabled_state_mask,
disabled_state);
diff --git a/chromium/cc/base/histograms.h b/chromium/cc/base/histograms.h
index 5c634b9cc11..dd88f8c04d4 100644
--- a/chromium/cc/base/histograms.h
+++ b/chromium/cc/base/histograms.h
@@ -48,46 +48,46 @@ CC_BASE_EXPORT const char* GetClientNameForMetrics();
// ScopedReticulateSplinesTimer timer;
// timer.AddArea(some_rect.size().GetArea());
//
-#define DEFINE_SCOPED_UMA_HISTOGRAM_AREA_TIMER(class_name, time_histogram, \
- area_histogram) \
- class class_name : public ScopedUMAHistogramAreaTimerBase { \
- public: \
- ~class_name(); \
- }; \
- class_name::~class_name() { \
- Sample time_sample; \
- Sample area_sample; \
- const char* client_name = GetClientNameForMetrics(); \
- if (client_name && GetHistogramValues(&time_sample, &area_sample)) { \
- /* GetClientNameForMetrics only returns one non-null value over */ \
- /* the lifetime of the process, so these histogram names are */ \
- /* runtime constant. */ \
- UMA_HISTOGRAM_COUNTS(base::StringPrintf(time_histogram, client_name), \
- time_sample); \
- UMA_HISTOGRAM_CUSTOM_COUNTS( \
- base::StringPrintf(area_histogram, client_name), area_sample, 1, \
- 100000000, 50); \
- } \
+#define DEFINE_SCOPED_UMA_HISTOGRAM_AREA_TIMER(class_name, time_histogram, \
+ area_histogram) \
+ class class_name : public ScopedUMAHistogramAreaTimerBase { \
+ public: \
+ ~class_name(); \
+ }; \
+ class_name::~class_name() { \
+ Sample time_sample; \
+ Sample area_sample; \
+ const char* client_name = GetClientNameForMetrics(); \
+ if (client_name && GetHistogramValues(&time_sample, &area_sample)) { \
+ /* GetClientNameForMetrics only returns one non-null value over */ \
+ /* the lifetime of the process, so these histogram names are */ \
+ /* runtime constant. */ \
+ UMA_HISTOGRAM_COUNTS_1M(base::StringPrintf(time_histogram, client_name), \
+ time_sample); \
+ UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ base::StringPrintf(area_histogram, client_name), area_sample, 1, \
+ 100000000, 50); \
+ } \
}
// Version of the above macro for cases which only care about time, not area.
-#define DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(class_name, time_histogram) \
- class class_name : public ScopedUMAHistogramAreaTimerBase { \
- public: \
- ~class_name(); \
- }; \
- class_name::~class_name() { \
- Sample time_sample; \
- Sample area_sample; \
- const char* client_name = GetClientNameForMetrics(); \
- if (client_name && GetHistogramValues(&time_sample, &area_sample)) { \
- DCHECK_EQ(0, area_sample); \
- /* GetClientNameForMetrics only returns one non-null value over */ \
- /* the lifetime of the process, so these histogram names are */ \
- /* runtime constant. */ \
- UMA_HISTOGRAM_COUNTS(base::StringPrintf(time_histogram, client_name), \
- time_sample); \
- } \
+#define DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(class_name, time_histogram) \
+ class class_name : public ScopedUMAHistogramAreaTimerBase { \
+ public: \
+ ~class_name(); \
+ }; \
+ class_name::~class_name() { \
+ Sample time_sample; \
+ Sample area_sample; \
+ const char* client_name = GetClientNameForMetrics(); \
+ if (client_name && GetHistogramValues(&time_sample, &area_sample)) { \
+ DCHECK_EQ(0, area_sample); \
+ /* GetClientNameForMetrics only returns one non-null value over */ \
+ /* the lifetime of the process, so these histogram names are */ \
+ /* runtime constant. */ \
+ UMA_HISTOGRAM_COUNTS_1M(base::StringPrintf(time_histogram, client_name), \
+ time_sample); \
+ } \
}
class CC_BASE_EXPORT ScopedUMAHistogramAreaTimerBase {
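For reference, a sketch of how the reformatted macro is typically instantiated,
following the usage comment at the top of this file; the histogram names and
function are illustrative, and with this change the time sample is recorded via
UMA_HISTOGRAM_COUNTS_1M:

  #include "cc/base/histograms.h"
  #include "ui/gfx/geometry/rect.h"

  // Defines a scoped timer class that records a time histogram and an area
  // histogram when it goes out of scope.
  DEFINE_SCOPED_UMA_HISTOGRAM_AREA_TIMER(
      ScopedReticulateSplinesTimer,
      "Compositing.%s.ReticulateSplinesUs",      /* time histogram, per client */
      "Compositing.%s.ReticulateSplinesPixels")  /* area histogram, per client */

  void ReticulateSplines(const gfx::Rect& some_rect) {
    ScopedReticulateSplinesTimer timer;
    timer.AddArea(some_rect.size().GetArea());
  }  // The destructor records both samples here.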
diff --git a/chromium/cc/base/list_container_unittest.cc b/chromium/cc/base/list_container_unittest.cc
index c58597b2abd..e1da2989d42 100644
--- a/chromium/cc/base/list_container_unittest.cc
+++ b/chromium/cc/base/list_container_unittest.cc
@@ -572,9 +572,8 @@ TEST(ListContainerTest, SimpleReverseInsertionNonDerivedElement) {
{
ListContainer<NonDerivedElement>::ReverseIterator iter = list.rbegin();
- for (std::vector<NonDerivedElement*>::reverse_iterator nde_iter =
- nde_list.rbegin();
- nde_iter != nde_list.rend(); ++nde_iter) {
+ for (auto nde_iter = nde_list.rbegin(); nde_iter != nde_list.rend();
+ ++nde_iter) {
EXPECT_EQ(*nde_iter, *iter);
++iter;
}
diff --git a/chromium/cc/base/rolling_time_delta_history.cc b/chromium/cc/base/rolling_time_delta_history.cc
index a79722ccfc3..dde20a7bbe4 100644
--- a/chromium/cc/base/rolling_time_delta_history.cc
+++ b/chromium/cc/base/rolling_time_delta_history.cc
@@ -24,7 +24,7 @@ void RollingTimeDeltaHistory::InsertSample(base::TimeDelta time) {
chronological_sample_deque_.pop_front();
}
- TimeDeltaMultiset::iterator it = sample_set_.insert(time);
+ auto it = sample_set_.insert(time);
chronological_sample_deque_.push_back(it);
percentile_cache_.clear();
}
@@ -63,13 +63,13 @@ base::TimeDelta RollingTimeDeltaHistory::ComputePercentile(
if (num_smaller_samples > sample_set_.size() / 2) {
size_t num_larger_samples = sample_set_.size() - num_smaller_samples - 1;
- TimeDeltaMultiset::const_reverse_iterator it = sample_set_.rbegin();
+ auto it = sample_set_.rbegin();
for (size_t i = 0; i < num_larger_samples; i++)
it++;
return *it;
}
- TimeDeltaMultiset::const_iterator it = sample_set_.begin();
+ auto it = sample_set_.begin();
for (size_t i = 0; i < num_smaller_samples; i++)
it++;
return *it;
diff --git a/chromium/cc/base/unique_notifier.cc b/chromium/cc/base/unique_notifier.cc
index d7b59ae916e..2ad6a291c73 100644
--- a/chromium/cc/base/unique_notifier.cc
+++ b/chromium/cc/base/unique_notifier.cc
@@ -22,11 +22,13 @@ UniqueNotifier::UniqueNotifier(base::SequencedTaskRunner* task_runner,
UniqueNotifier::~UniqueNotifier() = default;
void UniqueNotifier::Cancel() {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
base::AutoLock hold(lock_);
notification_pending_ = false;
}
void UniqueNotifier::Schedule() {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
base::AutoLock hold(lock_);
if (notification_pending_)
return;
@@ -38,6 +40,7 @@ void UniqueNotifier::Schedule() {
}
void UniqueNotifier::Notify() {
+ DCHECK(task_runner_->RunsTasksInCurrentSequence());
// Scope to release |lock_| before running the closure.
{
base::AutoLock hold(lock_);
diff --git a/chromium/cc/base/unique_notifier.h b/chromium/cc/base/unique_notifier.h
index e4fb7b55732..d753e1f352a 100644
--- a/chromium/cc/base/unique_notifier.h
+++ b/chromium/cc/base/unique_notifier.h
@@ -16,6 +16,8 @@ class SequencedTaskRunner;
namespace cc {
+// Callers must ensure that they only schedule the notifier on the same thread
+// that the provided |task_runner| runs on.
class CC_BASE_EXPORT UniqueNotifier {
public:
// Configure this notifier to issue the |closure| notification when scheduled.
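A minimal usage sketch of the contract described by the new comment and the
DCHECKs added in unique_notifier.cc: Schedule() is only called on the sequence
that |task_runner| runs on, and repeated schedules collapse into a single
notification. The client class below is hypothetical:

  #include "base/bind.h"
  #include "base/sequenced_task_runner.h"
  #include "cc/base/unique_notifier.h"

  class TileWorkScheduler {
   public:
    explicit TileWorkScheduler(base::SequencedTaskRunner* task_runner)
        : notifier_(task_runner,
                    base::BindRepeating(&TileWorkScheduler::DoWork,
                                        base::Unretained(this))) {}

    // Must be called on |task_runner|'s sequence; many calls made before the
    // posted task runs still produce exactly one DoWork() notification.
    void RequestWork() { notifier_.Schedule(); }

   private:
    void DoWork() { /* perform the coalesced work */ }

    cc::UniqueNotifier notifier_;
  };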
diff --git a/chromium/cc/base/unique_notifier_unittest.cc b/chromium/cc/base/unique_notifier_unittest.cc
index 2c547588198..cfac61efaef 100644
--- a/chromium/cc/base/unique_notifier_unittest.cc
+++ b/chromium/cc/base/unique_notifier_unittest.cc
@@ -29,6 +29,24 @@ class UniqueNotifierTest : public testing::Test {
int notification_count_;
};
+// Schedule() and Notify() need to happen on the same thread: multiple
+// schedules may result in multiple runs when the notify task is posted to a
+// different thread, so a sequence check is used to guard against this.
+// Example which may result in multiple runs:
+// base::Thread notifier_thread("NotifierThread");
+// notifier_thread.Start();
+// UniqueNotifier notifier(
+// notifier_thread.task_runner().get(),
+// base::BindRepeating(&UniqueNotifierTest::Notify,
+// base::Unretained(this)));
+// EXPECT_EQ(0, NotificationCount());
+// for (int i = 0; i < 50000; ++i)
+// notifier.Schedule();
+// base::RunLoop().RunUntilIdle();
+
+// notifier_thread.Stop();
+// EXPECT_LE(1, NotificationCount());
+// 50000 can be any number bigger than 1; the larger it is, the more likely
+// the notification is to run more than once.
TEST_F(UniqueNotifierTest, Schedule) {
{
UniqueNotifier notifier(
@@ -43,7 +61,8 @@ TEST_F(UniqueNotifierTest, Schedule) {
base::RunLoop().RunUntilIdle();
EXPECT_EQ(1, NotificationCount());
- // Multiple schedules should only result in one run.
+ // UniqueNotifier can only run on the main thread, and multiple schedules
+ // should result in one run.
for (int i = 0; i < 5; ++i)
notifier.Schedule();
diff --git a/chromium/cc/cc.gni b/chromium/cc/cc.gni
index 44a5eebc517..8539c01a8b2 100644
--- a/chromium/cc/cc.gni
+++ b/chromium/cc/cc.gni
@@ -6,24 +6,16 @@ import("//build/config/jumbo.gni")
import("//testing/test.gni")
cc_remove_configs = []
-cc_add_configs = [ "//build/config:precompiled_headers" ]
+cc_add_configs = [
+ "//build/config:precompiled_headers",
+ "//build/config/compiler:wexit_time_destructors",
+]
-if (!is_debug && (is_win || is_android)) {
+if (!is_debug) {
cc_remove_configs += [ "//build/config/compiler:default_optimization" ]
cc_add_configs += [ "//build/config/compiler:optimize_max" ]
}
-template("cc_source_set") {
- jumbo_source_set(target_name) {
- forward_variables_from(invoker, "*", [ "configs" ])
- if (defined(invoker.configs)) {
- configs += invoker.configs
- }
- configs -= cc_remove_configs
- configs += cc_add_configs
- }
-}
-
template("cc_component") {
jumbo_component(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
@@ -35,7 +27,7 @@ template("cc_component") {
}
}
-template("cc_static_library") {
+template("cc_test_static_library") {
jumbo_static_library(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
if (defined(invoker.configs)) {
@@ -43,6 +35,9 @@ template("cc_static_library") {
}
configs -= cc_remove_configs
configs += cc_add_configs
+
+ # Not needed in test code.
+ configs -= [ "//build/config/compiler:wexit_time_destructors" ]
}
}
@@ -54,5 +49,8 @@ template("cc_test") {
}
configs -= cc_remove_configs
configs += cc_add_configs
+
+ # Not needed in test code.
+ configs -= [ "//build/config/compiler:wexit_time_destructors" ]
}
}
diff --git a/chromium/cc/input/main_thread_scrolling_reason.cc b/chromium/cc/input/main_thread_scrolling_reason.cc
index 6653ac98fde..a85b60f97a7 100644
--- a/chromium/cc/input/main_thread_scrolling_reason.cc
+++ b/chromium/cc/input/main_thread_scrolling_reason.cc
@@ -71,6 +71,10 @@ void MainThreadScrollingReason::AddToTracedValue(
traced_value.AppendString("Non-invertible transform");
if (reasons & kPageBasedScrolling)
traced_value.AppendString("Page-based scrolling");
+ if (reasons & kWheelEventHandlerRegion)
+ traced_value.AppendString("Wheel event handler region");
+ if (reasons & kTouchEventHandlerRegion)
+ traced_value.AppendString("Touch event handler region");
traced_value.EndArray();
}
diff --git a/chromium/cc/input/main_thread_scrolling_reason.h b/chromium/cc/input/main_thread_scrolling_reason.h
index b926c75117d..cf2deff0ce8 100644
--- a/chromium/cc/input/main_thread_scrolling_reason.h
+++ b/chromium/cc/input/main_thread_scrolling_reason.h
@@ -60,12 +60,14 @@ struct CC_EXPORT MainThreadScrollingReason {
kContinuingMainThreadScroll = 1 << 10,
kNonInvertibleTransform = 1 << 11,
kPageBasedScrolling = 1 << 12,
+ kWheelEventHandlerRegion = 1 << 23,
+ kTouchEventHandlerRegion = 1 << 24,
// The maximum number of flags in this struct (excluding itself).
// New flags should increment this number but it should never be decremented
// because the values are used in UMA histograms. It should also be noted
// that it excludes the kNotScrollingOnMain value.
- kMainThreadScrollingReasonCount = 23,
+ kMainThreadScrollingReasonCount = 25,
};
static const uint32_t kNonCompositedReasons =
@@ -91,7 +93,8 @@ struct CC_EXPORT MainThreadScrollingReason {
uint32_t reasons_set_by_compositor =
kNonFastScrollableRegion | kFailedHitTest | kNoScrollingLayer |
kNotScrollable | kContinuingMainThreadScroll | kNonInvertibleTransform |
- kPageBasedScrolling;
+ kPageBasedScrolling | kWheelEventHandlerRegion |
+ kTouchEventHandlerRegion;
return (reasons & reasons_set_by_compositor) == reasons;
}
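An illustrative sketch (not part of the diff) of how the two new bits combine
and surface through AsText(), matching the updated unittest expectation below;
the helper function is hypothetical:

  #include <cstdint>
  #include <string>

  #include "cc/input/main_thread_scrolling_reason.h"

  std::string DescribeCompositorFallback() {
    uint32_t reasons = cc::MainThreadScrollingReason::kWheelEventHandlerRegion |
                       cc::MainThreadScrollingReason::kTouchEventHandlerRegion;
    // Expected: "Wheel event handler region,Touch event handler region".
    return cc::MainThreadScrollingReason::AsText(reasons);
  }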
diff --git a/chromium/cc/input/main_thread_scrolling_reason_unittest.cc b/chromium/cc/input/main_thread_scrolling_reason_unittest.cc
index ec49ff98001..9f8e6b641d8 100644
--- a/chromium/cc/input/main_thread_scrolling_reason_unittest.cc
+++ b/chromium/cc/input/main_thread_scrolling_reason_unittest.cc
@@ -31,7 +31,9 @@ TEST_F(MainThreadScrollingReasonTest, AsText) {
"Not scrollable,"
"Continuing main thread scroll,"
"Non-invertible transform,"
- "Page-based scrolling",
+ "Page-based scrolling,"
+ "Wheel event handler region,"
+ "Touch event handler region",
MainThreadScrollingReason::AsText(0xffffffffu));
}
diff --git a/chromium/cc/input/scroll_snap_data.h b/chromium/cc/input/scroll_snap_data.h
index e44b9d882e2..c18f7374229 100644
--- a/chromium/cc/input/scroll_snap_data.h
+++ b/chromium/cc/input/scroll_snap_data.h
@@ -63,26 +63,26 @@ struct ScrollSnapType {
struct ScrollSnapAlign {
ScrollSnapAlign()
- : alignment_inline(SnapAlignment::kNone),
- alignment_block(SnapAlignment::kNone) {}
+ : alignment_block(SnapAlignment::kNone),
+ alignment_inline(SnapAlignment::kNone) {}
explicit ScrollSnapAlign(SnapAlignment alignment)
- : alignment_inline(alignment), alignment_block(alignment) {}
+ : alignment_block(alignment), alignment_inline(alignment) {}
- ScrollSnapAlign(SnapAlignment i, SnapAlignment b)
- : alignment_inline(i), alignment_block(b) {}
+ ScrollSnapAlign(SnapAlignment b, SnapAlignment i)
+ : alignment_block(b), alignment_inline(i) {}
bool operator==(const ScrollSnapAlign& other) const {
- return alignment_inline == other.alignment_inline &&
- alignment_block == other.alignment_block;
+ return alignment_block == other.alignment_block &&
+ alignment_inline == other.alignment_inline;
}
bool operator!=(const ScrollSnapAlign& other) const {
return !(*this == other);
}
- SnapAlignment alignment_inline;
SnapAlignment alignment_block;
+ SnapAlignment alignment_inline;
};
// We should really use gfx::RangeF. However, it includes windows.h which would
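A small sketch of the reordering above (mirroring the unittest updates below):
after this change the two-argument ScrollSnapAlign constructor takes the block
alignment first and the inline alignment second. The helper is hypothetical:

  #include "cc/input/scroll_snap_data.h"

  void BuildSnapAlignments() {
    cc::ScrollSnapAlign snap_x_only(cc::SnapAlignment::kNone,    // block axis
                                    cc::SnapAlignment::kStart);  // inline axis
    cc::ScrollSnapAlign snap_both(cc::SnapAlignment::kStart);    // both axes
  }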
diff --git a/chromium/cc/input/scroll_snap_data_unittest.cc b/chromium/cc/input/scroll_snap_data_unittest.cc
index a688448ab2d..d2347148d0b 100644
--- a/chromium/cc/input/scroll_snap_data_unittest.cc
+++ b/chromium/cc/input/scroll_snap_data_unittest.cc
@@ -59,7 +59,7 @@ TEST_F(ScrollSnapDataTest, UnreachableSnapPositionCalculation) {
SnapContainerData container(
ScrollSnapType(false, SnapAxis::kBoth, SnapStrictness::kMandatory),
gfx::RectF(0, 0, 200, 200), gfx::ScrollOffset(100, 100));
- SnapAreaData area(ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kEnd),
+ SnapAreaData area(ScrollSnapAlign(SnapAlignment::kEnd, SnapAlignment::kStart),
gfx::RectF(200, 0, 100, 100), false);
container.AddSnapAreaData(area);
gfx::ScrollOffset current_position(50, 50);
@@ -78,10 +78,10 @@ TEST_F(ScrollSnapDataTest, FindsClosestSnapPositionIndependently) {
ScrollSnapType(false, SnapAxis::kBoth, SnapStrictness::kMandatory),
gfx::RectF(0, 0, 200, 200), gfx::ScrollOffset(600, 800));
SnapAreaData snap_x_only(
- ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
+ ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
gfx::RectF(80, 0, 150, 150), false);
SnapAreaData snap_y_only(
- ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
+ ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
gfx::RectF(0, 70, 150, 150), false);
SnapAreaData snap_on_both(ScrollSnapAlign(SnapAlignment::kStart),
gfx::RectF(50, 150, 150, 150), false);
@@ -101,10 +101,10 @@ TEST_F(ScrollSnapDataTest, FindsClosestSnapPositionOnAxisValueBoth) {
ScrollSnapType(false, SnapAxis::kBoth, SnapStrictness::kMandatory),
gfx::RectF(0, 0, 200, 200), gfx::ScrollOffset(600, 800));
SnapAreaData snap_x_only(
- ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
+ ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
gfx::RectF(80, 0, 150, 150), false);
SnapAreaData snap_y_only(
- ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
+ ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
gfx::RectF(0, 70, 150, 150), false);
SnapAreaData snap_on_both(ScrollSnapAlign(SnapAlignment::kStart),
gfx::RectF(50, 150, 150, 150), false);
@@ -124,10 +124,10 @@ TEST_F(ScrollSnapDataTest, DoesNotSnapOnNonScrolledAxis) {
ScrollSnapType(false, SnapAxis::kBoth, SnapStrictness::kMandatory),
gfx::RectF(0, 0, 200, 200), gfx::ScrollOffset(600, 800));
SnapAreaData snap_x_only(
- ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
+ ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
gfx::RectF(80, 0, 150, 150), false);
SnapAreaData snap_y_only(
- ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
+ ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
gfx::RectF(0, 70, 150, 150), false);
gfx::ScrollOffset current_position(100, 100);
container.AddSnapAreaData(snap_x_only);
@@ -144,10 +144,10 @@ TEST_F(ScrollSnapDataTest, DoesNotSnapOnNonVisibleAreas) {
ScrollSnapType(false, SnapAxis::kBoth, SnapStrictness::kMandatory),
gfx::RectF(0, 0, 200, 200), gfx::ScrollOffset(600, 800));
SnapAreaData snap_x_only(
- ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
+ ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
gfx::RectF(300, 400, 100, 100), false);
SnapAreaData snap_y_only(
- ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
+ ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
gfx::RectF(400, 300, 100, 100), false);
gfx::ScrollOffset current_position(0, 0);
container.AddSnapAreaData(snap_x_only);
@@ -169,13 +169,13 @@ TEST_F(ScrollSnapDataTest, SnapOnClosestAxisFirstIfVisibilityConflicts) {
// After that, we look for another snap point on y axis which does not
// conflict with the snap point on x.
SnapAreaData snap_x(
- ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
+ ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
gfx::RectF(150, 0, 100, 100), false);
SnapAreaData snap_y1(
- ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
+ ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
gfx::RectF(0, 180, 100, 100), false);
SnapAreaData snap_y2(
- ScrollSnapAlign(SnapAlignment::kNone, SnapAlignment::kStart),
+ ScrollSnapAlign(SnapAlignment::kStart, SnapAlignment::kNone),
gfx::RectF(250, 80, 100, 100), false);
container.AddSnapAreaData(snap_x);
container.AddSnapAreaData(snap_y1);
diff --git a/chromium/cc/layers/append_quads_data.h b/chromium/cc/layers/append_quads_data.h
index a681a97b489..d207cf28626 100644
--- a/chromium/cc/layers/append_quads_data.h
+++ b/chromium/cc/layers/append_quads_data.h
@@ -25,6 +25,12 @@ class CC_EXPORT AppendQuadsData {
int64_t visible_layer_area = 0;
int64_t approximated_visible_content_area = 0;
+ // TODO(enne): These are temporary to evaluate mask layer optimizations.
+ int num_mask_layers = 0;
+ int num_rounded_corner_mask_layers = 0;
+ int64_t visible_mask_layer_area = 0;
+ int64_t visible_rounded_corner_mask_layer_area = 0;
+
// This is total of the following two areas.
int64_t checkerboarded_visible_content_area = 0;
// This is the area outside interest rect.
diff --git a/chromium/cc/layers/deadline_policy.h b/chromium/cc/layers/deadline_policy.h
index 156c6df9432..268ca233b98 100644
--- a/chromium/cc/layers/deadline_policy.h
+++ b/chromium/cc/layers/deadline_policy.h
@@ -7,6 +7,7 @@
#include <cstdint>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/optional.h"
#include "cc/cc_export.h"
diff --git a/chromium/cc/layers/heads_up_display_layer.cc b/chromium/cc/layers/heads_up_display_layer.cc
index a07431ef5d8..e8cd98328ae 100644
--- a/chromium/cc/layers/heads_up_display_layer.cc
+++ b/chromium/cc/layers/heads_up_display_layer.cc
@@ -8,8 +8,6 @@
#include "base/trace_event/trace_event.h"
#include "cc/layers/heads_up_display_layer_impl.h"
-#include "cc/trees/layer_tree_host.h"
-#include "cc/trees/layer_tree_settings.h"
namespace cc {
@@ -29,28 +27,6 @@ HeadsUpDisplayLayer::HeadsUpDisplayLayer()
HeadsUpDisplayLayer::~HeadsUpDisplayLayer() = default;
-void HeadsUpDisplayLayer::PrepareForCalculateDrawProperties(
- const gfx::Size& device_viewport, float device_scale_factor) {
- gfx::Size device_viewport_in_layout_pixels = gfx::Size(
- device_viewport.width() / device_scale_factor,
- device_viewport.height() / device_scale_factor);
-
- gfx::Size bounds;
- gfx::Transform matrix;
- matrix.MakeIdentity();
-
- if (layer_tree_host()->GetDebugState().ShowHudRects()) {
- bounds = device_viewport_in_layout_pixels;
- } else {
- int size = 256;
- bounds.SetSize(size, size);
- matrix.Translate(device_viewport_in_layout_pixels.width() - size, 0.0);
- }
-
- SetBounds(bounds);
- SetTransform(matrix);
-}
-
bool HeadsUpDisplayLayer::HasDrawableContent() const {
return true;
}
diff --git a/chromium/cc/layers/heads_up_display_layer.h b/chromium/cc/layers/heads_up_display_layer.h
index 4cb2946dbdf..ee3402e67c2 100644
--- a/chromium/cc/layers/heads_up_display_layer.h
+++ b/chromium/cc/layers/heads_up_display_layer.h
@@ -20,9 +20,6 @@ class CC_EXPORT HeadsUpDisplayLayer : public Layer {
public:
static scoped_refptr<HeadsUpDisplayLayer> Create();
- void PrepareForCalculateDrawProperties(
- const gfx::Size& device_viewport, float device_scale_factor);
-
std::unique_ptr<LayerImpl> CreateLayerImpl(LayerTreeImpl* tree_impl) override;
// Layer overrides.
diff --git a/chromium/cc/layers/heads_up_display_layer_impl.cc b/chromium/cc/layers/heads_up_display_layer_impl.cc
index 3ee5b67c27b..ee4a8ff3813 100644
--- a/chromium/cc/layers/heads_up_display_layer_impl.cc
+++ b/chromium/cc/layers/heads_up_display_layer_impl.cc
@@ -33,6 +33,9 @@
#include "components/viz/common/resources/platform_color.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/client/shared_image_interface.h"
+#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "skia/ext/platform_canvas.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkPaint.h"
@@ -101,12 +104,13 @@ std::unique_ptr<LayerImpl> HeadsUpDisplayLayerImpl::CreateLayerImpl(
class HudGpuBacking : public ResourcePool::GpuBacking {
public:
~HudGpuBacking() override {
- gpu::gles2::GLES2Interface* gl = compositor_context_provider->ContextGL();
+ if (mailbox.IsZero())
+ return;
+ auto* sii = compositor_context_provider->SharedImageInterface();
if (returned_sync_token.HasData())
- gl->WaitSyncTokenCHROMIUM(returned_sync_token.GetConstData());
- if (mailbox_sync_token.HasData())
- gl->WaitSyncTokenCHROMIUM(mailbox_sync_token.GetConstData());
- gl->DeleteTextures(1, &texture_id);
+ sii->DestroySharedImage(returned_sync_token, mailbox);
+ else if (mailbox_sync_token.HasData())
+ sii->DestroySharedImage(mailbox_sync_token, mailbox);
}
void OnMemoryDump(
@@ -114,15 +118,15 @@ class HudGpuBacking : public ResourcePool::GpuBacking {
const base::trace_event::MemoryAllocatorDumpGuid& buffer_dump_guid,
uint64_t tracing_process_id,
int importance) const override {
- auto texture_tracing_guid = gl::GetGLTextureClientGUIDForTracing(
- compositor_context_provider->ContextSupport()->ShareGroupTracingGUID(),
- texture_id);
- pmd->CreateSharedGlobalAllocatorDump(texture_tracing_guid);
- pmd->AddOwnershipEdge(buffer_dump_guid, texture_tracing_guid, importance);
+ if (mailbox.IsZero())
+ return;
+
+ auto tracing_guid = gpu::GetSharedImageGUIDForTracing(mailbox);
+ pmd->CreateSharedGlobalAllocatorDump(tracing_guid);
+ pmd->AddOwnershipEdge(buffer_dump_guid, tracing_guid, importance);
}
viz::ContextProvider* compositor_context_provider;
- GLuint texture_id;
};
class HudSoftwareBacking : public ResourcePool::SoftwareBacking {
@@ -229,22 +233,22 @@ void HeadsUpDisplayLayerImpl::UpdateHudTexture(
if (!pool_resource.gpu_backing()) {
auto backing = std::make_unique<HudGpuBacking>();
backing->compositor_context_provider = context_provider;
- auto alloc = viz::TextureAllocation::MakeTextureId(
- context_provider->ContextGL(),
- context_provider->ContextCapabilities(), pool_resource.format(),
+ backing->InitOverlayCandidateAndTextureTarget(
+ pool_resource.format(), context_provider->ContextCapabilities(),
layer_tree_impl()
->settings()
- .resource_settings.use_gpu_memory_buffer_resources,
- gpu_raster);
- viz::TextureAllocation::AllocateStorage(
- context_provider->ContextGL(),
- context_provider->ContextCapabilities(), pool_resource.format(),
- pool_resource.size(), alloc, pool_resource.color_space());
- backing->texture_id = alloc.texture_id;
- backing->texture_target = alloc.texture_target;
- backing->overlay_candidate = alloc.overlay_candidate;
- context_provider->ContextGL()->ProduceTextureDirectCHROMIUM(
- backing->texture_id, backing->mailbox.name);
+ .resource_settings.use_gpu_memory_buffer_resources);
+ auto* sii = context_provider->SharedImageInterface();
+ uint32_t flags = gpu::SHARED_IMAGE_USAGE_GLES2;
+ if (gpu_raster)
+ flags |= gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT;
+ if (backing->overlay_candidate)
+ flags |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
+ backing->mailbox =
+ sii->CreateSharedImage(pool_resource.format(), pool_resource.size(),
+ pool_resource.color_space(), flags);
+ gpu::gles2::GLES2Interface* gl = context_provider->ContextGL();
+ gl->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
pool_resource.set_gpu_backing(std::move(backing));
} else if (pool_resource.gpu_backing()->returned_sync_token.HasData()) {
context_provider->ContextGL()->WaitSyncTokenCHROMIUM(
@@ -284,11 +288,13 @@ void HeadsUpDisplayLayerImpl::UpdateHudTexture(
viz::ContextProvider* context_provider =
layer_tree_impl()->context_provider();
gpu::gles2::GLES2Interface* gl = context_provider->ContextGL();
+ GLuint mailbox_texture_id =
+ gl->CreateAndConsumeTextureCHROMIUM(backing->mailbox.name);
{
ScopedGpuRaster gpu_raster(context_provider);
viz::ClientResourceProvider::ScopedSkSurface scoped_surface(
- context_provider->GrContext(), backing->texture_id,
+ context_provider->GrContext(), mailbox_texture_id,
backing->texture_target, pool_resource.size(), pool_resource.format(),
false /* can_use_lcd_text */, 0 /* msaa_sample_count */);
SkSurface* surface = scoped_surface.surface();
@@ -299,6 +305,7 @@ void HeadsUpDisplayLayerImpl::UpdateHudTexture(
DrawHudContents(surface->getCanvas());
}
+ gl->DeleteTextures(1, &mailbox_texture_id);
backing->mailbox_sync_token =
viz::ClientResourceProvider::GenerateSyncTokenHelper(gl);
} else if (draw_mode == DRAW_MODE_HARDWARE) {
@@ -322,12 +329,15 @@ void HeadsUpDisplayLayerImpl::UpdateHudTexture(
TRACE_EVENT0("cc", "UploadHudTexture");
SkPixmap pixmap;
staging_surface_->peekPixels(&pixmap);
- gl->BindTexture(backing->texture_target, backing->texture_id);
+ GLuint mailbox_texture_id =
+ gl->CreateAndConsumeTextureCHROMIUM(backing->mailbox.name);
+ gl->BindTexture(backing->texture_target, mailbox_texture_id);
DCHECK(GLSupportsFormat(pool_resource.format()));
gl->TexSubImage2D(
backing->texture_target, 0, 0, 0, pool_resource.size().width(),
pool_resource.size().height(), GLDataFormat(pool_resource.format()),
GLDataType(pool_resource.format()), pixmap.addr());
+ gl->DeleteTextures(1, &mailbox_texture_id);
backing->mailbox_sync_token =
viz::ClientResourceProvider::GenerateSyncTokenHelper(gl);
} else {
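The hunk above replaces direct GL texture allocation for the HUD backing with
the SharedImageInterface. A condensed sketch of the lifecycle it adopts,
assembled from the calls in the diff (includes beyond those added by this
change and all bookkeeping are omitted; the helper and its parameters are
illustrative):

  #include "components/viz/client/client_resource_provider.h"
  #include "gpu/command_buffer/client/gles2_interface.h"
  #include "gpu/command_buffer/client/shared_image_interface.h"
  #include "gpu/command_buffer/common/shared_image_usage.h"

  void DrawIntoSharedImageBacking(viz::ContextProvider* context_provider,
                                  viz::ResourceFormat format,
                                  const gfx::Size& size,
                                  const gfx::ColorSpace& color_space) {
    auto* sii = context_provider->SharedImageInterface();
    gpu::Mailbox mailbox = sii->CreateSharedImage(
        format, size, color_space, gpu::SHARED_IMAGE_USAGE_GLES2);

    gpu::gles2::GLES2Interface* gl = context_provider->ContextGL();
    gl->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());

    // Consume the mailbox as a local GL texture, draw or upload into it, then
    // drop the texture id and record a sync token for that work.
    GLuint texture_id = gl->CreateAndConsumeTextureCHROMIUM(mailbox.name);
    // ... render HUD contents or TexSubImage2D into |texture_id| ...
    gl->DeleteTextures(1, &texture_id);
    gpu::SyncToken sync_token =
        viz::ClientResourceProvider::GenerateSyncTokenHelper(gl);

    // When the backing goes away, release the shared image with the newest
    // sync token that used it.
    sii->DestroySharedImage(sync_token, mailbox);
  }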
diff --git a/chromium/cc/layers/layer.cc b/chromium/cc/layers/layer.cc
index 269226aca78..dfc03bb5898 100644
--- a/chromium/cc/layers/layer.cc
+++ b/chromium/cc/layers/layer.cc
@@ -53,7 +53,9 @@ Layer::Inputs::Inputs(int layer_id)
sorting_context_id(0),
use_parent_backface_visibility(false),
background_color(0),
+ backdrop_filter_quality(1.0f),
scrollable(false),
+ is_scrollbar(false),
user_scrollable_horizontal(true),
user_scrollable_vertical(true),
main_thread_scrolling_reasons(
@@ -94,6 +96,7 @@ Layer::Layer()
may_contain_video_(false),
needs_show_scrollbars_(false),
has_transform_node_(false),
+ is_rounded_corner_mask_(false),
subtree_has_copy_request_(false),
safe_opaque_background_color_(0) {}
@@ -119,24 +122,30 @@ void Layer::SetLayerTreeHost(LayerTreeHost* host) {
if (layer_tree_host_ == host)
return;
+ bool property_tree_indices_invalid = false;
if (layer_tree_host_) {
layer_tree_host_->property_trees()->needs_rebuild = true;
layer_tree_host_->UnregisterLayer(this);
- if (!layer_tree_host_->IsUsingLayerLists() && inputs_.element_id) {
+ if (inputs_.element_id) {
layer_tree_host_->UnregisterElement(inputs_.element_id,
ElementListType::ACTIVE);
}
+ if (!layer_tree_host_->IsUsingLayerLists())
+ property_tree_indices_invalid = true;
}
if (host) {
host->property_trees()->needs_rebuild = true;
host->RegisterLayer(this);
- if (!host->IsUsingLayerLists() && inputs_.element_id) {
+ if (inputs_.element_id)
host->RegisterElement(inputs_.element_id, ElementListType::ACTIVE, this);
- }
+ if (!host->IsUsingLayerLists())
+ property_tree_indices_invalid = true;
}
layer_tree_host_ = host;
- InvalidatePropertyTreesIndices();
+
+ if (property_tree_indices_invalid)
+ InvalidatePropertyTreesIndices();
// When changing hosts, the layer needs to commit its properties to the impl
// side for the new host.
@@ -236,8 +245,8 @@ void Layer::RemoveChildOrDependent(Layer* child) {
return;
}
- for (LayerList::iterator iter = inputs_.children.begin();
- iter != inputs_.children.end(); ++iter) {
+ for (auto iter = inputs_.children.begin(); iter != inputs_.children.end();
+ ++iter) {
if (iter->get() != child)
continue;
@@ -290,7 +299,7 @@ void Layer::SetBounds(const gfx::Size& size) {
SetPropertyTreesNeedRebuild();
}
- if (scrollable()) {
+ if (scrollable() && !layer_tree_host_->IsUsingLayerLists()) {
auto& scroll_tree = layer_tree_host_->property_trees()->scroll_tree;
if (auto* scroll_node = scroll_tree.Node(scroll_tree_index_))
scroll_node->bounds = inputs_.bounds;
@@ -309,7 +318,7 @@ void Layer::SetOverscrollBehavior(const OverscrollBehavior& behavior) {
if (!layer_tree_host_)
return;
- if (scrollable()) {
+ if (scrollable() && !layer_tree_host_->IsUsingLayerLists()) {
auto& scroll_tree = layer_tree_host_->property_trees()->scroll_tree;
if (auto* scroll_node = scroll_tree.Node(scroll_tree_index_))
scroll_node->overscroll_behavior = behavior;
@@ -328,7 +337,7 @@ void Layer::SetSnapContainerData(base::Optional<SnapContainerData> data) {
if (!layer_tree_host_)
return;
- if (scrollable()) {
+ if (scrollable() && !layer_tree_host_->IsUsingLayerLists()) {
auto& scroll_tree = layer_tree_host_->property_trees()->scroll_tree;
if (auto* scroll_node = scroll_tree.Node(scroll_tree_index_))
scroll_node->snap_container_data = inputs_.snap_container_data;
@@ -355,6 +364,57 @@ void Layer::RemoveAllChildren() {
}
}
+void Layer::SetChildLayerList(LayerList new_children) {
+ DCHECK(layer_tree_host_->IsUsingLayerLists());
+
+ // Early out without calling |LayerTreeHost::SetNeedsFullTreeSync| if no
+ // layer has changed.
+ if (children() == new_children)
+ return;
+
+ // Remove existing children that will not be in the new child list.
+ {
+ std::unordered_set<Layer*> children_to_remove;
+ for (auto& existing_child : children())
+ children_to_remove.insert(existing_child.get());
+ for (auto& new_child : new_children)
+ children_to_remove.erase(new_child.get());
+ for (auto* child : children_to_remove) {
+ child->SetParent(nullptr);
+ AddDrawableDescendants(-child->NumDescendantsThatDrawContent() -
+ (child->DrawsContent() ? 1 : 0));
+ }
+ }
+
+ // Mark existing children as changed if their order changes.
+ auto existing_child_it = children().begin();
+ for (auto& child : new_children) {
+ if (child->parent() == this) {
+ // Search forward in the existing child list to find the new child.
+ existing_child_it = std::find(existing_child_it, children().end(), child);
+ if (existing_child_it == children().end())
+ child->SetSubtreePropertyChanged();
+ }
+ }
+
+ // Process new children and mark them as changed.
+ // Because this changes the child's parent, it must be after code that uses
+ // |child->parent()| such as the above loop.
+ for (auto& child : new_children) {
+ if (child->parent() != this) {
+ child->RemoveFromParent();
+ AddDrawableDescendants(child->NumDescendantsThatDrawContent() +
+ (child->DrawsContent() ? 1 : 0));
+ child->SetParent(this);
+ child->SetSubtreePropertyChanged();
+ }
+ }
+
+ inputs_.children = std::move(new_children);
+
+ layer_tree_host_->SetNeedsFullTreeSync();
+}
+
bool Layer::HasAncestor(const Layer* ancestor) const {
for (const Layer* layer = parent(); layer; layer = layer->parent()) {
if (layer == ancestor)
@@ -495,6 +555,10 @@ void Layer::SetBackgroundFilters(const FilterOperations& filters) {
SetNeedsCommit();
}
+void Layer::SetBackdropFilterQuality(const float quality) {
+ inputs_.backdrop_filter_quality = quality;
+}
+
void Layer::SetFiltersOrigin(const gfx::PointF& filters_origin) {
DCHECK(IsPropertyChangeAllowed());
if (inputs_.filters_origin == filters_origin)
@@ -517,17 +581,21 @@ void Layer::SetOpacity(float opacity) {
bool force_rebuild = opacity == 1.f || inputs_.opacity == 1.f;
inputs_.opacity = opacity;
SetSubtreePropertyChanged();
- if (layer_tree_host_ && !force_rebuild) {
- PropertyTrees* property_trees = layer_tree_host_->property_trees();
- if (EffectNode* node =
- property_trees->effect_tree.Node(effect_tree_index())) {
- node->opacity = opacity;
- node->effect_changed = true;
- property_trees->effect_tree.set_needs_update(true);
+
+ if (layer_tree_host_ && !layer_tree_host_->IsUsingLayerLists()) {
+ if (!force_rebuild) {
+ PropertyTrees* property_trees = layer_tree_host_->property_trees();
+ if (EffectNode* node =
+ property_trees->effect_tree.Node(effect_tree_index())) {
+ node->opacity = opacity;
+ node->effect_changed = true;
+ property_trees->effect_tree.set_needs_update(true);
+ }
+ } else {
+ SetPropertyTreesNeedRebuild();
}
}
- if (force_rebuild)
- SetPropertyTreesNeedRebuild();
+
SetNeedsCommit();
}
@@ -629,16 +697,19 @@ void Layer::SetPosition(const gfx::PointF& position) {
return;
SetSubtreePropertyChanged();
- if (has_transform_node_) {
- TransformNode* transform_node =
- layer_tree_host_->property_trees()->transform_tree.Node(
- transform_tree_index_);
- transform_node->update_post_local_transform(position, transform_origin());
- transform_node->needs_local_transform_update = true;
- transform_node->transform_changed = true;
- layer_tree_host_->property_trees()->transform_tree.set_needs_update(true);
- } else {
- SetPropertyTreesNeedRebuild();
+
+ if (!layer_tree_host_->IsUsingLayerLists()) {
+ if (has_transform_node_) {
+ TransformNode* transform_node =
+ layer_tree_host_->property_trees()->transform_tree.Node(
+ transform_tree_index_);
+ transform_node->update_post_local_transform(position, transform_origin());
+ transform_node->needs_local_transform_update = true;
+ transform_node->transform_changed = true;
+ layer_tree_host_->property_trees()->transform_tree.set_needs_update(true);
+ } else {
+ SetPropertyTreesNeedRebuild();
+ }
}
SetNeedsCommit();
@@ -669,7 +740,7 @@ void Layer::SetTransform(const gfx::Transform& transform) {
return;
SetSubtreePropertyChanged();
- if (layer_tree_host_) {
+ if (layer_tree_host_ && !layer_tree_host_->IsUsingLayerLists()) {
if (has_transform_node_) {
TransformNode* transform_node =
layer_tree_host_->property_trees()->transform_tree.Node(
@@ -705,18 +776,21 @@ void Layer::SetTransformOrigin(const gfx::Point3F& transform_origin) {
return;
SetSubtreePropertyChanged();
- if (has_transform_node_) {
- TransformNode* transform_node =
- layer_tree_host_->property_trees()->transform_tree.Node(
- transform_tree_index_);
- DCHECK_EQ(transform_tree_index(), transform_node->id);
- transform_node->update_pre_local_transform(transform_origin);
- transform_node->update_post_local_transform(position(), transform_origin);
- transform_node->needs_local_transform_update = true;
- transform_node->transform_changed = true;
- layer_tree_host_->property_trees()->transform_tree.set_needs_update(true);
- } else {
- SetPropertyTreesNeedRebuild();
+
+ if (!layer_tree_host_->IsUsingLayerLists()) {
+ if (has_transform_node_) {
+ TransformNode* transform_node =
+ layer_tree_host_->property_trees()->transform_tree.Node(
+ transform_tree_index_);
+ DCHECK_EQ(transform_tree_index(), transform_node->id);
+ transform_node->update_pre_local_transform(transform_origin);
+ transform_node->update_post_local_transform(position(), transform_origin);
+ transform_node->needs_local_transform_update = true;
+ transform_node->transform_changed = true;
+ layer_tree_host_->property_trees()->transform_tree.set_needs_update(true);
+ } else {
+ SetPropertyTreesNeedRebuild();
+ }
}
SetNeedsCommit();
@@ -801,6 +875,12 @@ void Layer::SetScrollOffsetFromImplSide(
void Layer::UpdateScrollOffset(const gfx::ScrollOffset& scroll_offset) {
DCHECK(scrollable());
+
+ // This function updates the property tree scroll offsets but in layer list
+ // mode this should occur during the main -> cc property tree push.
+ if (layer_tree_host_->IsUsingLayerLists())
+ return;
+
if (scroll_tree_index() == ScrollTree::kInvalidNodeId) {
// Ensure the property trees just have not been built yet but are marked for
// being built which will set the correct scroll offset values.
@@ -832,13 +912,23 @@ void Layer::SetScrollable(const gfx::Size& bounds) {
if (!layer_tree_host_)
return;
- auto& scroll_tree = layer_tree_host_->property_trees()->scroll_tree;
- auto* scroll_node = scroll_tree.Node(scroll_tree_index_);
- if (was_scrollable && scroll_node)
- scroll_node->container_bounds = inputs_.scroll_container_bounds;
- else
- SetPropertyTreesNeedRebuild();
+ if (!layer_tree_host_->IsUsingLayerLists()) {
+ auto& scroll_tree = layer_tree_host_->property_trees()->scroll_tree;
+ auto* scroll_node = scroll_tree.Node(scroll_tree_index_);
+ if (was_scrollable && scroll_node)
+ scroll_node->container_bounds = inputs_.scroll_container_bounds;
+ else
+ SetPropertyTreesNeedRebuild();
+ }
+
+ SetNeedsCommit();
+}
+
+void Layer::SetIsScrollbar(bool is_scrollbar) {
+ if (inputs_.is_scrollbar == is_scrollbar)
+ return;
+ inputs_.is_scrollbar = is_scrollbar;
SetNeedsCommit();
}
@@ -852,7 +942,7 @@ void Layer::SetUserScrollable(bool horizontal, bool vertical) {
if (!layer_tree_host_)
return;
- if (scrollable()) {
+ if (scrollable() && !layer_tree_host_->IsUsingLayerLists()) {
auto& scroll_tree = layer_tree_host_->property_trees()->scroll_tree;
if (auto* scroll_node = scroll_tree.Node(scroll_tree_index_)) {
scroll_node->user_scrollable_horizontal = horizontal;
@@ -1172,6 +1262,7 @@ void Layer::PushPropertiesTo(LayerImpl* layer) {
// deprecated. http://crbug.com/709137
layer->SetElementId(inputs_.element_id);
layer->SetHasTransformNode(has_transform_node_);
+ layer->set_is_rounded_corner_mask(is_rounded_corner_mask_);
layer->SetBackgroundColor(inputs_.background_color);
layer->SetSafeOpaqueBackgroundColor(safe_opaque_background_color_);
layer->SetBounds(inputs_.bounds);
@@ -1196,12 +1287,11 @@ void Layer::PushPropertiesTo(LayerImpl* layer) {
layer->SetTouchActionRegion(inputs_.touch_action_region);
// TODO(sunxd): Pass the correct region for wheel event handlers, see
// https://crbug.com/841364.
- if (layer_tree_host()->event_listener_properties(
- EventListenerClass::kMouseWheel) ==
- EventListenerProperties::kBlocking ||
+ EventListenerProperties mouse_wheel_props =
layer_tree_host()->event_listener_properties(
- EventListenerClass::kMouseWheel) ==
- EventListenerProperties::kBlockingAndPassive) {
+ EventListenerClass::kMouseWheel);
+ if (mouse_wheel_props == EventListenerProperties::kBlocking ||
+ mouse_wheel_props == EventListenerProperties::kBlockingAndPassive) {
layer->SetWheelEventHandlerRegion(Region(gfx::Rect(bounds())));
} else {
layer->SetWheelEventHandlerRegion(Region());
@@ -1216,6 +1306,8 @@ void Layer::PushPropertiesTo(LayerImpl* layer) {
if (scrollable())
layer->SetScrollable(inputs_.scroll_container_bounds);
+ layer->set_is_scrollbar(inputs_.is_scrollbar);
+
// The property trees must be safe to access because they will be used below
// to call |SetScrollOffsetClobberActiveValue|.
DCHECK(layer->layer_tree_impl()->lifecycle().AllowsPropertyTreeAccess());
@@ -1250,7 +1342,6 @@ void Layer::PushPropertiesTo(LayerImpl* layer) {
if (mask_layer())
DCHECK_EQ(bounds().ToString(), mask_layer()->bounds().ToString());
- layer_tree_host_->RemoveLayerShouldPushProperties(this);
}
void Layer::TakeCopyRequests(
@@ -1391,7 +1482,7 @@ ElementListType Layer::GetElementTypeForAnimation() const {
void Layer::RemoveFromClipTree() {
if (clip_children_.get()) {
std::set<Layer*> copy = *clip_children_;
- for (std::set<Layer*>::iterator it = copy.begin(); it != copy.end(); ++it)
+ for (auto it = copy.begin(); it != copy.end(); ++it)
(*it)->SetClipParent(nullptr);
}
@@ -1414,8 +1505,7 @@ void Layer::RunMicroBenchmark(MicroBenchmark* benchmark) {}
void Layer::SetElementId(ElementId id) {
DCHECK(IsPropertyChangeAllowed());
- if ((layer_tree_host_ && layer_tree_host_->IsUsingLayerLists()) ||
- inputs_.element_id == id)
+ if (inputs_.element_id == id)
return;
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"), "Layer::SetElementId",
"element", id.AsValue().release());
diff --git a/chromium/cc/layers/layer.h b/chromium/cc/layers/layer.h
index 6f7a7bce8d5..4e288f8cfb0 100644
--- a/chromium/cc/layers/layer.h
+++ b/chromium/cc/layers/layer.h
@@ -67,8 +67,6 @@ class PictureLayer;
// from parents to children.
class CC_EXPORT Layer : public base::RefCounted<Layer> {
public:
- using LayerListType = LayerList;
-
// An invalid layer id, as all layer ids are positive.
enum LayerIdLabels {
INVALID_ID = -1,
@@ -118,6 +116,9 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
// Removes all children from this layer's list of children, removing ownership
// of those children.
void RemoveAllChildren();
+ // Sets the children while minimizing changes to layers that are already
+ // children of this layer.
+ void SetChildLayerList(LayerList children);
// Returns true if |ancestor| is this layer's parent or higher ancestor.
bool HasAncestor(const Layer* ancestor) const;
@@ -282,6 +283,11 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
return inputs_.background_filters;
}
+ void SetBackdropFilterQuality(const float quality);
+ float backdrop_filter_quality() const {
+ return inputs_.backdrop_filter_quality;
+ }
+
// Set or get an optimization hint that the contents of this layer are fully
// opaque or not. If true, every pixel of content inside the layer's bounds
// must be opaque or visual errors can occur. This applies only to this layer
@@ -394,6 +400,9 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
return inputs_.scroll_container_bounds;
}
+ void SetIsScrollbar(bool is_scrollbar);
+ bool is_scrollbar() const { return inputs_.is_scrollbar; }
+
// Set or get if this layer is able to be scrolled along each axis. These are
// independent of the scrollable state, or size of the scrollable area
// specified in SetScrollable(), as these may be enabled or disabled
@@ -755,6 +764,10 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
return should_flatten_screen_space_transform_from_property_tree_;
}
+ void set_is_rounded_corner_mask(bool rounded) {
+ is_rounded_corner_mask_ = rounded;
+ }
+
protected:
friend class LayerImpl;
friend class TreeSynchronizer;
@@ -889,6 +902,7 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
FilterOperations filters;
FilterOperations background_filters;
gfx::PointF filters_origin;
+ float backdrop_filter_quality;
gfx::ScrollOffset scroll_offset;
@@ -900,6 +914,9 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
// |scroll_container_bounds|).
bool scrollable : 1;
+ // Indicates that this layer is a scrollbar.
+ bool is_scrollbar : 1;
+
bool user_scrollable_horizontal : 1;
bool user_scrollable_vertical : 1;
@@ -971,6 +988,7 @@ class CC_EXPORT Layer : public base::RefCounted<Layer> {
bool may_contain_video_ : 1;
bool needs_show_scrollbars_ : 1;
bool has_transform_node_ : 1;
+ bool is_rounded_corner_mask_ : 1;
// This value is valid only when LayerTreeHost::has_copy_request() is true
bool subtree_has_copy_request_ : 1;
SkColor safe_opaque_background_color_;
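
A rough usage sketch of the SetChildLayerList() entry point added to Layer above, intended for layer-list mode where the embedder rebuilds the child list each frame. This is not part of the patch; the layers created here are illustrative only, and cc::LayerList is assumed to be the usual vector of scoped_refptr<Layer>:

#include "cc/layers/layer.h"

void RebuildChildren(scoped_refptr<cc::Layer> root) {
  cc::LayerList new_children;
  new_children.push_back(cc::Layer::Create());
  new_children.push_back(cc::Layer::Create());
  // Unlike RemoveAllChildren() followed by AddChild(), this keeps layers that
  // are already children of |root| attached, minimizing churn in the tree.
  root->SetChildLayerList(std::move(new_children));
}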
diff --git a/chromium/cc/layers/layer_impl.cc b/chromium/cc/layers/layer_impl.cc
index e5dad6df494..3a4bf4536a8 100644
--- a/chromium/cc/layers/layer_impl.cc
+++ b/chromium/cc/layers/layer_impl.cc
@@ -45,9 +45,12 @@
#include "ui/gfx/geometry/vector2d_conversions.h"
namespace cc {
-LayerImpl::LayerImpl(LayerTreeImpl* tree_impl, int id)
+LayerImpl::LayerImpl(LayerTreeImpl* tree_impl,
+ int id,
+ bool will_always_push_properties)
: layer_id_(id),
layer_tree_impl_(tree_impl),
+ will_always_push_properties_(will_always_push_properties),
test_properties_(nullptr),
main_thread_scrolling_reasons_(
MainThreadScrollingReason::kNotScrollingOnMain),
@@ -75,15 +78,17 @@ LayerImpl::LayerImpl(LayerTreeImpl* tree_impl, int id)
debug_info_(nullptr),
has_will_change_transform_hint_(false),
needs_push_properties_(false),
+ is_scrollbar_(false),
scrollbars_hidden_(false),
needs_show_scrollbars_(false),
raster_even_if_not_drawn_(false),
- has_transform_node_(false) {
+ has_transform_node_(false),
+ is_rounded_corner_mask_(false) {
DCHECK_GT(layer_id_, 0);
DCHECK(layer_tree_impl_);
layer_tree_impl_->RegisterLayer(this);
- layer_tree_impl_->AddToElementLayerList(element_id_);
+ layer_tree_impl_->AddToElementLayerList(element_id_, this);
SetNeedsPushProperties();
}
@@ -107,7 +112,6 @@ void LayerImpl::SetDebugInfo(
std::unique_ptr<base::trace_event::TracedValue> debug_info) {
owned_debug_info_ = std::move(debug_info);
debug_info_ = owned_debug_info_.get();
- SetNeedsPushProperties();
}
void LayerImpl::SetTransformTreeIndex(int index) {
@@ -267,12 +271,17 @@ gfx::Vector2dF LayerImpl::ScrollBy(const gfx::Vector2dF& scroll) {
void LayerImpl::SetScrollable(const gfx::Size& bounds) {
if (scrollable_ && scroll_container_bounds_ == bounds)
return;
+
+ bool was_scrollable = scrollable_;
scrollable_ = true;
scroll_container_bounds_ = bounds;
// Scrollbar positions depend on the bounds.
layer_tree_impl()->SetScrollbarGeometriesNeedUpdate();
+ if (!was_scrollable)
+ layer_tree_impl()->AddScrollableLayer(this);
+
if (layer_tree_impl()->settings().scrollbar_animator ==
LayerTreeSettings::AURA_OVERLAY) {
set_needs_show_scrollbars(true);
@@ -299,6 +308,7 @@ void LayerImpl::PushPropertiesTo(LayerImpl* layer) {
layer->SetElementId(element_id_);
layer->has_transform_node_ = has_transform_node_;
+ layer->is_rounded_corner_mask_ = is_rounded_corner_mask_;
layer->offset_to_transform_parent_ = offset_to_transform_parent_;
layer->main_thread_scrolling_reasons_ = main_thread_scrolling_reasons_;
layer->should_flatten_screen_space_transform_from_property_tree_ =
@@ -338,6 +348,8 @@ void LayerImpl::PushPropertiesTo(LayerImpl* layer) {
if (scrollable_)
layer->SetScrollable(scroll_container_bounds_);
+ layer->set_is_scrollbar(is_scrollbar_);
+
// If the main thread commits multiple times before the impl thread actually
// draws, then damage tracking will become incorrect if we simply clobber the
// update_rect here. The LayerImpl's update_rect needs to accumulate (i.e.
@@ -354,7 +366,6 @@ void LayerImpl::PushPropertiesTo(LayerImpl* layer) {
layer_property_changed_from_property_trees_ = false;
needs_push_properties_ = false;
update_rect_ = gfx::Rect();
- layer_tree_impl()->RemoveLayerShouldPushProperties(this);
}
bool LayerImpl::IsAffectedByPageScale() const {
@@ -462,13 +473,11 @@ bool LayerImpl::LayerPropertyChangedNotFromPropertyTrees() const {
void LayerImpl::NoteLayerPropertyChanged() {
layer_property_changed_not_from_property_trees_ = true;
layer_tree_impl()->set_needs_update_draw_properties();
- SetNeedsPushProperties();
}
void LayerImpl::NoteLayerPropertyChangedFromPropertyTrees() {
layer_property_changed_from_property_trees_ = true;
layer_tree_impl()->set_needs_update_draw_properties();
- SetNeedsPushProperties();
}
void LayerImpl::ValidateQuadResourcesInternal(viz::DrawQuad* quad) const {
@@ -642,9 +651,7 @@ void LayerImpl::SetElementId(ElementId element_id) {
layer_tree_impl_->RemoveFromElementLayerList(element_id_);
element_id_ = element_id;
- layer_tree_impl_->AddToElementLayerList(element_id_);
-
- SetNeedsPushProperties();
+ layer_tree_impl_->AddToElementLayerList(element_id_, this);
}
void LayerImpl::SetPosition(const gfx::PointF& position) {
@@ -653,7 +660,6 @@ void LayerImpl::SetPosition(const gfx::PointF& position) {
void LayerImpl::SetUpdateRect(const gfx::Rect& update_rect) {
update_rect_ = update_rect;
- SetNeedsPushProperties();
}
void LayerImpl::AddDamageRect(const gfx::Rect& damage_rect) {
@@ -709,8 +715,11 @@ gfx::Vector2dF LayerImpl::ClampScrollToMaxScrollOffset() {
}
void LayerImpl::SetNeedsPushProperties() {
- // There's no need to push layer properties on the active tree.
- if (!needs_push_properties_ && !layer_tree_impl()->IsActiveTree()) {
+ // There's no need to push layer properties on the active tree, or when
+ // |will_always_push_properties_| is true.
+ if (will_always_push_properties_ || layer_tree_impl()->IsActiveTree())
+ return;
+ if (!needs_push_properties_) {
needs_push_properties_ = true;
layer_tree_impl()->AddLayerShouldPushProperties(this);
}
@@ -731,6 +740,11 @@ void LayerImpl::AsValueInto(base::trace_event::TracedValue* state) const {
MathUtil::AddToTracedValue("position", position_, state);
+ state->SetInteger("transform_tree_index", transform_tree_index());
+ state->SetInteger("clip_tree_index", clip_tree_index());
+ state->SetInteger("effect_tree_index", effect_tree_index());
+ state->SetInteger("scroll_tree_index", scroll_tree_index());
+
state->SetInteger("draws_content", DrawsContent());
state->SetInteger("gpu_memory_usage",
base::saturated_cast<int>(GPUMemoryUsageInBytes()));
diff --git a/chromium/cc/layers/layer_impl.h b/chromium/cc/layers/layer_impl.h
index 98a0aa1f7e1..bdfd63a5e70 100644
--- a/chromium/cc/layers/layer_impl.h
+++ b/chromium/cc/layers/layer_impl.h
@@ -401,6 +401,12 @@ class CC_EXPORT LayerImpl {
virtual size_t GPUMemoryUsageInBytes() const;
+ // Marks a layer on the pending tree as needing to push its properties to the
+ // active tree. These properties should not change during the pending tree's
+ // lifetime and are only changed by being pushed from the main thread. There
+ // are two cases where this function needs to be called: when a main-thread
+ // layer has properties that need to be pushed, or when a new LayerImpl is
+ // created on the pending tree while syncing layers from the main thread.
void SetNeedsPushProperties();
virtual void RunMicroBenchmark(MicroBenchmarkImpl* benchmark);
@@ -415,9 +421,9 @@ class CC_EXPORT LayerImpl {
return contributes_to_drawn_render_surface_;
}
- bool IsDrawnScrollbar() {
- return ToScrollbarLayer() && contributes_to_drawn_render_surface_;
- }
+ bool is_scrollbar() const { return is_scrollbar_; }
+
+ void set_is_scrollbar(bool is_scrollbar) { is_scrollbar_ = is_scrollbar; }
void set_may_contain_video(bool yes) { may_contain_video_ = yes; }
bool may_contain_video() const { return may_contain_video_; }
@@ -460,11 +466,18 @@ class CC_EXPORT LayerImpl {
// TODO(sunxd): Remove this function and replace it with visitor pattern.
virtual bool is_surface_layer() const;
+ void set_is_rounded_corner_mask(bool rounded) {
+ is_rounded_corner_mask_ = rounded;
+ }
+ bool is_rounded_corner_mask() const { return is_rounded_corner_mask_; }
+
protected:
+ // When |will_always_push_properties| is true, the layer does not track its
+ // own SetNeedsPushProperties() state, as it expects to always be pushed to
+ // the active tree regardless.
LayerImpl(LayerTreeImpl* layer_impl,
int id,
- scoped_refptr<SyncedScrollOffset> scroll_offset);
- LayerImpl(LayerTreeImpl* layer_impl, int id);
+ bool will_always_push_properties = false);
// Get the color and size of the layer's debug border.
virtual void GetDebugBorderProperties(SkColor* color, float* width) const;
@@ -487,8 +500,9 @@ class CC_EXPORT LayerImpl {
virtual const char* LayerTypeAsString() const;
- int layer_id_;
- LayerTreeImpl* layer_tree_impl_;
+ const int layer_id_;
+ LayerTreeImpl* const layer_tree_impl_;
+ const bool will_always_push_properties_ : 1;
std::unique_ptr<LayerImplTestProperties> test_properties_;
@@ -583,6 +597,7 @@ class CC_EXPORT LayerImpl {
bool has_will_change_transform_hint_ : 1;
bool needs_push_properties_ : 1;
+ bool is_scrollbar_ : 1;
bool scrollbars_hidden_ : 1;
// The needs_show_scrollbars_ bit tracks a pending request from Blink to show
@@ -597,6 +612,7 @@ class CC_EXPORT LayerImpl {
bool raster_even_if_not_drawn_ : 1;
bool has_transform_node_ : 1;
+ bool is_rounded_corner_mask_ : 1;
DISALLOW_COPY_AND_ASSIGN(LayerImpl);
};
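
For illustration, a hypothetical LayerImpl subclass that is synced on every activation could opt out of push-property bookkeeping through the new constructor flag, which is what PictureLayerImpl does later in this patch. The class name below is made up and not part of the change:

#include "cc/layers/layer_impl.h"

class AlwaysPushedLayerImpl : public cc::LayerImpl {
 public:
  AlwaysPushedLayerImpl(cc::LayerTreeImpl* tree_impl, int id)
      // Passing true makes SetNeedsPushProperties() a no-op for this layer;
      // the tree synchronizer is expected to push it on every activation.
      : cc::LayerImpl(tree_impl, id, /*will_always_push_properties=*/true) {}
};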
diff --git a/chromium/cc/layers/layer_impl_test_properties.h b/chromium/cc/layers/layer_impl_test_properties.h
index 0171895c604..038c420cac6 100644
--- a/chromium/cc/layers/layer_impl_test_properties.h
+++ b/chromium/cc/layers/layer_impl_test_properties.h
@@ -48,6 +48,7 @@ struct CC_EXPORT LayerImplTestProperties {
float opacity;
FilterOperations filters;
FilterOperations background_filters;
+ float backdrop_filter_quality;
gfx::PointF filters_origin;
SkBlendMode blend_mode;
LayerPositionConstraint position_constraint;
diff --git a/chromium/cc/layers/layer_impl_unittest.cc b/chromium/cc/layers/layer_impl_unittest.cc
index 551eced4977..cca256cf143 100644
--- a/chromium/cc/layers/layer_impl_unittest.cc
+++ b/chromium/cc/layers/layer_impl_unittest.cc
@@ -4,6 +4,7 @@
#include "cc/layers/layer_impl.h"
+#include "base/stl_util.h"
#include "cc/layers/painted_scrollbar_layer_impl.h"
#include "cc/layers/solid_color_scrollbar_layer_impl.h"
#include "cc/paint/filter_operation.h"
@@ -24,67 +25,34 @@
namespace cc {
namespace {
-#define EXECUTE_AND_VERIFY_SUBTREE_DID_NOT_CHANGE(code_to_test) \
- root->layer_tree_impl()->ResetAllChangeTracking(); \
- code_to_test; \
- EXPECT_FALSE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(root)); \
- EXPECT_FALSE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(child)); \
- EXPECT_FALSE(root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting( \
- grand_child)); \
- EXPECT_FALSE(root->LayerPropertyChanged()); \
- EXPECT_FALSE(child->LayerPropertyChanged()); \
+#define EXECUTE_AND_VERIFY_SUBTREE_DID_NOT_CHANGE(code_to_test) \
+ root->layer_tree_impl()->ResetAllChangeTracking(); \
+ code_to_test; \
+ EXPECT_FALSE(root->LayerPropertyChanged()); \
+ EXPECT_FALSE(child->LayerPropertyChanged()); \
EXPECT_FALSE(grand_child->LayerPropertyChanged());
-#define EXECUTE_AND_VERIFY_NEEDS_PUSH_PROPERTIES_AND_SUBTREE_DID_NOT_CHANGE( \
- code_to_test) \
- root->layer_tree_impl()->ResetAllChangeTracking(); \
- code_to_test; \
- EXPECT_TRUE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(root)); \
- EXPECT_FALSE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(child)); \
- EXPECT_FALSE(root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting( \
- grand_child)); \
- EXPECT_FALSE(root->LayerPropertyChanged()); \
- EXPECT_FALSE(child->LayerPropertyChanged()); \
- EXPECT_FALSE(grand_child->LayerPropertyChanged());
-
-#define EXECUTE_AND_VERIFY_NO_NEED_TO_PUSH_PROPERTIES_AND_SUBTREE_CHANGED( \
- code_to_test) \
- root->layer_tree_impl()->ResetAllChangeTracking(); \
- code_to_test; \
- EXPECT_FALSE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(root)); \
- EXPECT_FALSE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(child)); \
- EXPECT_FALSE(root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting( \
- grand_child)); \
- EXPECT_TRUE(root->LayerPropertyChanged()); \
- EXPECT_TRUE(root->LayerPropertyChangedFromPropertyTrees()); \
- EXPECT_FALSE(root->LayerPropertyChangedNotFromPropertyTrees()); \
- EXPECT_TRUE(child->LayerPropertyChanged()); \
- EXPECT_TRUE(child->LayerPropertyChangedFromPropertyTrees()); \
- EXPECT_FALSE(child->LayerPropertyChangedNotFromPropertyTrees()); \
- EXPECT_TRUE(grand_child->LayerPropertyChanged()); \
- EXPECT_TRUE(grand_child->LayerPropertyChangedFromPropertyTrees()); \
+#define EXECUTE_AND_VERIFY_SUBTREE_CHANGED(code_to_test) \
+ root->layer_tree_impl()->ResetAllChangeTracking(); \
+ code_to_test; \
+ EXPECT_TRUE(root->LayerPropertyChanged()); \
+ EXPECT_TRUE(root->LayerPropertyChangedFromPropertyTrees()); \
+ EXPECT_FALSE(root->LayerPropertyChangedNotFromPropertyTrees()); \
+ EXPECT_TRUE(child->LayerPropertyChanged()); \
+ EXPECT_TRUE(child->LayerPropertyChangedFromPropertyTrees()); \
+ EXPECT_FALSE(child->LayerPropertyChangedNotFromPropertyTrees()); \
+ EXPECT_TRUE(grand_child->LayerPropertyChanged()); \
+ EXPECT_TRUE(grand_child->LayerPropertyChangedFromPropertyTrees()); \
EXPECT_FALSE(grand_child->LayerPropertyChangedNotFromPropertyTrees());
-#define EXECUTE_AND_VERIFY_ONLY_LAYER_CHANGED(code_to_test) \
- root->layer_tree_impl()->ResetAllChangeTracking(); \
- root->layer_tree_impl()->property_trees()->full_tree_damaged = false; \
- code_to_test; \
- EXPECT_TRUE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(root)); \
- EXPECT_FALSE( \
- root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting(child)); \
- EXPECT_FALSE(root->layer_tree_impl()->LayerNeedsPushPropertiesForTesting( \
- grand_child)); \
- EXPECT_TRUE(root->LayerPropertyChanged()); \
- EXPECT_FALSE(root->LayerPropertyChangedFromPropertyTrees()); \
- EXPECT_TRUE(root->LayerPropertyChangedNotFromPropertyTrees()); \
- EXPECT_FALSE(child->LayerPropertyChanged()); \
+#define EXECUTE_AND_VERIFY_ONLY_LAYER_CHANGED(code_to_test) \
+ root->layer_tree_impl()->ResetAllChangeTracking(); \
+ root->layer_tree_impl()->property_trees()->full_tree_damaged = false; \
+ code_to_test; \
+ EXPECT_TRUE(root->LayerPropertyChanged()); \
+ EXPECT_FALSE(root->LayerPropertyChangedFromPropertyTrees()); \
+ EXPECT_TRUE(root->LayerPropertyChangedNotFromPropertyTrees()); \
+ EXPECT_FALSE(child->LayerPropertyChanged()); \
EXPECT_FALSE(grand_child->LayerPropertyChanged());
#define VERIFY_NEEDS_UPDATE_DRAW_PROPERTIES(code_to_test) \
@@ -161,23 +129,21 @@ TEST(LayerImplTest, VerifyPendingLayerChangesAreTrackedProperly) {
// These properties are internal, and should not be considered a "change" when
// they are used.
- EXECUTE_AND_VERIFY_NEEDS_PUSH_PROPERTIES_AND_SUBTREE_DID_NOT_CHANGE(
+ EXECUTE_AND_VERIFY_SUBTREE_DID_NOT_CHANGE(
root->SetUpdateRect(arbitrary_rect));
EXECUTE_AND_VERIFY_ONLY_LAYER_CHANGED(root->SetBounds(arbitrary_size));
host_impl.pending_tree()->property_trees()->needs_rebuild = true;
host_impl.pending_tree()->BuildLayerListAndPropertyTreesForTesting();
// Changing these properties affects the entire subtree of layers.
- EXECUTE_AND_VERIFY_NO_NEED_TO_PUSH_PROPERTIES_AND_SUBTREE_CHANGED(
- host_impl.pending_tree()->SetFilterMutated(root->element_id(),
- arbitrary_filters));
- EXECUTE_AND_VERIFY_NO_NEED_TO_PUSH_PROPERTIES_AND_SUBTREE_CHANGED(
- host_impl.pending_tree()->SetFilterMutated(root->element_id(),
- FilterOperations()));
- EXECUTE_AND_VERIFY_NO_NEED_TO_PUSH_PROPERTIES_AND_SUBTREE_CHANGED(
+ EXECUTE_AND_VERIFY_SUBTREE_CHANGED(host_impl.pending_tree()->SetFilterMutated(
+ root->element_id(), arbitrary_filters));
+ EXECUTE_AND_VERIFY_SUBTREE_CHANGED(host_impl.pending_tree()->SetFilterMutated(
+ root->element_id(), FilterOperations()));
+ EXECUTE_AND_VERIFY_SUBTREE_CHANGED(
host_impl.pending_tree()->SetOpacityMutated(root->element_id(),
arbitrary_number));
- EXECUTE_AND_VERIFY_NO_NEED_TO_PUSH_PROPERTIES_AND_SUBTREE_CHANGED(
+ EXECUTE_AND_VERIFY_SUBTREE_CHANGED(
host_impl.pending_tree()->SetTransformMutated(root->element_id(),
arbitrary_transform));
@@ -188,8 +154,7 @@ TEST(LayerImplTest, VerifyPendingLayerChangesAreTrackedProperly) {
// Changing these properties does not cause the layer to be marked as changed
// but does cause the layer to need to push properties.
- EXECUTE_AND_VERIFY_NEEDS_PUSH_PROPERTIES_AND_SUBTREE_DID_NOT_CHANGE(
- root->SetElementId(ElementId(2)));
+ EXECUTE_AND_VERIFY_SUBTREE_DID_NOT_CHANGE(root->SetElementId(ElementId(2)));
// After all these properties have been set, setting them to the exact same
// values again should not cause any change.
diff --git a/chromium/cc/layers/layer_unittest.cc b/chromium/cc/layers/layer_unittest.cc
index d8c5f9616d4..1b4dc11a35e 100644
--- a/chromium/cc/layers/layer_unittest.cc
+++ b/chromium/cc/layers/layer_unittest.cc
@@ -6,6 +6,7 @@
#include <stddef.h>
+#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "cc/animation/animation_host.h"
#include "cc/animation/animation_id_provider.h"
@@ -53,46 +54,41 @@ using ::testing::_;
Mock::VerifyAndClearExpectations(layer_tree_host_.get()); \
} while (false)
-#define EXECUTE_AND_VERIFY_SUBTREE_CHANGED(code_to_test) \
- code_to_test; \
- root->layer_tree_host()->BuildPropertyTreesForTesting(); \
- EXPECT_TRUE(root->subtree_property_changed()); \
- EXPECT_TRUE(root->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- root.get())); \
- EXPECT_TRUE(child->subtree_property_changed()); \
- EXPECT_TRUE(child->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- child.get())); \
- EXPECT_TRUE(grand_child->subtree_property_changed()); \
- EXPECT_TRUE( \
- grand_child->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- grand_child.get()));
-
-#define EXECUTE_AND_VERIFY_SUBTREE_NOT_CHANGED(code_to_test) \
- code_to_test; \
- root->layer_tree_host()->BuildPropertyTreesForTesting(); \
- EXPECT_FALSE(root->subtree_property_changed()); \
- EXPECT_FALSE(root->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- root.get())); \
- EXPECT_FALSE(child->subtree_property_changed()); \
- EXPECT_FALSE(child->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- child.get())); \
- EXPECT_FALSE(grand_child->subtree_property_changed()); \
- EXPECT_FALSE( \
- grand_child->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- grand_child.get()));
-
-#define EXECUTE_AND_VERIFY_SUBTREE_CHANGES_RESET(code_to_test) \
- code_to_test; \
- EXPECT_FALSE(root->subtree_property_changed()); \
- EXPECT_FALSE(root->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- root.get())); \
- EXPECT_FALSE(child->subtree_property_changed()); \
- EXPECT_FALSE(child->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- child.get())); \
- EXPECT_FALSE(grand_child->subtree_property_changed()); \
- EXPECT_FALSE( \
- grand_child->layer_tree_host()->LayerNeedsPushPropertiesForTesting( \
- grand_child.get()));
+#define EXECUTE_AND_VERIFY_SUBTREE_CHANGED(code_to_test) \
+ code_to_test; \
+ root->layer_tree_host()->BuildPropertyTreesForTesting(); \
+ EXPECT_TRUE(root->subtree_property_changed()); \
+ EXPECT_TRUE(base::ContainsKey( \
+ root->layer_tree_host()->LayersThatShouldPushProperties(), root.get())); \
+ EXPECT_TRUE(child->subtree_property_changed()); \
+ EXPECT_TRUE(base::ContainsKey( \
+ child->layer_tree_host()->LayersThatShouldPushProperties(), \
+ child.get())); \
+ EXPECT_TRUE(grand_child->subtree_property_changed()); \
+ EXPECT_TRUE(base::ContainsKey( \
+ grand_child->layer_tree_host()->LayersThatShouldPushProperties(), \
+ grand_child.get()));
+
+#define EXECUTE_AND_VERIFY_SUBTREE_NOT_CHANGED(code_to_test) \
+ code_to_test; \
+ root->layer_tree_host()->BuildPropertyTreesForTesting(); \
+ EXPECT_FALSE(root->subtree_property_changed()); \
+ EXPECT_FALSE(base::ContainsKey( \
+ root->layer_tree_host()->LayersThatShouldPushProperties(), root.get())); \
+ EXPECT_FALSE(child->subtree_property_changed()); \
+ EXPECT_FALSE(base::ContainsKey( \
+ child->layer_tree_host()->LayersThatShouldPushProperties(), \
+ child.get())); \
+ EXPECT_FALSE(grand_child->subtree_property_changed()); \
+ EXPECT_FALSE(base::ContainsKey( \
+ grand_child->layer_tree_host()->LayersThatShouldPushProperties(), \
+ grand_child.get()));
+
+#define EXECUTE_AND_VERIFY_SUBTREE_CHANGES_RESET(code_to_test) \
+ code_to_test; \
+ EXPECT_FALSE(root->subtree_property_changed()); \
+ EXPECT_FALSE(child->subtree_property_changed()); \
+ EXPECT_FALSE(grand_child->subtree_property_changed());
namespace cc {
@@ -106,8 +102,8 @@ static auto kArbitrarySourceId2 =
class MockLayerTreeHost : public LayerTreeHost {
public:
MockLayerTreeHost(LayerTreeHostSingleThreadClient* single_thread_client,
- LayerTreeHost::InitParams* params)
- : LayerTreeHost(params, CompositorMode::SINGLE_THREADED) {
+ LayerTreeHost::InitParams params)
+ : LayerTreeHost(std::move(params), CompositorMode::SINGLE_THREADED) {
InitializeSingleThreaded(single_thread_client,
base::ThreadTaskRunnerHandle::Get());
}
@@ -146,8 +142,8 @@ class LayerTest : public testing::Test {
params.task_graph_runner = &task_graph_runner_;
params.mutator_host = animation_host_.get();
- layer_tree_host_.reset(
- new StrictMock<MockLayerTreeHost>(&single_thread_client_, &params));
+ layer_tree_host_.reset(new StrictMock<MockLayerTreeHost>(
+ &single_thread_client_, std::move(params)));
}
void TearDown() override {
@@ -668,8 +664,8 @@ TEST_F(LayerTest, DeleteRemovedScrollParent) {
SimulateCommitForLayer(child1.get());
EXPECT_SET_NEEDS_COMMIT(1, child1->SetScrollParent(nullptr));
- EXPECT_TRUE(
- layer_tree_host_->LayerNeedsPushPropertiesForTesting(child1.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host_->LayersThatShouldPushProperties(), child1.get()));
EXPECT_SET_NEEDS_FULL_TREE_SYNC(1, layer_tree_host_->SetRootLayer(nullptr));
}
@@ -1064,7 +1060,8 @@ class LayerTreeHostFactory {
params.main_task_runner = base::ThreadTaskRunnerHandle::Get();
params.mutator_host = mutator_host;
- return LayerTreeHost::CreateSingleThreaded(&single_thread_client_, &params);
+ return LayerTreeHost::CreateSingleThreaded(&single_thread_client_,
+ std::move(params));
}
private:
@@ -1551,25 +1548,34 @@ class LayerTestWithLayerLists : public LayerTest {
}
};
-TEST_F(LayerTestWithLayerLists,
- SetLayerTreeHostUsingLayerListsDoesNotManageElementId) {
+TEST_F(LayerTestWithLayerLists, LayerTreeHostRegistersElementId) {
scoped_refptr<Layer> test_layer = Layer::Create();
ElementId element_id = ElementId(2);
test_layer->SetElementId(element_id);
- // Only one call expected since we should skip the has-animation check.
- EXPECT_CALL(*layer_tree_host_, SetNeedsCommit()).Times(1);
- scoped_refptr<AnimationTimeline> timeline =
- AnimationTimeline::Create(AnimationIdProvider::NextTimelineId());
- animation_host_->AddAnimationTimeline(timeline);
-
- AddOpacityTransitionToElementWithAnimation(element_id, timeline, 10.0, 1.f,
- 0.f, false);
- EXPECT_TRUE(animation_host_->IsElementAnimating(element_id));
+ EXPECT_EQ(nullptr, layer_tree_host_->LayerByElementId(element_id));
+ test_layer->SetLayerTreeHost(layer_tree_host_.get());
+ EXPECT_EQ(test_layer, layer_tree_host_->LayerByElementId(element_id));
+ test_layer->SetLayerTreeHost(nullptr);
EXPECT_EQ(nullptr, layer_tree_host_->LayerByElementId(element_id));
+}
+
+TEST_F(LayerTestWithLayerLists, ChangingElementIdRegistersElement) {
+ scoped_refptr<Layer> test_layer = Layer::Create();
+ EXPECT_CALL(*layer_tree_host_, SetNeedsCommit()).Times(1);
test_layer->SetLayerTreeHost(layer_tree_host_.get());
- // Layer shouldn't have been registered by element id.
+
+ ElementId element_id = ElementId(2);
+ EXPECT_EQ(nullptr, layer_tree_host_->LayerByElementId(element_id));
+
+ // Setting the element id should register the layer.
+ test_layer->SetElementId(element_id);
+ EXPECT_CALL(*layer_tree_host_, SetNeedsCommit()).Times(1);
+ EXPECT_EQ(test_layer, layer_tree_host_->LayerByElementId(element_id));
+
+ // Unsetting the element id should unregister the layer.
+ test_layer->SetElementId(ElementId());
EXPECT_EQ(nullptr, layer_tree_host_->LayerByElementId(element_id));
test_layer->SetLayerTreeHost(nullptr);
diff --git a/chromium/cc/layers/painted_overlay_scrollbar_layer.cc b/chromium/cc/layers/painted_overlay_scrollbar_layer.cc
index 8bea0f87987..ca94e661013 100644
--- a/chromium/cc/layers/painted_overlay_scrollbar_layer.cc
+++ b/chromium/cc/layers/painted_overlay_scrollbar_layer.cc
@@ -45,6 +45,7 @@ PaintedOverlayScrollbarLayer::PaintedOverlayScrollbarLayer(
thumb_thickness_(scrollbar_->ThumbThickness()),
thumb_length_(scrollbar_->ThumbLength()) {
DCHECK(scrollbar_->UsesNinePatchThumbResource());
+ SetIsScrollbar(true);
}
PaintedOverlayScrollbarLayer::~PaintedOverlayScrollbarLayer() = default;
diff --git a/chromium/cc/layers/painted_scrollbar_layer.cc b/chromium/cc/layers/painted_scrollbar_layer.cc
index cf65df0e6e2..541c024eba5 100644
--- a/chromium/cc/layers/painted_scrollbar_layer.cc
+++ b/chromium/cc/layers/painted_scrollbar_layer.cc
@@ -54,9 +54,11 @@ PaintedScrollbarLayer::PaintedScrollbarLayer(
is_overlay_(scrollbar_->IsOverlay()),
has_thumb_(scrollbar_->HasThumb()),
thumb_opacity_(scrollbar_->ThumbOpacity()) {
- if (!scrollbar_->IsOverlay())
+ if (!scrollbar_->IsOverlay()) {
AddMainThreadScrollingReasons(
MainThreadScrollingReason::kScrollbarScrolling);
+ }
+ SetIsScrollbar(true);
}
PaintedScrollbarLayer::~PaintedScrollbarLayer() = default;
diff --git a/chromium/cc/layers/picture_layer_impl.cc b/chromium/cc/layers/picture_layer_impl.cc
index ec9a79d73fe..94772c09ab4 100644
--- a/chromium/cc/layers/picture_layer_impl.cc
+++ b/chromium/cc/layers/picture_layer_impl.cc
@@ -13,6 +13,7 @@
#include <set>
#include "base/metrics/histogram_macros.h"
+#include "base/no_destructor.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"
@@ -144,7 +145,7 @@ gfx::Size CalculateGpuTileSize(const gfx::Size& base_tile_size,
PictureLayerImpl::PictureLayerImpl(LayerTreeImpl* tree_impl,
int id,
Layer::LayerMaskType mask_type)
- : LayerImpl(tree_impl, id),
+ : LayerImpl(tree_impl, id, /*will_always_push_properties=*/true),
twin_layer_(nullptr),
tilings_(CreatePictureLayerTilingSet()),
ideal_page_scale_(0.f),
@@ -241,11 +242,6 @@ void PictureLayerImpl::PushPropertiesTo(LayerImpl* base_layer) {
layer_impl->can_use_lcd_text_ = can_use_lcd_text_;
layer_impl->SanityCheckTilingState();
-
- // We always need to push properties.
- // See http://crbug.com/303943
- // TODO(danakj): Stop always pushing properties since we don't swap tilings.
- layer_tree_impl()->AddLayerShouldPushProperties(this);
}
void PictureLayerImpl::AppendQuads(viz::RenderPass* render_pass,
@@ -260,6 +256,12 @@ void PictureLayerImpl::AppendQuads(viz::RenderPass* render_pass,
viz::SharedQuadState* shared_quad_state =
render_pass->CreateAndAppendSharedQuadState();
+ if (mask_type_ != Layer::LayerMaskType::NOT_MASK) {
+ append_quads_data->num_mask_layers++;
+ if (is_rounded_corner_mask())
+ append_quads_data->num_rounded_corner_mask_layers++;
+ }
+
if (raster_source_->IsSolidColor()) {
// TODO(sunxd): Solid color non-mask layers are forced to have contents
// scale = 1. This is a workaround to temporarily fix
@@ -391,9 +393,6 @@ void PictureLayerImpl::AppendQuads(viz::RenderPass* render_pass,
} else if (mode == TileDrawInfo::OOM_MODE) {
color = DebugColors::OOMTileBorderColor();
width = DebugColors::OOMTileBorderWidth(device_scale_factor);
- } else if (iter->draw_info().has_compressed_resource()) {
- color = DebugColors::CompressedTileBorderColor();
- width = DebugColors::CompressedTileBorderWidth(device_scale_factor);
} else if (iter.resolution() == HIGH_RESOLUTION) {
color = DebugColors::HighResTileBorderColor();
width = DebugColors::HighResTileBorderWidth(device_scale_factor);
@@ -462,6 +461,13 @@ void PictureLayerImpl::AppendQuads(viz::RenderPass* render_pass,
visible_geometry_rect.height();
append_quads_data->visible_layer_area += visible_geometry_area;
+ if (mask_type_ != Layer::LayerMaskType::NOT_MASK) {
+ append_quads_data->visible_mask_layer_area += visible_geometry_area;
+ if (is_rounded_corner_mask())
+ append_quads_data->visible_rounded_corner_mask_layer_area +=
+ visible_geometry_area;
+ }
+
bool has_draw_quad = false;
if (*iter && iter->draw_info().IsReadyToDraw()) {
const TileDrawInfo& draw_info = iter->draw_info();
@@ -653,11 +659,11 @@ bool PictureLayerImpl::UpdateTiles() {
!layer_tree_impl()->SmoothnessTakesPriority();
}
- static const Occlusion kEmptyOcclusion;
+ static const base::NoDestructor<Occlusion> kEmptyOcclusion;
const Occlusion& occlusion_in_content_space =
layer_tree_impl()->settings().use_occlusion_for_tile_prioritization
? draw_properties().occlusion_in_content_space
- : kEmptyOcclusion;
+ : *kEmptyOcclusion;
// Pass |occlusion_in_content_space| for |occlusion_in_layer_space| since
// they are the same space in picture layer, as contents scale is always 1.
@@ -704,26 +710,25 @@ void PictureLayerImpl::UpdateViewportRectForTilePriorityInContentSpace() {
}
viewport_rect_for_tile_priority_in_content_space_ =
visible_rect_in_content_space;
-#if defined(OS_ANDROID)
- // On android, if we're in a scrolling gesture, the pending tree does not
- // reflect the fact that we may be hiding the top or bottom controls. Thus,
- // it would believe that the viewport is smaller than it actually is which
- // can cause activation flickering issues. So, if we're in this situation
- // adjust the visible rect by the top/bottom controls height. This isn't
- // ideal since we're not always in this case, but since we should be
- // prioritizing the active tree anyway, it doesn't cause any serious issues.
- // https://crbug.com/794456.
- if (layer_tree_impl()->IsPendingTree() &&
- layer_tree_impl()->IsActivelyScrolling()) {
- float total_controls_height = layer_tree_impl()->top_controls_height() +
- layer_tree_impl()->bottom_controls_height();
- viewport_rect_for_tile_priority_in_content_space_.Inset(
- 0, // left
- 0, // top,
- 0, // right,
- -total_controls_height); // bottom
+
+ float total_controls_height = layer_tree_impl()->top_controls_height() +
+ layer_tree_impl()->bottom_controls_height();
+ if (total_controls_height) {
+ // If sliding top controls are being used, the pending tree does not
+ // reflect the fact that we may be hiding the top or bottom controls. Thus,
+ // it would believe that the viewport is smaller than it actually is, which
+ // can cause activation flickering issues. So, if we're in this situation,
+ // adjust the visible rect by the controls height.
+ if (layer_tree_impl()->IsPendingTree() &&
+ layer_tree_impl()->IsActivelyScrolling() &&
+ layer_tree_impl()->browser_controls_shrink_blink_size()) {
+ viewport_rect_for_tile_priority_in_content_space_.Inset(
+ 0, // left
+ 0, // top,
+ 0, // right,
+ -total_controls_height); // bottom
+ }
}
-#endif
}
PictureLayerImpl* PictureLayerImpl::GetPendingOrActiveTwinLayer() const {
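
A quick worked example of the inset above; the numbers are illustrative and not taken from the patch. With a 56 px top control, no bottom control, and controls that shrink the Blink size, a 360x600 priority rect in content space grows downward by the total controls height:

#include "ui/gfx/geometry/rect.h"

void ExpandForHiddenControls() {
  gfx::Rect priority_rect(0, 0, 360, 600);
  int total_controls_height = 56 + 0;  // top + bottom controls, in px.
  // A negative bottom inset expands the rect, so the pending tree keeps
  // prioritizing tiles that become visible as the controls slide away.
  priority_rect.Inset(0 /* left */, 0 /* top */, 0 /* right */,
                      -total_controls_height /* bottom */);
  // priority_rect is now (0, 0) 360x656.
}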
@@ -1426,9 +1431,18 @@ gfx::Vector2dF PictureLayerImpl::CalculateRasterTranslation(
if (!use_transformed_rasterization_)
return gfx::Vector2dF();
- DCHECK(!draw_properties().screen_space_transform_is_animating);
gfx::Transform draw_transform = DrawTransform();
- DCHECK(draw_transform.IsScaleOrTranslation());
+ // TODO(enne): for performance reasons, we should only have a raster
+ // translation when the screen space transform is not animating. We try to
+ // avoid this elsewhere but it still happens: http://crbug.com/778440
+ // TODO(enne): Also, we shouldn't ever get here if the draw transform is not
+ // just a scale + translation, but we do sometimes: http://crbug.com/740113
+ if (draw_properties().screen_space_transform_is_animating ||
+ !draw_transform.IsScaleOrTranslation()) {
+ // For now, while these problems are not well understood, avoid applying a
+ // raster translation in these cases.
+ return gfx::Vector2dF();
+ }
// It is only useful to align the content space to the target space if their
// relative pixel ratio is some small rational number. Currently we only
@@ -1694,7 +1708,8 @@ PictureLayerImpl::InvalidateRegionForImages(
invalidation_.Union(invalidation);
tilings_->Invalidate(invalidation);
- SetNeedsPushProperties();
+ // TODO(crbug.com/303943): SetNeedsPushProperties() would be needed here if
+ // PictureLayerImpl didn't always push properties every activation.
return ImageInvalidationResult::kInvalidated;
}
diff --git a/chromium/cc/layers/picture_layer_impl_unittest.cc b/chromium/cc/layers/picture_layer_impl_unittest.cc
index d9470a54d23..147364120ac 100644
--- a/chromium/cc/layers/picture_layer_impl_unittest.cc
+++ b/chromium/cc/layers/picture_layer_impl_unittest.cc
@@ -3095,8 +3095,7 @@ TEST_F(PictureLayerImplTest, TilingSetRasterQueue) {
std::vector<Tile*> high_res_tiles =
pending_layer()->HighResTiling()->AllTilesForTesting();
- for (std::vector<Tile*>::iterator tile_it = high_res_tiles.begin();
- tile_it != high_res_tiles.end();
+ for (auto tile_it = high_res_tiles.begin(); tile_it != high_res_tiles.end();
++tile_it) {
Tile* tile = *tile_it;
TileDrawInfo& draw_info = tile->draw_info();
@@ -3370,7 +3369,7 @@ TEST_F(PictureLayerImplTest, OcclusionOnSolidColorPictureLayer) {
{
SCOPED_TRACE("Scaled occlusion");
- gfx::Rect occluded(300, 0, 400, 2000);
+ gfx::Rect occluded(300, 0, 2000, 2000);
impl.AppendQuadsWithOcclusion(active_layer(), occluded);
size_t partial_occluded_count = 0;
@@ -3378,11 +3377,9 @@ TEST_F(PictureLayerImplTest, OcclusionOnSolidColorPictureLayer) {
&partial_occluded_count);
// Because of the implementation of test helper AppendQuadsWithOcclusion,
// the occlusion will have a scale transform resulting from the device scale
- // factor. However, the AppendQuads function will try to tile a solid color
- // layer ignoring the scale factor, and its visible layer bounds is 500x500.
- // So we end up having 4 partially occluded quads.
- EXPECT_EQ(4u, impl.quad_list().size());
- EXPECT_EQ(4u, partial_occluded_count);
+ // factor. A single partially occluded DrawQuad of 500x500 will be added.
+ EXPECT_EQ(1u, impl.quad_list().size());
+ EXPECT_EQ(1u, partial_occluded_count);
}
}
@@ -3410,7 +3407,7 @@ TEST_F(PictureLayerImplTest, IgnoreOcclusionOnSolidColorMask) {
&partial_occluded_count);
// None of the quads shall be occluded because mask layers ignore
// occlusion.
- EXPECT_EQ(16u, impl.quad_list().size());
+ EXPECT_EQ(1u, impl.quad_list().size());
EXPECT_EQ(0u, partial_occluded_count);
}
}
@@ -4428,7 +4425,6 @@ TEST_F(OcclusionTrackingPictureLayerImplTest,
active_occluding_layer->SetDrawsContent(true);
active_occluding_layer->SetContentsOpaque(true);
active_occluding_layer->SetPosition(active_occluding_layer_position);
-
ActivateTree();
// Partially invalidate the pending layer. Tiles inside the invalidation rect
diff --git a/chromium/cc/layers/picture_layer_unittest.cc b/chromium/cc/layers/picture_layer_unittest.cc
index 7f2661cbbc8..40772076c72 100644
--- a/chromium/cc/layers/picture_layer_unittest.cc
+++ b/chromium/cc/layers/picture_layer_unittest.cc
@@ -327,8 +327,8 @@ TEST(PictureLayerTest, NonMonotonicSourceFrameNumber) {
params.task_graph_runner = &task_graph_runner;
params.main_task_runner = base::ThreadTaskRunnerHandle::Get();
params.mutator_host = animation_host.get();
- std::unique_ptr<LayerTreeHost> host1 =
- LayerTreeHost::CreateSingleThreaded(&single_thread_client, &params);
+ std::unique_ptr<LayerTreeHost> host1 = LayerTreeHost::CreateSingleThreaded(
+ &single_thread_client, std::move(params));
host1->SetVisible(true);
host_client1.SetLayerTreeHost(host1.get());
@@ -341,8 +341,8 @@ TEST(PictureLayerTest, NonMonotonicSourceFrameNumber) {
params2.main_task_runner = base::ThreadTaskRunnerHandle::Get();
params2.client = &host_client2;
params2.mutator_host = animation_host2.get();
- std::unique_ptr<LayerTreeHost> host2 =
- LayerTreeHost::CreateSingleThreaded(&single_thread_client, &params2);
+ std::unique_ptr<LayerTreeHost> host2 = LayerTreeHost::CreateSingleThreaded(
+ &single_thread_client, std::move(params2));
host2->SetVisible(true);
host_client2.SetLayerTreeHost(host2.get());
@@ -399,8 +399,8 @@ TEST(PictureLayerTest, ChangingHostsWithCollidingFrames) {
params.task_graph_runner = &task_graph_runner;
params.main_task_runner = base::ThreadTaskRunnerHandle::Get();
params.mutator_host = animation_host.get();
- std::unique_ptr<LayerTreeHost> host1 =
- LayerTreeHost::CreateSingleThreaded(&single_thread_client, &params);
+ std::unique_ptr<LayerTreeHost> host1 = LayerTreeHost::CreateSingleThreaded(
+ &single_thread_client, std::move(params));
host1->SetVisible(true);
host_client1.SetLayerTreeHost(host1.get());
@@ -413,8 +413,8 @@ TEST(PictureLayerTest, ChangingHostsWithCollidingFrames) {
params2.main_task_runner = base::ThreadTaskRunnerHandle::Get();
params2.client = &host_client2;
params2.mutator_host = animation_host2.get();
- std::unique_ptr<LayerTreeHost> host2 =
- LayerTreeHost::CreateSingleThreaded(&single_thread_client, &params2);
+ std::unique_ptr<LayerTreeHost> host2 = LayerTreeHost::CreateSingleThreaded(
+ &single_thread_client, std::move(params2));
host2->SetVisible(true);
host_client2.SetLayerTreeHost(host2.get());
diff --git a/chromium/cc/layers/render_surface_impl.cc b/chromium/cc/layers/render_surface_impl.cc
index 4df28434eb0..1461252b8ed 100644
--- a/chromium/cc/layers/render_surface_impl.cc
+++ b/chromium/cc/layers/render_surface_impl.cc
@@ -429,6 +429,18 @@ void RenderSurfaceImpl::AppendQuads(DrawMode draw_mode,
TRACE_EVENT1("cc", "RenderSurfaceImpl::AppendQuads",
"mask_layer_gpu_memory_usage",
mask_layer->GPUMemoryUsageInBytes());
+
+ int64_t visible_geometry_area =
+ static_cast<int64_t>(unoccluded_content_rect.width()) *
+ unoccluded_content_rect.height();
+ append_quads_data->num_mask_layers++;
+ append_quads_data->visible_mask_layer_area += visible_geometry_area;
+ if (mask_layer->is_rounded_corner_mask()) {
+ append_quads_data->num_rounded_corner_mask_layers++;
+ append_quads_data->visible_rounded_corner_mask_layer_area +=
+ visible_geometry_area;
+ }
+
if (mask_layer->mask_type() == Layer::LayerMaskType::MULTI_TEXTURE_MASK) {
TileMaskLayer(render_pass, shared_quad_state, unoccluded_content_rect);
return;
@@ -452,7 +464,8 @@ void RenderSurfaceImpl::AppendQuads(DrawMode draw_mode,
quad->SetNew(shared_quad_state, content_rect(), unoccluded_content_rect, id(),
mask_resource_id, mask_uv_rect, mask_texture_size,
surface_contents_scale, FiltersOrigin(), tex_coord_rect,
- !layer_tree_impl_->settings().enable_edge_anti_aliasing);
+ !layer_tree_impl_->settings().enable_edge_anti_aliasing,
+ OwningEffectNode()->backdrop_filter_quality);
}
void RenderSurfaceImpl::TileMaskLayer(
@@ -564,6 +577,7 @@ void RenderSurfaceImpl::TileMaskLayer(
quad_space_to_surface_space_transform, gfx::RectF(quad_rect));
tex_coord_rect.Offset(-content_rect().OffsetFromOrigin());
+ constexpr float backdrop_filter_quality = 1.0;
switch (temp_quad->material) {
case viz::DrawQuad::TILED_CONTENT: {
DCHECK_EQ(1U, temp_quad->resources.count);
@@ -611,7 +625,8 @@ void RenderSurfaceImpl::TileMaskLayer(
temp_quad->resources.ids[0], mask_uv_rect,
mask_texture_size, owning_layer_to_surface_contents_scale,
FiltersOrigin(), tex_coord_rect,
- !layer_tree_impl_->settings().enable_edge_anti_aliasing);
+ !layer_tree_impl_->settings().enable_edge_anti_aliasing,
+ backdrop_filter_quality);
} break;
case viz::DrawQuad::SOLID_COLOR: {
SkColor temp_color =
@@ -630,7 +645,8 @@ void RenderSurfaceImpl::TileMaskLayer(
gfx::RectF(), gfx::Size(),
owning_layer_to_surface_contents_scale, FiltersOrigin(),
tex_coord_rect,
- !layer_tree_impl_->settings().enable_edge_anti_aliasing);
+ !layer_tree_impl_->settings().enable_edge_anti_aliasing,
+ backdrop_filter_quality);
} break;
case viz::DrawQuad::DEBUG_BORDER:
NOTIMPLEMENTED();
diff --git a/chromium/cc/layers/scrollbar_layer_impl_base.cc b/chromium/cc/layers/scrollbar_layer_impl_base.cc
index 6a0f9eb613e..24a855e2f84 100644
--- a/chromium/cc/layers/scrollbar_layer_impl_base.cc
+++ b/chromium/cc/layers/scrollbar_layer_impl_base.cc
@@ -26,7 +26,9 @@ ScrollbarLayerImplBase::ScrollbarLayerImplBase(
scroll_layer_length_(0.f),
orientation_(orientation),
is_left_side_vertical_scrollbar_(is_left_side_vertical_scrollbar),
- vertical_adjust_(0.f) {}
+ vertical_adjust_(0.f) {
+ set_is_scrollbar(true);
+}
ScrollbarLayerImplBase::~ScrollbarLayerImplBase() {
layer_tree_impl()->UnregisterScrollbar(this);
diff --git a/chromium/cc/layers/scrollbar_layer_unittest.cc b/chromium/cc/layers/scrollbar_layer_unittest.cc
index f90363297e0..031d23cf451 100644
--- a/chromium/cc/layers/scrollbar_layer_unittest.cc
+++ b/chromium/cc/layers/scrollbar_layer_unittest.cc
@@ -62,7 +62,7 @@ class FakeResourceTrackingUIResourceManager : public UIResourceManager {
// Deletes a UI resource. May safely be called more than once.
void DeleteUIResource(UIResourceId id) override {
- UIResourceBitmapMap::iterator iter = ui_resource_bitmap_map_.find(id);
+ auto iter = ui_resource_bitmap_map_.find(id);
if (iter != ui_resource_bitmap_map_.end()) {
ui_resource_bitmap_map_.erase(iter);
total_ui_resource_deleted_++;
@@ -74,14 +74,14 @@ class FakeResourceTrackingUIResourceManager : public UIResourceManager {
int TotalUIResourceCreated() { return total_ui_resource_created_; }
gfx::Size ui_resource_size(UIResourceId id) {
- UIResourceBitmapMap::iterator iter = ui_resource_bitmap_map_.find(id);
+ auto iter = ui_resource_bitmap_map_.find(id);
if (iter != ui_resource_bitmap_map_.end())
return iter->second.GetSize();
return gfx::Size();
}
UIResourceBitmap* ui_resource_bitmap(UIResourceId id) {
- UIResourceBitmapMap::iterator iter = ui_resource_bitmap_map_.find(id);
+ auto iter = ui_resource_bitmap_map_.find(id);
if (iter != ui_resource_bitmap_map_.end())
return &iter->second;
return nullptr;
@@ -125,8 +125,8 @@ class BaseScrollbarLayerTest : public testing::Test {
std::make_unique<FakeResourceTrackingUIResourceManager>();
fake_ui_resource_manager_ = fake_ui_resource_manager.get();
- layer_tree_host_.reset(new FakeLayerTreeHost(
- &fake_client_, &params, CompositorMode::SINGLE_THREADED));
+ layer_tree_host_ = std::make_unique<FakeLayerTreeHost>(
+ &fake_client_, std::move(params), CompositorMode::SINGLE_THREADED);
layer_tree_host_->SetUIResourceManagerForTesting(
std::move(fake_ui_resource_manager));
layer_tree_host_->InitializeSingleThreaded(
diff --git a/chromium/cc/layers/solid_color_layer_impl.cc b/chromium/cc/layers/solid_color_layer_impl.cc
index bec16b85ce5..add281997be 100644
--- a/chromium/cc/layers/solid_color_layer_impl.cc
+++ b/chromium/cc/layers/solid_color_layer_impl.cc
@@ -13,10 +13,6 @@
namespace cc {
-namespace {
-const int kSolidQuadTileSize = 256;
-}
-
SolidColorLayerImpl::SolidColorLayerImpl(LayerTreeImpl* tree_impl, int id)
: LayerImpl(tree_impl, id) {
}
@@ -41,32 +37,12 @@ void SolidColorLayerImpl::AppendSolidQuads(
DCHECK_EQ(SkBlendMode::kSrcOver, shared_quad_state->blend_mode);
if (alpha < std::numeric_limits<float>::epsilon())
return;
- // We create a series of smaller quads instead of just one large one so that
- // the culler can reduce the total pixels drawn.
- int right = visible_layer_rect.right();
- int bottom = visible_layer_rect.bottom();
- for (int x = visible_layer_rect.x(); x < visible_layer_rect.right();
- x += kSolidQuadTileSize) {
- for (int y = visible_layer_rect.y(); y < visible_layer_rect.bottom();
- y += kSolidQuadTileSize) {
- gfx::Rect quad_rect(x,
- y,
- std::min(right - x, kSolidQuadTileSize),
- std::min(bottom - y, kSolidQuadTileSize));
- gfx::Rect visible_quad_rect =
- occlusion_in_layer_space.GetUnoccludedContentRect(quad_rect);
- if (visible_quad_rect.IsEmpty())
- continue;
-
- append_quads_data->visible_layer_area +=
- visible_quad_rect.width() * visible_quad_rect.height();
- auto* quad =
- render_pass->CreateAndAppendDrawQuad<viz::SolidColorDrawQuad>();
- quad->SetNew(shared_quad_state, quad_rect, visible_quad_rect, color,
- force_anti_aliasing_off);
- }
- }
+ gfx::Rect visible_quad_rect =
+ occlusion_in_layer_space.GetUnoccludedContentRect(visible_layer_rect);
+ auto* quad = render_pass->CreateAndAppendDrawQuad<viz::SolidColorDrawQuad>();
+ quad->SetNew(shared_quad_state, visible_layer_rect, visible_quad_rect, color,
+ force_anti_aliasing_off);
}
void SolidColorLayerImpl::AppendQuads(viz::RenderPass* render_pass,
diff --git a/chromium/cc/layers/solid_color_layer_impl_unittest.cc b/chromium/cc/layers/solid_color_layer_impl_unittest.cc
index 5f0a8e8a419..83497731afc 100644
--- a/chromium/cc/layers/solid_color_layer_impl_unittest.cc
+++ b/chromium/cc/layers/solid_color_layer_impl_unittest.cc
@@ -263,7 +263,7 @@ TEST(SolidColorLayerImplTest, Occlusion) {
LayerTestCommon::VerifyQuadsExactlyCoverRect(impl.quad_list(),
gfx::Rect(layer_size));
- EXPECT_EQ(16u, impl.quad_list().size());
+ EXPECT_EQ(1u, impl.quad_list().size());
}
{
@@ -277,15 +277,15 @@ TEST(SolidColorLayerImplTest, Occlusion) {
{
SCOPED_TRACE("Partial occlusion");
- gfx::Rect occluded(200, 200, 256 * 3, 256 * 3);
+ gfx::Rect occluded(200, 0, 800, 1000);
impl.AppendQuadsWithOcclusion(solid_color_layer_impl, occluded);
size_t partially_occluded_count = 0;
LayerTestCommon::VerifyQuadsAreOccluded(
impl.quad_list(), occluded, &partially_occluded_count);
// The single remaining quad is partially occluded.
- EXPECT_EQ(16u - 4u, impl.quad_list().size());
- EXPECT_EQ(8u, partially_occluded_count);
+ EXPECT_EQ(1u, impl.quad_list().size());
+ EXPECT_EQ(1u, partially_occluded_count);
}
}
diff --git a/chromium/cc/layers/solid_color_scrollbar_layer.cc b/chromium/cc/layers/solid_color_scrollbar_layer.cc
index f514b6dd762..c73f241a7ad 100644
--- a/chromium/cc/layers/solid_color_scrollbar_layer.cc
+++ b/chromium/cc/layers/solid_color_scrollbar_layer.cc
@@ -60,6 +60,7 @@ SolidColorScrollbarLayer::SolidColorScrollbarLayer(
is_left_side_vertical_scrollbar,
scroll_element_id) {
Layer::SetOpacity(0.f);
+ SetIsScrollbar(true);
}
SolidColorScrollbarLayer::~SolidColorScrollbarLayer() = default;
diff --git a/chromium/cc/layers/surface_layer.cc b/chromium/cc/layers/surface_layer.cc
index 7f90bd5d470..89dca4bd5c4 100644
--- a/chromium/cc/layers/surface_layer.cc
+++ b/chromium/cc/layers/surface_layer.cc
@@ -68,6 +68,9 @@ void SurfaceLayer::SetPrimarySurfaceId(const viz::SurfaceId& surface_id,
}
void SurfaceLayer::SetFallbackSurfaceId(const viz::SurfaceId& surface_id) {
+ // The fallback should never move backwards.
+ DCHECK(!surface_range_.start() ||
+ !surface_range_.start()->IsNewerThan(surface_id));
if (surface_range_.start() == surface_id)
return;
TRACE_EVENT_WITH_FLOW2(
@@ -103,7 +106,16 @@ void SurfaceLayer::SetSurfaceHitTestable(bool surface_hit_testable) {
if (surface_hit_testable_ == surface_hit_testable)
return;
surface_hit_testable_ = surface_hit_testable;
+}
+
+void SurfaceLayer::SetHasPointerEventsNone(bool has_pointer_events_none) {
+ if (has_pointer_events_none_ == has_pointer_events_none)
+ return;
+ has_pointer_events_none_ = has_pointer_events_none;
SetNeedsPushProperties();
+ // A change of the pointer-events property triggers an update of the viz hit
+ // test data; we need to commit in order to submit the new data with the
+ // compositor frame.
+ SetNeedsCommit();
}
void SurfaceLayer::SetMayContainVideo(bool may_contain_video) {
@@ -145,6 +157,7 @@ void SurfaceLayer::PushPropertiesTo(LayerImpl* layer) {
deadline_in_frames_ = 0u;
layer_impl->SetStretchContentToFillBounds(stretch_content_to_fill_bounds_);
layer_impl->SetSurfaceHitTestable(surface_hit_testable_);
+ layer_impl->SetHasPointerEventsNone(has_pointer_events_none_);
}
} // namespace cc
diff --git a/chromium/cc/layers/surface_layer.h b/chromium/cc/layers/surface_layer.h
index 1d1d474c9ee..8a832fba879 100644
--- a/chromium/cc/layers/surface_layer.h
+++ b/chromium/cc/layers/surface_layer.h
@@ -39,7 +39,8 @@ class CC_EXPORT SurfaceLayer : public Layer {
}
void SetSurfaceHitTestable(bool surface_hit_testable);
- bool surface_hit_testable() const { return surface_hit_testable_; }
+
+ void SetHasPointerEventsNone(bool has_pointer_events_none);
void SetMayContainVideo(bool);
@@ -84,6 +85,12 @@ class CC_EXPORT SurfaceLayer : public Layer {
// be surface hit testable (e.g., a surface layer created by video).
bool surface_hit_testable_ = false;
+ // Whether the surface should ignore pointer events. It is set to true if
+ // the frame owner has the pointer-events: none property.
+ // TODO(sunxd): consider renaming it to oopif_has_pointer_events_none_ for
+ // disambiguation.
+ bool has_pointer_events_none_ = false;
+
DISALLOW_COPY_AND_ASSIGN(SurfaceLayer);
};
diff --git a/chromium/cc/layers/surface_layer_impl.cc b/chromium/cc/layers/surface_layer_impl.cc
index b86a1288c27..ad3031ebcc1 100644
--- a/chromium/cc/layers/surface_layer_impl.cc
+++ b/chromium/cc/layers/surface_layer_impl.cc
@@ -86,6 +86,14 @@ void SurfaceLayerImpl::SetSurfaceHitTestable(bool surface_hit_testable) {
NoteLayerPropertyChanged();
}
+void SurfaceLayerImpl::SetHasPointerEventsNone(bool has_pointer_events_none) {
+ if (has_pointer_events_none_ == has_pointer_events_none)
+ return;
+
+ has_pointer_events_none_ = has_pointer_events_none;
+ NoteLayerPropertyChanged();
+}
+
void SurfaceLayerImpl::PushPropertiesTo(LayerImpl* layer) {
LayerImpl::PushPropertiesTo(layer);
SurfaceLayerImpl* layer_impl = static_cast<SurfaceLayerImpl*>(layer);
@@ -95,6 +103,7 @@ void SurfaceLayerImpl::PushPropertiesTo(LayerImpl* layer) {
deadline_in_frames_ = 0u;
layer_impl->SetStretchContentToFillBounds(stretch_content_to_fill_bounds_);
layer_impl->SetSurfaceHitTestable(surface_hit_testable_);
+ layer_impl->SetHasPointerEventsNone(has_pointer_events_none_);
}
bool SurfaceLayerImpl::WillDraw(
diff --git a/chromium/cc/layers/surface_layer_impl.h b/chromium/cc/layers/surface_layer_impl.h
index f67e362b826..64ff796a87e 100644
--- a/chromium/cc/layers/surface_layer_impl.h
+++ b/chromium/cc/layers/surface_layer_impl.h
@@ -54,7 +54,12 @@ class CC_EXPORT SurfaceLayerImpl : public LayerImpl {
}
void SetSurfaceHitTestable(bool surface_hit_testable);
- bool surface_hit_testable() const { return surface_hit_testable_; }
+ bool ShouldGenerateSurfaceHitTestData() const {
+ return surface_hit_testable_ && !has_pointer_events_none_;
+ }
+
+ void SetHasPointerEventsNone(bool has_pointer_events_none);
+ bool has_pointer_events_none() const { return has_pointer_events_none_; }
// LayerImpl overrides.
std::unique_ptr<LayerImpl> CreateLayerImpl(LayerTreeImpl* tree_impl) override;
@@ -84,6 +89,7 @@ class CC_EXPORT SurfaceLayerImpl : public LayerImpl {
bool stretch_content_to_fill_bounds_ = false;
bool surface_hit_testable_ = false;
+ bool has_pointer_events_none_ = false;
bool will_draw_ = false;
DISALLOW_COPY_AND_ASSIGN(SurfaceLayerImpl);
diff --git a/chromium/cc/layers/texture_layer_impl.cc b/chromium/cc/layers/texture_layer_impl.cc
index 7d34344e47e..f1b6e748a16 100644
--- a/chromium/cc/layers/texture_layer_impl.cc
+++ b/chromium/cc/layers/texture_layer_impl.cc
@@ -199,32 +199,26 @@ void TextureLayerImpl::ReleaseResources() {
void TextureLayerImpl::SetPremultipliedAlpha(bool premultiplied_alpha) {
premultiplied_alpha_ = premultiplied_alpha;
- SetNeedsPushProperties();
}
void TextureLayerImpl::SetBlendBackgroundColor(bool blend) {
blend_background_color_ = blend;
- SetNeedsPushProperties();
}
void TextureLayerImpl::SetFlipped(bool flipped) {
flipped_ = flipped;
- SetNeedsPushProperties();
}
void TextureLayerImpl::SetNearestNeighbor(bool nearest_neighbor) {
nearest_neighbor_ = nearest_neighbor;
- SetNeedsPushProperties();
}
void TextureLayerImpl::SetUVTopLeft(const gfx::PointF& top_left) {
uv_top_left_ = top_left;
- SetNeedsPushProperties();
}
void TextureLayerImpl::SetUVBottomRight(const gfx::PointF& bottom_right) {
uv_bottom_right_ = bottom_right;
- SetNeedsPushProperties();
}
// 1--2
@@ -235,7 +229,6 @@ void TextureLayerImpl::SetVertexOpacity(const float vertex_opacity[4]) {
vertex_opacity_[1] = vertex_opacity[1];
vertex_opacity_[2] = vertex_opacity[2];
vertex_opacity_[3] = vertex_opacity[3];
- SetNeedsPushProperties();
}
void TextureLayerImpl::SetTransferableResource(
@@ -246,7 +239,6 @@ void TextureLayerImpl::SetTransferableResource(
transferable_resource_ = resource;
release_callback_ = std::move(release_callback);
own_resource_ = true;
- SetNeedsPushProperties();
}
void TextureLayerImpl::RegisterSharedBitmapId(
@@ -263,7 +255,6 @@ void TextureLayerImpl::RegisterSharedBitmapId(
to_register_bitmaps_[id] = std::move(bitmap);
}
base::Erase(to_unregister_bitmap_ids_, id);
- SetNeedsPushProperties();
}
void TextureLayerImpl::UnregisterSharedBitmapId(viz::SharedBitmapId id) {
@@ -278,7 +269,6 @@ void TextureLayerImpl::UnregisterSharedBitmapId(viz::SharedBitmapId id) {
// SharedBitmapId, so we should remove the SharedBitmapId only after we've
// had a chance to replace it with activation.
to_unregister_bitmap_ids_.push_back(id);
- SetNeedsPushProperties();
}
}
diff --git a/chromium/cc/layers/texture_layer_unittest.cc b/chromium/cc/layers/texture_layer_unittest.cc
index 66656e3e005..ef8c52d3697 100644
--- a/chromium/cc/layers/texture_layer_unittest.cc
+++ b/chromium/cc/layers/texture_layer_unittest.cc
@@ -83,7 +83,7 @@ class MockLayerTreeHost : public LayerTreeHost {
params.mutator_host = mutator_host;
LayerTreeSettings settings;
params.settings = &settings;
- return base::WrapUnique(new MockLayerTreeHost(&params));
+ return base::WrapUnique(new MockLayerTreeHost(std::move(params)));
}
MOCK_METHOD0(SetNeedsCommit, void());
@@ -91,8 +91,8 @@ class MockLayerTreeHost : public LayerTreeHost {
MOCK_METHOD0(StopRateLimiter, void());
private:
- explicit MockLayerTreeHost(LayerTreeHost::InitParams* params)
- : LayerTreeHost(params, CompositorMode::SINGLE_THREADED) {
+ explicit MockLayerTreeHost(LayerTreeHost::InitParams params)
+ : LayerTreeHost(std::move(params), CompositorMode::SINGLE_THREADED) {
InitializeSingleThreaded(&single_thread_client_,
base::ThreadTaskRunnerHandle::Get());
}
@@ -248,8 +248,8 @@ TEST_F(TextureLayerTest, ShutdownWithResource) {
LayerTreeSettings settings;
params.settings = &settings;
params.main_task_runner = base::ThreadTaskRunnerHandle::Get();
- auto host =
- LayerTreeHost::CreateSingleThreaded(&single_thread_client, &params);
+ auto host = LayerTreeHost::CreateSingleThreaded(&single_thread_client,
+ std::move(params));
client.SetLayerTreeHost(host.get());
client.SetUseSoftwareCompositing(!gpu);
@@ -1664,7 +1664,8 @@ class SoftwareTextureLayerPurgeMemoryTest : public SoftwareTextureLayerTest {
// Call OnPurgeMemory() to ensure that the same SharedBitmapId doesn't get
// registered again on the next draw.
if (step_ == 1)
- static_cast<base::MemoryCoordinatorClient*>(host_impl)->OnPurgeMemory();
+ base::MemoryPressureListener::SimulatePressureNotification(
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
}
void DisplayReceivedCompositorFrameOnThread(
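The hunk above replaces the MemoryCoordinatorClient::OnPurgeMemory() call with base::MemoryPressureListener::SimulatePressureNotification(). A minimal sketch of driving that path in a test follows; only SimulatePressureNotification() and the CRITICAL level come from this patch, while the listener, the OnMemoryPressure callback, and the assumption that a task environment is running are illustrative.

// Sketch: simulating critical memory pressure in a test. Assumes a message
// loop / task environment exists so the notification can be delivered.
#include "base/bind.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/run_loop.h"

void OnMemoryPressure(
    base::MemoryPressureListener::MemoryPressureLevel level) {
  // A real client would drop caches here when |level| is CRITICAL.
}

void SimulateCriticalPressureForTest() {
  base::MemoryPressureListener listener(
      base::BindRepeating(&OnMemoryPressure));
  base::MemoryPressureListener::SimulatePressureNotification(
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
  base::RunLoop().RunUntilIdle();  // Let the notification reach |listener|.
}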
diff --git a/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.cc b/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.cc
index 11ed7c1322e..34be99f811b 100644
--- a/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.cc
+++ b/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.cc
@@ -7,6 +7,9 @@
#include <utility>
#include "base/bind.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/trace_event/trace_event.h"
#include "cc/base/histograms.h"
@@ -17,9 +20,45 @@
#include "components/viz/common/hit_test/hit_test_region_list.h"
#include "components/viz/common/quads/compositor_frame.h"
+namespace {
+
+base::HistogramBase* GetHistogramNamed(const char* histogram_name_format,
+ const char* client_name) {
+ if (!client_name)
+ return nullptr;
+
+ return base::LinearHistogram::FactoryMicrosecondsTimeGet(
+ base::StringPrintf(histogram_name_format, client_name),
+ base::TimeDelta::FromMicroseconds(1),
+ base::TimeDelta::FromMilliseconds(200), 50,
+ base::HistogramBase::kUmaTargetedHistogramFlag);
+}
+} // namespace
+
namespace cc {
namespace mojo_embedder {
+AsyncLayerTreeFrameSink::PipelineReporting::PipelineReporting(
+ const viz::BeginFrameArgs args,
+ base::TimeTicks now,
+ base::HistogramBase* submit_begin_frame_histogram)
+ : trace_id_(args.trace_id),
+ frame_time_(now),
+ submit_begin_frame_histogram_(submit_begin_frame_histogram) {}
+
+AsyncLayerTreeFrameSink::PipelineReporting::~PipelineReporting() = default;
+
+void AsyncLayerTreeFrameSink::PipelineReporting::Report() {
+ TRACE_EVENT_WITH_FLOW1("viz,benchmark", "Graphics.Pipeline",
+ TRACE_ID_GLOBAL(trace_id_),
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+ "step", "SubmitCompositorFrame");
+ auto report_time = base::TimeTicks::Now() - frame_time_;
+
+ if (submit_begin_frame_histogram_)
+ submit_begin_frame_histogram_->AddTimeMicrosecondsGranularity(report_time);
+}
+
AsyncLayerTreeFrameSink::InitParams::InitParams() = default;
AsyncLayerTreeFrameSink::InitParams::~InitParams() = default;
@@ -51,6 +90,12 @@ AsyncLayerTreeFrameSink::AsyncLayerTreeFrameSink(
client_binding_(this),
enable_surface_synchronization_(params->enable_surface_synchronization),
wants_animate_only_begin_frames_(params->wants_animate_only_begin_frames),
+ receive_begin_frame_histogram_(
+ GetHistogramNamed("GraphicsPipeline.%s.ReceivedBeginFrame",
+ params->client_name)),
+ submit_begin_frame_histogram_(GetHistogramNamed(
+ "GraphicsPipeline.%s.SubmitCompositorFrameAfterBeginFrame",
+ params->client_name)),
weak_factory_(this) {
DETACH_FROM_THREAD(thread_checker_);
}
@@ -124,11 +169,15 @@ void AsyncLayerTreeFrameSink::SubmitCompositorFrame(
frame.metadata.begin_frame_ack.sequence_number);
TRACE_EVENT0("cc,benchmark",
"AsyncLayerTreeFrameSink::SubmitCompositorFrame");
- TRACE_EVENT_WITH_FLOW1(
- "viz,benchmark", "Graphics.Pipeline",
- TRACE_ID_GLOBAL(frame.metadata.begin_frame_ack.trace_id),
- TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "step",
- "SubmitCompositorFrame");
+
+ // It's possible to request an immediate composite from cc which will bypass
+ // BeginFrame. In that case, we cannot collect full graphics pipeline data.
+ auto it = pipeline_reporting_frame_times_.find(
+ frame.metadata.begin_frame_ack.trace_id);
+ if (it != pipeline_reporting_frame_times_.end()) {
+ it->second.Report();
+ pipeline_reporting_frame_times_.erase(it);
+ }
if (!enable_surface_synchronization_) {
local_surface_id_ =
@@ -169,6 +218,14 @@ void AsyncLayerTreeFrameSink::SubmitCompositorFrame(
"SubmitCompositorFrame", "surface_id", local_surface_id_.ToString());
}
+ // The trace_id is negated in order to keep the Graphics.Pipeline and
+ // Event.Pipeline flows separated.
+ const int64_t trace_id = ~frame.metadata.begin_frame_ack.trace_id;
+ TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("viz.hit_testing_flow"),
+ "Event.Pipeline", TRACE_ID_GLOBAL(trace_id),
+ TRACE_EVENT_FLAG_FLOW_OUT, "step",
+ "SubmitHitTestData");
+
compositor_frame_sink_ptr_->SubmitCompositorFrame(
local_surface_id_, std::move(frame), std::move(hit_test_region_list),
tracing_enabled ? base::TimeTicks::Now().since_origin().InMicroseconds()
@@ -180,7 +237,14 @@ void AsyncLayerTreeFrameSink::DidNotProduceFrame(
DCHECK(compositor_frame_sink_ptr_);
DCHECK(!ack.has_damage);
DCHECK_LE(viz::BeginFrameArgs::kStartingFrameNumber, ack.sequence_number);
- compositor_frame_sink_ptr_->DidNotProduceFrame(ack);
+
+ // TODO(yiyix): Remove duplicated calls of DidNotProduceFrame from the same
+ // BeginFrames. https://crbug.com/881949
+ auto it = pipeline_reporting_frame_times_.find(ack.trace_id);
+ if (it != pipeline_reporting_frame_times_.end()) {
+ compositor_frame_sink_ptr_->DidNotProduceFrame(ack);
+ pipeline_reporting_frame_times_.erase(it);
+ }
}
void AsyncLayerTreeFrameSink::DidAllocateSharedBitmap(
@@ -211,15 +275,22 @@ void AsyncLayerTreeFrameSink::DidPresentCompositorFrame(
}
void AsyncLayerTreeFrameSink::OnBeginFrame(const viz::BeginFrameArgs& args) {
- // Note that client_name is constant during the lifetime of the process and
- // it's either "Browser" or "Renderer".
- if (const char* client_name = GetClientNameForMetrics()) {
- base::TimeDelta frame_difference = base::TimeTicks::Now() - args.frame_time;
- UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
- base::StringPrintf("GraphicsPipeline.%s.ReceivedBeginFrame",
- client_name),
- frame_difference, base::TimeDelta::FromMicroseconds(1),
- base::TimeDelta::FromMilliseconds(100), 50);
+ DCHECK_LE(pipeline_reporting_frame_times_.size(), 25u);
+ if (args.trace_id != -1) {
+ base::TimeTicks current_time = base::TimeTicks::Now();
+ PipelineReporting report(args, current_time, submit_begin_frame_histogram_);
+ pipeline_reporting_frame_times_.emplace(args.trace_id, report);
+ // Missed BeginFrames use the frame time of the last received BeginFrame
+ // which is bogus from a reporting perspective if nothing has been updating
+ // on screen for a while.
+ if (args.type != viz::BeginFrameArgs::MISSED) {
+ base::TimeDelta frame_difference = current_time - args.frame_time;
+
+ if (receive_begin_frame_histogram_) {
+ receive_begin_frame_histogram_->AddTimeMicrosecondsGranularity(
+ frame_difference);
+ }
+ }
}
if (!needs_begin_frames_) {
TRACE_EVENT_WITH_FLOW1("viz,benchmark", "Graphics.Pipeline",
@@ -229,12 +300,13 @@ void AsyncLayerTreeFrameSink::OnBeginFrame(const viz::BeginFrameArgs& args) {
// We had a race with SetNeedsBeginFrame(false) and still need to let the
// sink know that we didn't use this BeginFrame.
DidNotProduceFrame(viz::BeginFrameAck(args, false));
- } else {
- TRACE_EVENT_WITH_FLOW1("viz,benchmark", "Graphics.Pipeline",
- TRACE_ID_GLOBAL(args.trace_id),
- TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
- "step", "ReceiveBeginFrame");
+ return;
}
+ TRACE_EVENT_WITH_FLOW1("viz,benchmark", "Graphics.Pipeline",
+ TRACE_ID_GLOBAL(args.trace_id),
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+ "step", "ReceiveBeginFrame");
+
if (begin_frame_source_)
begin_frame_source_->OnBeginFrame(args);
}
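The new reporting path above keys a PipelineReporting entry on args.trace_id in OnBeginFrame() and, when the matching compositor frame is submitted or explicitly not produced, records the elapsed time into a microseconds histogram. A condensed, self-contained sketch of that pattern follows; the class and member names are illustrative, while the histogram calls match the ones introduced in this patch.

// Sketch of the trace_id-keyed BeginFrame-to-submit latency reporting.
#include <cstdint>
#include "base/containers/flat_map.h"
#include "base/metrics/histogram_base.h"
#include "base/time/time.h"

class FrameLatencyRecorder {
 public:
  explicit FrameLatencyRecorder(base::HistogramBase* histogram)
      : histogram_(histogram) {}

  void OnBeginFrame(int64_t trace_id, base::TimeTicks receive_time) {
    if (trace_id != -1)
      begin_frame_times_[trace_id] = receive_time;
  }

  void OnSubmitCompositorFrame(int64_t trace_id) {
    auto it = begin_frame_times_.find(trace_id);
    if (it == begin_frame_times_.end())
      return;  // e.g. an immediate composite that bypassed BeginFrame.
    if (histogram_) {
      histogram_->AddTimeMicrosecondsGranularity(base::TimeTicks::Now() -
                                                 it->second);
    }
    begin_frame_times_.erase(it);
  }

 private:
  base::HistogramBase* histogram_;  // May be null when metrics are disabled.
  base::flat_map<int64_t, base::TimeTicks> begin_frame_times_;
};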
diff --git a/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.h b/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.h
index 28bbfdf7252..71be58d39f3 100644
--- a/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.h
+++ b/chromium/cc/mojo_embedder/async_layer_tree_frame_sink.h
@@ -21,6 +21,10 @@
#include "mojo/public/cpp/bindings/binding.h"
#include "services/viz/public/interfaces/compositing/compositor_frame_sink.mojom.h"
+namespace base {
+class HistogramBase;
+} // namespace base
+
namespace viz {
class HitTestDataProvider;
class LocalSurfaceIdProvider;
@@ -37,6 +41,32 @@ class CC_MOJO_EMBEDDER_EXPORT AsyncLayerTreeFrameSink
public viz::mojom::CompositorFrameSinkClient,
public viz::ExternalBeginFrameSourceClient {
public:
+ // This class is used to handle graphics-pipeline-related metrics
+ // reporting.
+ class PipelineReporting {
+ public:
+ PipelineReporting(viz::BeginFrameArgs args,
+ base::TimeTicks now,
+ base::HistogramBase* submit_begin_frame_histogram);
+ ~PipelineReporting();
+
+ void Report();
+
+ int64_t trace_id() const { return trace_id_; }
+
+ private:
+ // The trace id of a BeginFrame which is used to track its progress on the
+ // client side.
+ int64_t trace_id_;
+
+ // The time at which the BeginFrame arrived on the client side.
+ base::TimeTicks frame_time_;
+
+ // Histogram metrics used to record
+ // GraphicsPipeline.ClientName.SubmitCompositorFrameAfterBeginFrame
+ base::HistogramBase* submit_begin_frame_histogram_;
+ };
+
struct CC_MOJO_EMBEDDER_EXPORT UnboundMessagePipes {
UnboundMessagePipes();
~UnboundMessagePipes();
@@ -65,6 +95,7 @@ class CC_MOJO_EMBEDDER_EXPORT AsyncLayerTreeFrameSink
UnboundMessagePipes pipes;
bool enable_surface_synchronization = false;
bool wants_animate_only_begin_frames = false;
+ const char* client_name = nullptr;
};
AsyncLayerTreeFrameSink(
@@ -137,7 +168,16 @@ class CC_MOJO_EMBEDDER_EXPORT AsyncLayerTreeFrameSink
viz::LocalSurfaceId last_submitted_local_surface_id_;
float last_submitted_device_scale_factor_ = 1.f;
gfx::Size last_submitted_size_in_pixels_;
+ // Use this map to record the time when the client received the BeginFrameArgs.
+ base::flat_map<int64_t, PipelineReporting> pipeline_reporting_frame_times_;
+
+ // Histogram metrics used to record
+ // GraphicsPipeline.ClientName.ReceivedBeginFrame
+ base::HistogramBase* const receive_begin_frame_histogram_;
+ // Histogram metrics used to record
+ // GraphicsPipeline.ClientName.SubmitCompositorFrameAfterBeginFrame
+ base::HistogramBase* const submit_begin_frame_histogram_;
base::WeakPtrFactory<AsyncLayerTreeFrameSink> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(AsyncLayerTreeFrameSink);
diff --git a/chromium/cc/paint/BUILD.gn b/chromium/cc/paint/BUILD.gn
index 92306d62373..6ea4596b91b 100644
--- a/chromium/cc/paint/BUILD.gn
+++ b/chromium/cc/paint/BUILD.gn
@@ -63,10 +63,6 @@ cc_component("paint") {
"paint_text_blob.h",
"paint_text_blob_builder.cc",
"paint_text_blob_builder.h",
- "paint_typeface.cc",
- "paint_typeface.h",
- "paint_typeface_transfer_cache_entry.cc",
- "paint_typeface_transfer_cache_entry.h",
"path_transfer_cache_entry.cc",
"path_transfer_cache_entry.h",
"raw_memory_transfer_cache_entry.cc",
diff --git a/chromium/cc/paint/discardable_image_map.cc b/chromium/cc/paint/discardable_image_map.cc
index 232dab6a1d4..1c53bb9b523 100644
--- a/chromium/cc/paint/discardable_image_map.cc
+++ b/chromium/cc/paint/discardable_image_map.cc
@@ -12,6 +12,7 @@
#include "base/auto_reset.h"
#include "base/containers/adapters.h"
#include "base/metrics/histogram_macros.h"
+#include "base/no_destructor.h"
#include "base/trace_event/trace_event.h"
#include "cc/paint/paint_filter.h"
#include "cc/paint/paint_op_buffer.h"
@@ -456,9 +457,9 @@ void DiscardableImageMap::GetDiscardableImagesInRect(
const DiscardableImageMap::Rects& DiscardableImageMap::GetRectsForImage(
PaintImage::Id image_id) const {
- static const Rects kEmptyRects;
+ static const base::NoDestructor<Rects> kEmptyRects;
auto it = image_id_to_rects_.find(image_id);
- return it == image_id_to_rects_.end() ? kEmptyRects : it->second;
+ return it == image_id_to_rects_.end() ? *kEmptyRects : it->second;
}
void DiscardableImageMap::Reset() {
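The change above swaps a function-local static for base::NoDestructor, so the shared empty value is constructed on first use and never destroyed: the exit-time destructor goes away while callers still get a stable reference. A minimal sketch of the same idiom, assuming only a default-constructible type:

// Sketch of the base::NoDestructor idiom: *kEmpty yields the wrapped object,
// which is intentionally leaked at shutdown.
#include <string>
#include <vector>
#include "base/no_destructor.h"

const std::vector<std::string>& EmptyStringList() {
  static const base::NoDestructor<std::vector<std::string>> kEmpty;
  return *kEmpty;
}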
diff --git a/chromium/cc/paint/oop_pixeltest.cc b/chromium/cc/paint/oop_pixeltest.cc
index 8d83df688f5..d79fa1c341c 100644
--- a/chromium/cc/paint/oop_pixeltest.cc
+++ b/chromium/cc/paint/oop_pixeltest.cc
@@ -1387,11 +1387,10 @@ TEST_F(OopPixelTest, DrawRectColorSpace) {
}
scoped_refptr<PaintTextBlob> BuildTextBlob(
- PaintTypeface typeface = PaintTypeface(),
+ sk_sp<SkTypeface> typeface = SkTypeface::MakeDefault(),
bool use_lcd_text = false) {
- SkFontStyle style;
if (!typeface) {
- typeface = PaintTypeface::FromFamilyNameAndFontStyle("monospace", style);
+ typeface = SkTypeface::MakeFromName("monospace", SkFontStyle());
}
PaintFont font;
@@ -1457,8 +1456,8 @@ class OopRecordShaderPixelTest : public OopPixelTest,
PaintFlags flags;
flags.setStyle(PaintFlags::kFill_Style);
flags.setColor(SK_ColorGREEN);
- paint_record->push<DrawTextBlobOp>(BuildTextBlob(PaintTypeface(), UseLcdText()), 0u, 0u,
- flags);
+ paint_record->push<DrawTextBlobOp>(
+ BuildTextBlob(SkTypeface::MakeDefault(), UseLcdText()), 0u, 0u, flags);
auto paint_record_shader = PaintShader::MakePaintRecord(
paint_record, SkRect::MakeWH(25, 25), SkShader::kRepeat_TileMode,
SkShader::kRepeat_TileMode, nullptr);
@@ -1501,8 +1500,8 @@ class OopRecordFilterPixelTest : public OopPixelTest,
PaintFlags flags;
flags.setStyle(PaintFlags::kFill_Style);
flags.setColor(SK_ColorGREEN);
- paint_record->push<DrawTextBlobOp>(BuildTextBlob(PaintTypeface(), UseLcdText()), 0u, 0u,
- flags);
+ paint_record->push<DrawTextBlobOp>(
+ BuildTextBlob(SkTypeface::MakeDefault(), UseLcdText()), 0u, 0u, flags);
auto paint_record_filter =
sk_make_sp<RecordPaintFilter>(paint_record, SkRect::MakeWH(100, 100));
@@ -1546,18 +1545,16 @@ TEST_F(OopPixelTest, DrawTextMultipleRasterCHROMIUM) {
PaintFlags flags;
flags.setStyle(PaintFlags::kFill_Style);
flags.setColor(SK_ColorGREEN);
- display_item_list->push<DrawTextBlobOp>(
- BuildTextBlob(PaintTypeface::FromSkTypeface(sk_typeface_1)), 0u, 0u,
- flags);
+ display_item_list->push<DrawTextBlobOp>(BuildTextBlob(sk_typeface_1), 0u, 0u,
+ flags);
display_item_list->EndPaintOfUnpaired(options.full_raster_rect);
display_item_list->Finalize();
// Create another list with a different typeface.
auto display_item_list_2 = base::MakeRefCounted<DisplayItemList>();
display_item_list_2->StartPaint();
- display_item_list_2->push<DrawTextBlobOp>(
- BuildTextBlob(PaintTypeface::FromSkTypeface(sk_typeface_2)), 0u, 0u,
- flags);
+ display_item_list_2->push<DrawTextBlobOp>(BuildTextBlob(sk_typeface_2), 0u,
+ 0u, flags);
display_item_list_2->EndPaintOfUnpaired(options.full_raster_rect);
display_item_list_2->Finalize();
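With PaintTypeface removed, BuildTextBlob() above takes an sk_sp<SkTypeface> directly and falls back to a named monospace face when passed null. A small sketch of the two Skia factories it now relies on; the variable names are mine, the factories are the ones used in the hunk above.

// Sketch: obtaining typefaces straight from Skia.
#include "third_party/skia/include/core/SkFontStyle.h"
#include "third_party/skia/include/core/SkTypeface.h"

void MakeTypefacesForTest() {
  // Default system typeface; used as BuildTextBlob()'s default argument.
  sk_sp<SkTypeface> default_face = SkTypeface::MakeDefault();
  // Named family; may be null on some platforms if "monospace" cannot be
  // resolved, which is why callers keep a fallback face around.
  sk_sp<SkTypeface> mono_face =
      SkTypeface::MakeFromName("monospace", SkFontStyle());
}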
diff --git a/chromium/cc/paint/paint_font.cc b/chromium/cc/paint/paint_font.cc
index d04d40b0d3f..96818286705 100644
--- a/chromium/cc/paint/paint_font.cc
+++ b/chromium/cc/paint/paint_font.cc
@@ -5,7 +5,6 @@
#include "cc/paint/paint_font.h"
#include "cc/paint/paint_export.h"
-#include "cc/paint/paint_typeface.h"
#include "third_party/skia/include/core/SkPaint.h"
namespace cc {
@@ -45,9 +44,9 @@ void PaintFont::SetTextSize(SkScalar size) {
sk_paint_.setTextSize(size);
}
-void PaintFont::SetTypeface(const PaintTypeface& typeface) {
+void PaintFont::SetTypeface(sk_sp<SkTypeface> typeface) {
typeface_ = typeface;
- sk_paint_.setTypeface(typeface.ToSkTypeface());
+ sk_paint_.setTypeface(typeface);
}
void PaintFont::SetFakeBoldText(bool bold_text) {
diff --git a/chromium/cc/paint/paint_font.h b/chromium/cc/paint/paint_font.h
index bb947a0f1c8..3b34ac404fb 100644
--- a/chromium/cc/paint/paint_font.h
+++ b/chromium/cc/paint/paint_font.h
@@ -6,8 +6,9 @@
#define CC_PAINT_PAINT_FONT_H_
#include "cc/paint/paint_export.h"
-#include "cc/paint/paint_typeface.h"
#include "third_party/skia/include/core/SkPaint.h"
+#include "third_party/skia/include/core/SkRefCnt.h"
+#include "third_party/skia/include/core/SkTypeface.h"
namespace cc {
@@ -24,18 +25,18 @@ class CC_PAINT_EXPORT PaintFont {
void SetLcdRenderText(bool lcd_text);
void SetSubpixelText(bool subpixel_text);
void SetTextSize(SkScalar size);
- void SetTypeface(const PaintTypeface& typeface);
+ void SetTypeface(sk_sp<SkTypeface> typeface);
void SetFakeBoldText(bool bold_text);
void SetTextSkewX(SkScalar skew);
void SetFlags(uint32_t flags);
uint32_t flags() const { return sk_paint_.getFlags(); }
- const PaintTypeface& typeface() const { return typeface_; }
+ const sk_sp<SkTypeface> typeface() const { return typeface_; }
const SkPaint& ToSkPaint() const { return sk_paint_; }
private:
- PaintTypeface typeface_;
+ sk_sp<SkTypeface> typeface_;
SkPaint sk_paint_;
};
diff --git a/chromium/cc/paint/paint_image.cc b/chromium/cc/paint/paint_image.cc
index 66b14ec3c75..dfdcc1fd5e5 100644
--- a/chromium/cc/paint/paint_image.cc
+++ b/chromium/cc/paint/paint_image.cc
@@ -197,36 +197,6 @@ bool PaintImage::DecodeFromGenerator(void* memory,
// First convert the info to have the requested color space, since the decoder
// will convert this for us.
*info = info->makeColorSpace(std::move(color_space));
- if (info->colorType() != kN32_SkColorType) {
- // Since the decoders only support N32 color types, make one of those and
- // decode into temporary memory. Then read the bitmap which will convert it
- // to the target color type.
- SkImageInfo n32info = info->makeColorType(kN32_SkColorType);
- std::unique_ptr<char[]> n32memory(
- new char[n32info.minRowBytes() * n32info.height()]);
-
- bool result = paint_image_generator_->GetPixels(
- n32info, n32memory.get(), n32info.minRowBytes(), frame_index, client_id,
- unique_id());
- if (!result)
- return false;
-
- // The following block will use Skia to do the color type conversion from
- // N32 to the destination color type. Since color space conversion was
- // already done in GetPixels() above, remove the color space information
- // first in case Skia tries to use it for something. In practice, n32info
- // and *info color spaces match, so it should work without removing the
- // color spaces, but better be safe.
- SkImageInfo n32info_no_colorspace = n32info.makeColorSpace(nullptr);
- SkImageInfo info_no_colorspace = info->makeColorSpace(nullptr);
-
- SkBitmap bitmap;
- bitmap.installPixels(n32info_no_colorspace, n32memory.get(),
- n32info.minRowBytes());
- return bitmap.readPixels(info_no_colorspace, memory, info->minRowBytes(), 0,
- 0);
- }
-
return paint_image_generator_->GetPixels(*info, memory, info->minRowBytes(),
frame_index, client_id, unique_id());
}
@@ -271,6 +241,14 @@ PaintImage::FrameKey PaintImage::GetKeyForFrame(size_t frame_index) const {
return FrameKey(content_id, frame_index, subset_rect_);
}
+SkColorType PaintImage::GetColorType() const {
+ if (paint_image_generator_)
+ return paint_image_generator_->GetSkImageInfo().colorType();
+ if (GetSkImage())
+ return GetSkImage()->colorType();
+ return kUnknown_SkColorType;
+}
+
const std::vector<FrameMetadata>& PaintImage::GetFrameMetadata() const {
DCHECK_EQ(animation_type_, AnimationType::ANIMATED);
DCHECK(paint_image_generator_);
diff --git a/chromium/cc/paint/paint_image.h b/chromium/cc/paint/paint_image.h
index 1c7fc39ad37..efde11152ae 100644
--- a/chromium/cc/paint/paint_image.h
+++ b/chromium/cc/paint/paint_image.h
@@ -149,6 +149,7 @@ class CC_PAINT_EXPORT PaintImage {
AnimationType animation_type() const { return animation_type_; }
CompletionState completion_state() const { return completion_state_; }
bool is_multipart() const { return is_multipart_; }
+ bool is_high_bit_depth() const { return is_high_bit_depth_; }
int repetition_count() const { return repetition_count_; }
bool ShouldAnimate() const;
AnimationSequenceId reset_animation_sequence_id() const {
@@ -165,6 +166,9 @@ class CC_PAINT_EXPORT PaintImage {
int height() const { return GetSkImage()->height(); }
SkColorSpace* color_space() const { return GetSkImage()->colorSpace(); }
+ // Returns the color type of this image.
+ SkColorType GetColorType() const;
+
// Returns a unique id for the pixel data for the frame at |frame_index|.
FrameKey GetKeyForFrame(size_t frame_index) const;
@@ -223,6 +227,9 @@ class CC_PAINT_EXPORT PaintImage {
// Whether the data fetched for this image is part of a multipart response.
bool is_multipart_ = false;
+ // Whether this image has more than 8 bits per color channel.
+ bool is_high_bit_depth_ = false;
+
// An incrementing sequence number maintained by the painter to indicate if
// this animation should be reset in the compositor. Incrementing this number
// will reset this animation in the compositor for the first frame which has a
diff --git a/chromium/cc/paint/paint_image_builder.h b/chromium/cc/paint/paint_image_builder.h
index 16ef5a77d27..d12705003c1 100644
--- a/chromium/cc/paint/paint_image_builder.h
+++ b/chromium/cc/paint/paint_image_builder.h
@@ -73,6 +73,10 @@ class CC_PAINT_EXPORT PaintImageBuilder {
paint_image_.is_multipart_ = is_multipart;
return std::move(*this);
}
+ PaintImageBuilder&& set_is_high_bit_depth(bool is_high_bit_depth) {
+ paint_image_.is_high_bit_depth_ = is_high_bit_depth;
+ return std::move(*this);
+ }
PaintImageBuilder&& set_repetition_count(int count) {
paint_image_.repetition_count_ = count;
return std::move(*this);
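The builder gains set_is_high_bit_depth() so decoders can flag images with more than 8 bits per color channel. A hypothetical sketch of setting it while building a PaintImage; everything except set_is_high_bit_depth() (WithDefault(), set_id(), set_image(), GetNextId(), GetNextContentId(), TakePaintImage()) is assumed from the surrounding cc/paint API rather than taken from this patch.

// Hypothetical usage sketch, under the API assumptions noted above.
#include <utility>
#include "cc/paint/paint_image_builder.h"
#include "third_party/skia/include/core/SkImage.h"

cc::PaintImage MakeHighBitDepthPaintImage(sk_sp<SkImage> image) {
  return cc::PaintImageBuilder::WithDefault()
      .set_id(cc::PaintImage::GetNextId())
      .set_image(std::move(image), cc::PaintImage::GetNextContentId())
      .set_is_high_bit_depth(true)  // e.g. a 16-bit PNG or half-float decode.
      .TakePaintImage();
}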
diff --git a/chromium/cc/paint/paint_op_buffer.h b/chromium/cc/paint/paint_op_buffer.h
index 909f0cdc756..b65491d6ac1 100644
--- a/chromium/cc/paint/paint_op_buffer.h
+++ b/chromium/cc/paint/paint_op_buffer.h
@@ -7,6 +7,7 @@
#include <stdint.h>
+#include <limits>
#include <string>
#include <type_traits>
@@ -930,8 +931,9 @@ class CC_PAINT_EXPORT PaintOpBuffer : public SkRefCnt {
void push(Args&&... args) {
static_assert(std::is_convertible<T, PaintOp>::value, "T not a PaintOp.");
static_assert(alignof(T) <= PaintOpAlign, "");
-
- size_t skip = ComputeOpSkip(sizeof(T));
+ static_assert(sizeof(T) < std::numeric_limits<uint16_t>::max(),
+ "Cannot fit op code in skip");
+ uint16_t skip = static_cast<uint16_t>(ComputeOpSkip(sizeof(T)));
T* op = reinterpret_cast<T*>(AllocatePaintOp(skip));
new (op) T{std::forward<Args>(args)...};
diff --git a/chromium/cc/paint/paint_op_buffer_serializer.cc b/chromium/cc/paint/paint_op_buffer_serializer.cc
index 62b6c1d42b2..0c56de338da 100644
--- a/chromium/cc/paint/paint_op_buffer_serializer.cc
+++ b/chromium/cc/paint/paint_op_buffer_serializer.cc
@@ -203,8 +203,8 @@ void PaintOpBufferSerializer::ClearForOpaqueRaster(
// clear inside of that rect if needed.
if (device_column.intersect(playback_device_rect)) {
Save(options, params);
- ClipRectOp clip_op(SkRect::MakeFromIRect(device_column),
- SkClipOp::kIntersect, false);
+ ClipRectOp clip_op(SkRect::Make(device_column), SkClipOp::kIntersect,
+ false);
SerializeOp(&clip_op, options, params);
DrawColorOp clear_op(preamble.background_color, SkBlendMode::kSrc);
SerializeOp(&clear_op, options, params);
@@ -212,8 +212,7 @@ void PaintOpBufferSerializer::ClearForOpaqueRaster(
}
if (device_row.intersect(playback_device_rect)) {
Save(options, params);
- ClipRectOp clip_op(SkRect::MakeFromIRect(device_row), SkClipOp::kIntersect,
- false);
+ ClipRectOp clip_op(SkRect::Make(device_row), SkClipOp::kIntersect, false);
SerializeOp(&clip_op, options, params);
DrawColorOp clear_op(preamble.background_color, SkBlendMode::kSrc);
SerializeOp(&clear_op, options, params);
diff --git a/chromium/cc/paint/paint_op_buffer_unittest.cc b/chromium/cc/paint/paint_op_buffer_unittest.cc
index a2ca6a77438..557a1187a17 100644
--- a/chromium/cc/paint/paint_op_buffer_unittest.cc
+++ b/chromium/cc/paint/paint_op_buffer_unittest.cc
@@ -1152,11 +1152,11 @@ std::vector<std::vector<SkPoint>> test_point_arrays = {
SkPoint::Make(9, 9), SkPoint::Make(50, 50), SkPoint::Make(100, 100)},
};
-std::vector<std::vector<PaintTypeface>> test_typefaces = {
- [] { return std::vector<PaintTypeface>{PaintTypeface::TestTypeface()}; }(),
+std::vector<std::vector<sk_sp<SkTypeface>>> test_typefaces = {
+ [] { return std::vector<sk_sp<SkTypeface>>{SkTypeface::MakeDefault()}; }(),
[] {
- return std::vector<PaintTypeface>{PaintTypeface::TestTypeface(),
- PaintTypeface::TestTypeface()};
+ return std::vector<sk_sp<SkTypeface>>{SkTypeface::MakeDefault(),
+ SkTypeface::MakeDefault()};
}(),
};
@@ -1164,7 +1164,7 @@ std::vector<scoped_refptr<PaintTextBlob>> test_paint_blobs = {
[] {
SkPaint font;
font.setTextEncoding(SkPaint::kGlyphID_TextEncoding);
- font.setTypeface(test_typefaces[0][0].ToSkTypeface());
+ font.setTypeface(test_typefaces[0][0]);
SkTextBlobBuilder builder;
int glyph_count = 5;
@@ -1178,7 +1178,7 @@ std::vector<scoped_refptr<PaintTextBlob>> test_paint_blobs = {
[] {
SkPaint font;
font.setTextEncoding(SkPaint::kGlyphID_TextEncoding);
- font.setTypeface(test_typefaces[1][0].ToSkTypeface());
+ font.setTypeface(test_typefaces[1][0]);
SkTextBlobBuilder builder;
int glyph_count = 5;
@@ -1194,7 +1194,7 @@ std::vector<scoped_refptr<PaintTextBlob>> test_paint_blobs = {
std::fill(run2.glyphs, run2.glyphs + glyph_count, 0);
std::fill(run2.pos, run2.pos + glyph_count * 2, 0);
- font.setTypeface(test_typefaces[1][1].ToSkTypeface());
+ font.setTypeface(test_typefaces[1][1]);
glyph_count = 8;
const auto& run3 =
builder.allocRunPosH(font, glyph_count, 0, &test_rects[2]);
diff --git a/chromium/cc/paint/paint_op_perftest.cc b/chromium/cc/paint/paint_op_perftest.cc
index d0616652bcc..2f9220f2ae1 100644
--- a/chromium/cc/paint/paint_op_perftest.cc
+++ b/chromium/cc/paint/paint_op_perftest.cc
@@ -157,18 +157,18 @@ TEST_F(PaintOpPerfTest, ManyFlagsOps) {
TEST_F(PaintOpPerfTest, TextOps) {
PaintOpBuffer buffer;
- auto typeface = PaintTypeface::TestTypeface();
+ auto typeface = SkTypeface::MakeDefault();
SkPaint font;
font.setTextEncoding(SkPaint::kGlyphID_TextEncoding);
- font.setTypeface(typeface.ToSkTypeface());
+ font.setTypeface(typeface);
SkTextBlobBuilder builder;
int glyph_count = 5;
SkRect rect = SkRect::MakeXYWH(1, 1, 1, 1);
const auto& run = builder.allocRun(font, glyph_count, 1.2f, 2.3f, &rect);
std::fill(run.glyphs, run.glyphs + glyph_count, 0);
- std::vector<PaintTypeface> typefaces = {typeface};
+ std::vector<sk_sp<SkTypeface>> typefaces = {typeface};
auto blob = base::MakeRefCounted<PaintTextBlob>(builder.make(), typefaces);
PaintFlags flags;
diff --git a/chromium/cc/paint/paint_op_reader.cc b/chromium/cc/paint/paint_op_reader.cc
index b675b056fa1..f5a344fd349 100644
--- a/chromium/cc/paint/paint_op_reader.cc
+++ b/chromium/cc/paint/paint_op_reader.cc
@@ -13,7 +13,6 @@
#include "cc/paint/paint_image_builder.h"
#include "cc/paint/paint_op_buffer.h"
#include "cc/paint/paint_shader.h"
-#include "cc/paint/paint_typeface_transfer_cache_entry.h"
#include "cc/paint/path_transfer_cache_entry.h"
#include "cc/paint/shader_transfer_cache_entry.h"
#include "cc/paint/transfer_cache_deserialize_helper.h"
@@ -82,6 +81,8 @@ bool PaintOpReader::ReadAndValidateOpHeader(const volatile void* input,
size_t input_size,
uint8_t* type,
uint32_t* skip) {
+ if (input_size < 4)
+ return false;
uint32_t first_word = reinterpret_cast<const volatile uint32_t*>(input)[0];
*type = static_cast<uint8_t>(first_word & 0xFF);
*skip = first_word >> 8;
@@ -394,7 +395,7 @@ void PaintOpReader::Read(scoped_refptr<PaintTextBlob>* paint_blob) {
}
*paint_blob = base::MakeRefCounted<PaintTextBlob>(
- std::move(blob), std::vector<PaintTypeface>());
+ std::move(blob), std::vector<sk_sp<SkTypeface>>());
memory_ += data_bytes;
remaining_bytes_ -= data_bytes;
}
@@ -461,6 +462,8 @@ void PaintOpReader::Read(sk_sp<PaintShader>* shader) {
size_t record_size = Read(&ref.record_);
size_t post_size = options_.transfer_cache->GetTotalEntrySizes();
shader_size = post_size - pre_size + record_size;
+
+ ref.id_ = shader_id;
}
decltype(ref.colors_)::size_type colors_size = 0;
ReadSize(&colors_size);
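ReadAndValidateOpHeader() above now rejects buffers shorter than four bytes before touching the header word, which packs the op type in the low 8 bits and the serialized size ("skip") in the upper 24 bits. A standalone sketch of that layout, derived directly from the masks in the hunk:

// Sketch of the op-header decoding implied above: one uint32_t word with the
// type in bits 0-7 and the skip count in bits 8-31.
#include <cstddef>
#include <cstdint>
#include <cstring>

bool ReadOpHeader(const void* input, size_t input_size,
                  uint8_t* type, uint32_t* skip) {
  if (input_size < 4)
    return false;  // Too small to hold the header word at all.
  uint32_t first_word;
  std::memcpy(&first_word, input, sizeof(first_word));
  *type = static_cast<uint8_t>(first_word & 0xFF);
  *skip = first_word >> 8;
  return true;
}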
diff --git a/chromium/cc/paint/paint_op_writer.cc b/chromium/cc/paint/paint_op_writer.cc
index 20bb8668b92..ceaf05e6cdb 100644
--- a/chromium/cc/paint/paint_op_writer.cc
+++ b/chromium/cc/paint/paint_op_writer.cc
@@ -10,7 +10,6 @@
#include "cc/paint/paint_flags.h"
#include "cc/paint/paint_op_buffer_serializer.h"
#include "cc/paint/paint_shader.h"
-#include "cc/paint/paint_typeface_transfer_cache_entry.h"
#include "cc/paint/path_transfer_cache_entry.h"
#include "cc/paint/transfer_cache_serialize_helper.h"
#include "third_party/skia/include/core/SkSerialProcs.h"
diff --git a/chromium/cc/paint/paint_text_blob.cc b/chromium/cc/paint/paint_text_blob.cc
index b992ae1ed0a..d82d1b7b927 100644
--- a/chromium/cc/paint/paint_text_blob.cc
+++ b/chromium/cc/paint/paint_text_blob.cc
@@ -6,14 +6,13 @@
#include <vector>
-#include "cc/paint/paint_typeface.h"
#include "third_party/skia/include/core/SkTextBlob.h"
namespace cc {
PaintTextBlob::PaintTextBlob() = default;
PaintTextBlob::PaintTextBlob(sk_sp<SkTextBlob> blob,
- std::vector<PaintTypeface> typefaces)
+ std::vector<sk_sp<SkTypeface>> typefaces)
: sk_blob_(std::move(blob)), typefaces_(std::move(typefaces)) {}
PaintTextBlob::~PaintTextBlob() = default;
diff --git a/chromium/cc/paint/paint_text_blob.h b/chromium/cc/paint/paint_text_blob.h
index ff6b4f54e99..cbe9ed1ba64 100644
--- a/chromium/cc/paint/paint_text_blob.h
+++ b/chromium/cc/paint/paint_text_blob.h
@@ -10,8 +10,9 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "cc/paint/paint_export.h"
-#include "cc/paint/paint_typeface.h"
+#include "third_party/skia/include/core/SkRefCnt.h"
#include "third_party/skia/include/core/SkTextBlob.h"
+#include "third_party/skia/include/core/SkTypeface.h"
namespace cc {
@@ -19,10 +20,11 @@ class CC_PAINT_EXPORT PaintTextBlob
: public base::RefCountedThreadSafe<PaintTextBlob> {
public:
PaintTextBlob();
- PaintTextBlob(sk_sp<SkTextBlob> blob, std::vector<PaintTypeface> typefaces);
+ PaintTextBlob(sk_sp<SkTextBlob> blob,
+ std::vector<sk_sp<SkTypeface>> typefaces);
const sk_sp<SkTextBlob>& ToSkTextBlob() const { return sk_blob_; }
- const std::vector<PaintTypeface>& typefaces() const { return typefaces_; }
+ const std::vector<sk_sp<SkTypeface>>& typefaces() const { return typefaces_; }
operator bool() const { return !!sk_blob_; }
@@ -32,7 +34,7 @@ class CC_PAINT_EXPORT PaintTextBlob
~PaintTextBlob();
sk_sp<SkTextBlob> sk_blob_;
- std::vector<PaintTypeface> typefaces_;
+ std::vector<sk_sp<SkTypeface>> typefaces_;
DISALLOW_COPY_AND_ASSIGN(PaintTextBlob);
};
diff --git a/chromium/cc/paint/paint_text_blob_builder.h b/chromium/cc/paint/paint_text_blob_builder.h
index 57a86c1397b..8c87f9b5cfa 100644
--- a/chromium/cc/paint/paint_text_blob_builder.h
+++ b/chromium/cc/paint/paint_text_blob_builder.h
@@ -38,7 +38,7 @@ class CC_PAINT_EXPORT PaintTextBlobBuilder {
const SkRect* bounds = nullptr);
private:
- std::vector<PaintTypeface> typefaces_;
+ std::vector<sk_sp<SkTypeface>> typefaces_;
SkTextBlobBuilder sk_builder_;
DISALLOW_COPY_AND_ASSIGN(PaintTextBlobBuilder);
diff --git a/chromium/cc/paint/paint_typeface.cc b/chromium/cc/paint/paint_typeface.cc
deleted file mode 100644
index d6ee8c0bb04..00000000000
--- a/chromium/cc/paint/paint_typeface.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "cc/paint/paint_typeface.h"
-#include "build/build_config.h"
-#include "third_party/skia/include/core/SkFontMgr.h"
-#include "third_party/skia/include/ports/SkFontConfigInterface.h"
-
-namespace cc {
-
-// static
-PaintTypeface PaintTypeface::TestTypeface() {
- PaintTypeface typeface;
- typeface.type_ = Type::kTestTypeface;
- typeface.sk_typeface_ = SkTypeface::MakeDefault();
- typeface.CreateSkTypeface();
- return typeface;
-}
-
-// static
-PaintTypeface PaintTypeface::FromSkTypeface(const sk_sp<SkTypeface>& tf) {
- PaintTypeface typeface;
- typeface.type_ = Type::kSkTypeface;
- typeface.sk_typeface_ = tf;
- typeface.CreateSkTypeface();
- return typeface;
-}
-
-// static
-PaintTypeface PaintTypeface::FromFontConfigInterfaceIdAndTtcIndex(
- int config_id,
- int ttc_index) {
- PaintTypeface typeface;
- typeface.type_ = Type::kFontConfigInterfaceIdAndTtcIndex;
- typeface.font_config_interface_id_ = config_id;
- typeface.ttc_index_ = ttc_index;
- typeface.CreateSkTypeface();
- return typeface;
-}
-
-// static
-PaintTypeface PaintTypeface::FromFilenameAndTtcIndex(
- const std::string& filename,
- int ttc_index) {
- PaintTypeface typeface;
- typeface.type_ = Type::kFilenameAndTtcIndex;
- typeface.filename_ = filename;
- typeface.ttc_index_ = ttc_index;
- typeface.CreateSkTypeface();
- return typeface;
-}
-
-// static
-PaintTypeface PaintTypeface::FromFamilyNameAndFontStyle(
- const std::string& family_name,
- const SkFontStyle& font_style) {
- PaintTypeface typeface;
- typeface.type_ = Type::kFamilyNameAndFontStyle;
- typeface.family_name_ = family_name;
- typeface.font_style_ = font_style;
- typeface.CreateSkTypeface();
- return typeface;
-}
-
-PaintTypeface::PaintTypeface() = default;
-PaintTypeface::PaintTypeface(const PaintTypeface& other) {
- *this = other;
-}
-PaintTypeface::PaintTypeface(PaintTypeface&& other) = default;
-PaintTypeface::~PaintTypeface() {
- // TODO(crbug.com/785682): Debugging possible ref counting issue.
- if (sk_typeface_) {
- // We should be a strong owner of this reference. However, to debug a
- // possible ref counting issue, we also ensured that we're a weak owner of
- // this as well. This means that we should be able to always acquire a
- // strong reference (ie the class isn't deleted because we're weak owners,
- // and it should always succeed because we're strong owners).
- if (sk_typeface_->try_ref()) {
- sk_typeface_->unref();
- } else {
- CHECK(false) << "SkTypeface ref-counting problem detected.";
- }
- sk_typeface_->weak_unref();
- }
- // Explicitly delete the typeface so that any crashes resulting from this
- // would point to this line.
- sk_typeface_ = nullptr;
-}
-
-PaintTypeface& PaintTypeface::operator=(const PaintTypeface& other) {
- sk_id_ = other.sk_id_;
- sk_typeface_ = other.sk_typeface_;
-
- // TODO(crbug.com/785682): Debugging possible ref counting issue.
- if (sk_typeface_) {
- // Since we're copying this object which will do a weak unref in the dtor,
- // ensure to bump the weak ref one more time.
- sk_typeface_->weak_ref();
- }
-
- type_ = other.type_;
- font_config_interface_id_ = other.font_config_interface_id_;
- ttc_index_ = other.ttc_index_;
- filename_ = other.filename_;
- family_name_ = other.family_name_;
- font_style_ = other.font_style_;
- return *this;
-}
-
-PaintTypeface& PaintTypeface::operator=(PaintTypeface&& other) = default;
-
-void PaintTypeface::CreateSkTypeface() {
-// MacOS doesn't support this type of creation and relies on NSFonts instead.
-#if !defined(OS_MACOSX)
- switch (type_) {
- case Type::kTestTypeface:
- // Nothing to do here.
- break;
- case Type::kSkTypeface:
- // Nothing to do here.
- break;
- case Type::kFontConfigInterfaceIdAndTtcIndex: {
-// Mimic FontCache::CreateTypeface defines to ensure same behavior.
-#if !defined(OS_WIN) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
- sk_sp<SkFontConfigInterface> fci(SkFontConfigInterface::RefGlobal());
- SkFontConfigInterface::FontIdentity font_identity;
- font_identity.fID = font_config_interface_id_;
- font_identity.fTTCIndex = ttc_index_;
- sk_typeface_ = fci->makeTypeface(font_identity);
-#endif
- break;
- }
- case Type::kFilenameAndTtcIndex:
-// Mimic FontCache::CreateTypeface defines to ensure same behavior.
-#if !defined(OS_WIN) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
- sk_typeface_ = SkTypeface::MakeFromFile(filename_.c_str(), ttc_index_);
-#endif
- break;
- case Type::kFamilyNameAndFontStyle: {
- // This is a fallthrough in all cases in FontCache::CreateTypeface, so
- // this is done unconditionally. Since we create the typeface upon
- // PaintTypeface creation, this should be safe in all cases.
- auto fm(SkFontMgr::RefDefault());
- sk_typeface_ = fm->legacyMakeTypeface(family_name_.c_str(), font_style_);
- break;
- }
- }
-#endif // !defined(OS_MACOSX)
- sk_id_ = sk_typeface_ ? sk_typeface_->uniqueID() : 0;
-
- // TODO(crbug.com/785682): Debugging possible ref counting issue.
- if (sk_typeface_)
- sk_typeface_->weak_ref();
-}
-
-} // namespace cc
diff --git a/chromium/cc/paint/paint_typeface.h b/chromium/cc/paint/paint_typeface.h
deleted file mode 100644
index 0e2f370e885..00000000000
--- a/chromium/cc/paint/paint_typeface.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CC_PAINT_PAINT_TYPEFACE_H_
-#define CC_PAINT_PAINT_TYPEFACE_H_
-
-#include "base/logging.h"
-#include "cc/paint/paint_export.h"
-#include "third_party/skia/include/core/SkTypeface.h"
-
-namespace cc {
-
-class CC_PAINT_EXPORT PaintTypeface {
- public:
- enum class Type : uint8_t {
- kTestTypeface, // This should only be used in tests.
- kSkTypeface,
- kFontConfigInterfaceIdAndTtcIndex,
- kFilenameAndTtcIndex,
- kFamilyNameAndFontStyle,
- // We need to update this if the list is modified.
- kLastType = kFamilyNameAndFontStyle
- };
-
- static PaintTypeface TestTypeface();
- static PaintTypeface FromSkTypeface(const sk_sp<SkTypeface>& typeface);
- static PaintTypeface FromFontConfigInterfaceIdAndTtcIndex(int config_id,
- int ttc_index);
- static PaintTypeface FromFilenameAndTtcIndex(const std::string& filename,
- int ttc_index);
- static PaintTypeface FromFamilyNameAndFontStyle(
- const std::string& family_name,
- const SkFontStyle& font_style);
- // TODO(vmpstr): Need to add FromWebFont?
-
- PaintTypeface();
- PaintTypeface(const PaintTypeface& other);
- PaintTypeface(PaintTypeface&& other);
- ~PaintTypeface();
-
- PaintTypeface& operator=(const PaintTypeface& other);
- PaintTypeface& operator=(PaintTypeface&& other);
- operator bool() const { return !!sk_typeface_; }
-
- // This is used when deserialized to force a different typeface id so that it
- // can be matched from SkTextBlob deserialization.
- void SetSkId(SkFontID id) { sk_id_ = id; }
-
- SkFontID sk_id() const { return sk_id_; }
- const sk_sp<SkTypeface>& ToSkTypeface() const { return sk_typeface_; }
-
- Type type() const { return type_; }
- int font_config_interface_id() const { return font_config_interface_id_; }
- int ttc_index() const { return ttc_index_; }
- const std::string& filename() const { return filename_; }
- const std::string& family_name() const { return family_name_; }
- const SkFontStyle font_style() const { return font_style_; }
-
- private:
- void CreateSkTypeface();
-
- // This is the font ID that should be used by this PaintTypeface, regardless
- // of the sk_typeface_ on the deserialized end. This value is initialized but
- // can be overridden by SetSkId().
- SkFontID sk_id_;
- sk_sp<SkTypeface> sk_typeface_;
- Type type_ = Type::kSkTypeface;
-
- int font_config_interface_id_ = 0;
- int ttc_index_ = 0;
- std::string filename_;
- std::string family_name_;
- SkFontStyle font_style_;
-};
-
-} // namespace cc
-
-#endif // CC_PAINT_PAINT_TYPEFACE_H_
diff --git a/chromium/cc/paint/paint_typeface_transfer_cache_entry.cc b/chromium/cc/paint/paint_typeface_transfer_cache_entry.cc
deleted file mode 100644
index 41c4d398b04..00000000000
--- a/chromium/cc/paint/paint_typeface_transfer_cache_entry.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "cc/paint/paint_typeface_transfer_cache_entry.h"
-
-namespace cc {
-namespace {
-
-size_t kMaxFilenameSize = 1024;
-size_t kMaxFamilyNameSize = 128;
-
-class DataWriter {
- public:
- explicit DataWriter(base::span<uint8_t> data) : data_(data) {}
-
- template <typename T>
- void WriteSimple(const T& val) {
- DCHECK_LE(sizeof(T), data_.size());
- *reinterpret_cast<T*>(data_.data()) = val;
- data_ = data_.subspan(sizeof(T));
- }
- void WriteData(size_t bytes, const void* input) {
- DCHECK_LE(bytes, data_.size());
- memcpy(data_.data(), input, bytes);
- data_ = data_.subspan(bytes);
- }
-
- private:
- base::span<uint8_t> data_;
-};
-
-class SizeCounter {
- public:
- SizeCounter() = default;
-
- template <typename T>
- void WriteSimple(const T& val) {
- size_ += sizeof(T);
- }
- void WriteData(size_t bytes, const void* input) { size_ += bytes; }
- size_t size() const { return size_; }
-
- private:
- size_t size_ = 0;
-};
-
-} // namespace
-
-ClientPaintTypefaceTransferCacheEntry::ClientPaintTypefaceTransferCacheEntry(
- const PaintTypeface& typeface)
- : typeface_(typeface) {
- SizeCounter counter;
- SerializeInternal(&counter);
- size_ = counter.size();
-}
-
-ClientPaintTypefaceTransferCacheEntry::
- ~ClientPaintTypefaceTransferCacheEntry() = default;
-
-uint32_t ClientPaintTypefaceTransferCacheEntry::Id() const {
- return typeface_.sk_id();
-}
-
-size_t ClientPaintTypefaceTransferCacheEntry::SerializedSize() const {
- return size_;
-}
-
-bool ClientPaintTypefaceTransferCacheEntry::Serialize(
- base::span<uint8_t> data) const {
- DataWriter writer(data);
- return SerializeInternal(&writer);
-}
-
-template <typename Writer>
-bool ClientPaintTypefaceTransferCacheEntry::SerializeInternal(
- Writer* writer) const {
- writer->WriteSimple(typeface_.sk_id());
- writer->WriteSimple(static_cast<uint8_t>(typeface_.type()));
- switch (typeface_.type()) {
- case PaintTypeface::Type::kTestTypeface:
- // Nothing to serialize here.
- break;
- case PaintTypeface::Type::kSkTypeface:
- // Nothing to do here. This should never be the case when everything is
- // implemented. This should be a NOTREACHED() eventually.
- break;
- case PaintTypeface::Type::kFontConfigInterfaceIdAndTtcIndex:
- writer->WriteSimple(typeface_.font_config_interface_id());
- writer->WriteSimple(typeface_.ttc_index());
- break;
- case PaintTypeface::Type::kFilenameAndTtcIndex:
- writer->template WriteSimple<uint64_t>(typeface_.filename().size());
- writer->WriteData(typeface_.filename().size(),
- typeface_.filename().data());
- writer->WriteSimple(typeface_.ttc_index());
- break;
- case PaintTypeface::Type::kFamilyNameAndFontStyle:
- writer->template WriteSimple<uint64_t>(typeface_.family_name().size());
- writer->WriteData(typeface_.family_name().size(),
- typeface_.family_name().data());
- writer->WriteSimple(typeface_.font_style().weight());
- writer->WriteSimple(typeface_.font_style().width());
- writer->WriteSimple(typeface_.font_style().slant());
- break;
- }
- return true;
-}
-
-ServicePaintTypefaceTransferCacheEntry::
- ServicePaintTypefaceTransferCacheEntry() = default;
-ServicePaintTypefaceTransferCacheEntry::
- ~ServicePaintTypefaceTransferCacheEntry() = default;
-
-size_t ServicePaintTypefaceTransferCacheEntry::CachedSize() const {
- return size_;
-}
-
-bool ServicePaintTypefaceTransferCacheEntry::Deserialize(
- GrContext* context,
- base::span<const uint8_t> data) {
- data_ = data;
- size_t initial_size = data_.size();
-
- SkFontID id;
- uint8_t type;
- ReadSimple(&id);
- ReadSimple(&type);
- if (!valid_ || type > static_cast<uint8_t>(PaintTypeface::Type::kLastType)) {
- valid_ = false;
- return false;
- }
- switch (static_cast<PaintTypeface::Type>(type)) {
- case PaintTypeface::Type::kTestTypeface:
- typeface_ = PaintTypeface::TestTypeface();
- break;
- case PaintTypeface::Type::kSkTypeface:
- // TODO(vmpstr): This shouldn't ever happen once everything is
- // implemented. So this should be a failure (ie |valid_| = false).
- break;
- case PaintTypeface::Type::kFontConfigInterfaceIdAndTtcIndex: {
- int font_config_interface_id = 0;
- int ttc_index = 0;
- ReadSimple(&font_config_interface_id);
- ReadSimple(&ttc_index);
- if (!valid_)
- return false;
- typeface_ = PaintTypeface::FromFontConfigInterfaceIdAndTtcIndex(
- font_config_interface_id, ttc_index);
- break;
- }
- case PaintTypeface::Type::kFilenameAndTtcIndex: {
- size_t size;
- ReadSize(&size);
- if (!valid_ || size > kMaxFilenameSize) {
- valid_ = false;
- return false;
- }
-
- std::unique_ptr<char[]> buffer(new char[size]);
- ReadData(size, buffer.get());
- std::string filename(buffer.get(), size);
-
- int ttc_index = 0;
- ReadSimple(&ttc_index);
- if (!valid_)
- return false;
- typeface_ = PaintTypeface::FromFilenameAndTtcIndex(filename, ttc_index);
- break;
- }
- case PaintTypeface::Type::kFamilyNameAndFontStyle: {
- size_t size;
- ReadSize(&size);
- if (!valid_ || size > kMaxFamilyNameSize) {
- valid_ = false;
- return false;
- }
-
- std::unique_ptr<char[]> buffer(new char[size]);
- ReadData(size, buffer.get());
- std::string family_name(buffer.get(), size);
-
- int weight = 0;
- int width = 0;
- SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
- ReadSimple(&weight);
- ReadSimple(&width);
- ReadSimple(&slant);
- if (!valid_)
- return false;
-
- typeface_ = PaintTypeface::FromFamilyNameAndFontStyle(
- family_name, SkFontStyle(weight, width, slant));
- break;
- }
- }
- typeface_.SetSkId(id);
-
- // Set the size to however much data we read.
- size_ = initial_size - data_.size();
- data_ = base::span<uint8_t>();
- return valid_;
-}
-
-template <typename T>
-void ServicePaintTypefaceTransferCacheEntry::ReadSimple(T* val) {
- if (data_.size() < sizeof(T))
- valid_ = false;
- if (!valid_)
- return;
- *val = *reinterpret_cast<const T*>(data_.data());
- data_ = data_.subspan(sizeof(T));
-}
-
-void ServicePaintTypefaceTransferCacheEntry::ReadSize(size_t* size) {
- uint64_t size64;
- ReadSimple(&size64);
- *size = size64;
-}
-
-void ServicePaintTypefaceTransferCacheEntry::ReadData(size_t bytes,
- void* data) {
- if (data_.size() < bytes)
- valid_ = false;
- if (!valid_)
- return;
- memcpy(data, data_.data(), bytes);
- data_ = data_.subspan(bytes);
-}
-
-} // namespace cc
diff --git a/chromium/cc/paint/paint_typeface_transfer_cache_entry.h b/chromium/cc/paint/paint_typeface_transfer_cache_entry.h
deleted file mode 100644
index 294df1594fb..00000000000
--- a/chromium/cc/paint/paint_typeface_transfer_cache_entry.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CC_PAINT_PAINT_TYPEFACE_TRANSFER_CACHE_ENTRY_H_
-#define CC_PAINT_PAINT_TYPEFACE_TRANSFER_CACHE_ENTRY_H_
-
-#include "base/containers/span.h"
-#include "cc/paint/paint_export.h"
-#include "cc/paint/paint_typeface.h"
-#include "cc/paint/transfer_cache_entry.h"
-
-namespace cc {
-
-class CC_PAINT_EXPORT ClientPaintTypefaceTransferCacheEntry
- : public ClientTransferCacheEntryBase<
- TransferCacheEntryType::kPaintTypeface> {
- public:
- explicit ClientPaintTypefaceTransferCacheEntry(const PaintTypeface& typeface);
- ~ClientPaintTypefaceTransferCacheEntry() final;
- uint32_t Id() const final;
- size_t SerializedSize() const final;
- bool Serialize(base::span<uint8_t> data) const final;
-
- private:
- template <typename Writer>
- bool SerializeInternal(Writer* writer) const;
-
- const PaintTypeface typeface_;
- size_t size_ = 0u;
-};
-
-class CC_PAINT_EXPORT ServicePaintTypefaceTransferCacheEntry
- : public ServiceTransferCacheEntryBase<
- TransferCacheEntryType::kPaintTypeface> {
- public:
- ServicePaintTypefaceTransferCacheEntry();
- ~ServicePaintTypefaceTransferCacheEntry() final;
- size_t CachedSize() const final;
- bool Deserialize(GrContext* context, base::span<const uint8_t> data) final;
-
- const PaintTypeface& typeface() const { return typeface_; }
-
- private:
- template <typename T>
- void ReadSimple(T* val);
- void ReadSize(size_t* size);
-
- void ReadData(size_t bytes, void* data);
-
- PaintTypeface typeface_;
- size_t size_ = 0;
- bool valid_ = true;
- // TODO(enne): this transient value shouldn't be a member and should just be
- // passed around internally to functions that need it.
- base::span<const uint8_t> data_;
-};
-
-} // namespace cc
-
-#endif // CC_PAINT_PAINT_TYPEFACE_TRANSFER_CACHE_ENTRY_H_
diff --git a/chromium/cc/paint/solid_color_analyzer.cc b/chromium/cc/paint/solid_color_analyzer.cc
index 1863cfb3778..2ecc07bb767 100644
--- a/chromium/cc/paint/solid_color_analyzer.cc
+++ b/chromium/cc/paint/solid_color_analyzer.cc
@@ -152,7 +152,6 @@ base::Optional<SkColor> SolidColorAnalyzer::DetermineIfSolidColor(
SkColor color = SK_ColorTRANSPARENT;
struct Frame {
- Frame() = default;
Frame(PaintOpBuffer::CompositeIterator iter,
const SkMatrix& original_ctm,
int save_count)
diff --git a/chromium/cc/paint/transfer_cache_entry.cc b/chromium/cc/paint/transfer_cache_entry.cc
index eb51292b2aa..bb34a4cada8 100644
--- a/chromium/cc/paint/transfer_cache_entry.cc
+++ b/chromium/cc/paint/transfer_cache_entry.cc
@@ -9,7 +9,6 @@
#include "base/logging.h"
#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/image_transfer_cache_entry.h"
-#include "cc/paint/paint_typeface_transfer_cache_entry.h"
#include "cc/paint/path_transfer_cache_entry.h"
#include "cc/paint/raw_memory_transfer_cache_entry.h"
#include "cc/paint/shader_transfer_cache_entry.h"
@@ -23,8 +22,6 @@ std::unique_ptr<ServiceTransferCacheEntry> ServiceTransferCacheEntry::Create(
return std::make_unique<ServiceRawMemoryTransferCacheEntry>();
case TransferCacheEntryType::kImage:
return std::make_unique<ServiceImageTransferCacheEntry>();
- case TransferCacheEntryType::kPaintTypeface:
- return std::make_unique<ServicePaintTypefaceTransferCacheEntry>();
case TransferCacheEntryType::kColorSpace:
return std::make_unique<ServiceColorSpaceTransferCacheEntry>();
case TransferCacheEntryType::kPath:
@@ -48,4 +45,20 @@ bool ServiceTransferCacheEntry::SafeConvertToType(
return true;
}
+// static
+bool ServiceTransferCacheEntry::UsesGrContext(TransferCacheEntryType type) {
+ switch (type) {
+ case TransferCacheEntryType::kRawMemory:
+ case TransferCacheEntryType::kColorSpace:
+ case TransferCacheEntryType::kPath:
+ case TransferCacheEntryType::kShader:
+ return false;
+ case TransferCacheEntryType::kImage:
+ return true;
+ }
+
+ NOTREACHED();
+ return true;
+}
+
} // namespace cc
diff --git a/chromium/cc/paint/transfer_cache_entry.h b/chromium/cc/paint/transfer_cache_entry.h
index 474bd970344..55f1ce7dba3 100644
--- a/chromium/cc/paint/transfer_cache_entry.h
+++ b/chromium/cc/paint/transfer_cache_entry.h
@@ -22,7 +22,6 @@ namespace cc {
enum class TransferCacheEntryType : uint32_t {
kRawMemory,
kImage,
- kPaintTypeface,
kColorSpace,
kPath,
kShader,
@@ -72,6 +71,9 @@ class CC_PAINT_EXPORT ServiceTransferCacheEntry {
static bool SafeConvertToType(uint32_t raw_type,
TransferCacheEntryType* type);
+ // Returns true if the entry needs a GrContext during deserialization.
+ static bool UsesGrContext(TransferCacheEntryType type);
+
virtual ~ServiceTransferCacheEntry() {}
// Returns the type of this entry.
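UsesGrContext() lets the service side skip GrContext-dependent setup for entry types that do not need one during deserialization. A hypothetical caller-side sketch follows; Create(), UsesGrContext(), and Deserialize() are the functions shown above, while passing a null GrContext for types that do not use it is an assumption about how callers take advantage of this.

// Hypothetical sketch: only hand a GrContext to entries that need it.
#include <memory>
#include "base/containers/span.h"
#include "cc/paint/transfer_cache_entry.h"

class GrContext;  // Forward declaration; the real one comes from Skia.

std::unique_ptr<cc::ServiceTransferCacheEntry> DeserializeEntry(
    cc::TransferCacheEntryType type,
    GrContext* gr_context,
    base::span<const uint8_t> data) {
  auto entry = cc::ServiceTransferCacheEntry::Create(type);
  if (!entry)
    return nullptr;
  GrContext* context = cc::ServiceTransferCacheEntry::UsesGrContext(type)
                           ? gr_context
                           : nullptr;  // Assumed acceptable for such types.
  if (!entry->Deserialize(context, data))
    return nullptr;
  return entry;
}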
diff --git a/chromium/cc/raster/bitmap_raster_buffer_provider.cc b/chromium/cc/raster/bitmap_raster_buffer_provider.cc
index 12eaae7efa7..1e4e21bb134 100644
--- a/chromium/cc/raster/bitmap_raster_buffer_provider.cc
+++ b/chromium/cc/raster/bitmap_raster_buffer_provider.cc
@@ -164,4 +164,8 @@ uint64_t BitmapRasterBufferProvider::SetReadyToDrawCallback(
void BitmapRasterBufferProvider::Shutdown() {}
+bool BitmapRasterBufferProvider::CheckRasterFinishedQueries() {
+ return false;
+}
+
} // namespace cc
diff --git a/chromium/cc/raster/bitmap_raster_buffer_provider.h b/chromium/cc/raster/bitmap_raster_buffer_provider.h
index 5174176fd11..9ceb0012b23 100644
--- a/chromium/cc/raster/bitmap_raster_buffer_provider.h
+++ b/chromium/cc/raster/bitmap_raster_buffer_provider.h
@@ -43,6 +43,7 @@ class CC_EXPORT BitmapRasterBufferProvider : public RasterBufferProvider {
const base::Closure& callback,
uint64_t pending_callback_id) const override;
void Shutdown() override;
+ bool CheckRasterFinishedQueries() override;
private:
std::unique_ptr<base::trace_event::ConvertableToTraceFormat> StateAsValue()
diff --git a/chromium/cc/raster/gpu_raster_buffer_provider.cc b/chromium/cc/raster/gpu_raster_buffer_provider.cc
index 3db004cf645..2b9b386ee08 100644
--- a/chromium/cc/raster/gpu_raster_buffer_provider.cc
+++ b/chromium/cc/raster/gpu_raster_buffer_provider.cc
@@ -10,6 +10,8 @@
#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "cc/base/histograms.h"
@@ -28,13 +30,14 @@
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/raster_interface.h"
-#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
+#include "gpu/command_buffer/client/shared_image_interface.h"
+#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "third_party/skia/include/core/SkMultiPictureDraw.h"
#include "third_party/skia/include/core/SkPictureRecorder.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"
#include "ui/gfx/geometry/axis_transform2d.h"
-#include "ui/gl/trace_util.h"
#include "url/gurl.h"
namespace cc {
@@ -114,10 +117,10 @@ class ScopedSkSurfaceForUnpremultiplyAndDither {
static void RasterizeSourceOOP(
const RasterSource* raster_source,
bool resource_has_previous_content,
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
+ const gpu::SyncToken& sync_token,
GLenum texture_target,
bool texture_is_overlay_candidate,
- bool texture_storage_allocated,
const gfx::Size& resource_size,
viz::ResourceFormat resource_format,
const gfx::ColorSpace& color_space,
@@ -128,16 +131,22 @@ static void RasterizeSourceOOP(
viz::RasterContextProvider* context_provider,
int msaa_sample_count) {
gpu::raster::RasterInterface* ri = context_provider->RasterInterface();
+ if (mailbox->IsZero()) {
+ DCHECK(!sync_token.HasData());
+ auto* sii = context_provider->SharedImageInterface();
+ uint32_t flags = gpu::SHARED_IMAGE_USAGE_RASTER;
+ if (texture_is_overlay_candidate)
+ flags |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
+ *mailbox = sii->CreateSharedImage(resource_format, resource_size,
+ color_space, flags);
+ ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
+ } else {
+ ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
+ }
+
GLuint texture_id = ri->CreateAndConsumeTexture(
texture_is_overlay_candidate, gfx::BufferUsage::SCANOUT, resource_format,
- mailbox.name);
- if (!texture_storage_allocated) {
- viz::TextureAllocation alloc = {texture_id, texture_target,
- texture_is_overlay_candidate};
- viz::TextureAllocation::AllocateStorage(
- ri, context_provider->ContextCapabilities(), resource_format,
- resource_size, alloc, color_space);
- }
+ mailbox->name);
// TODO(enne): Use the |texture_target|? GpuMemoryBuffer backed textures don't
// use GL_TEXTURE_2D.
@@ -145,10 +154,11 @@ static void RasterizeSourceOOP(
playback_settings.use_lcd_text,
viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, resource_format),
- playback_settings.raster_color_space, mailbox.name);
+ playback_settings.raster_color_space, mailbox->name);
float recording_to_raster_scale =
transform.scale() / raster_source->recording_scale_factor();
gfx::Size content_size = raster_source->GetContentSize(transform.scale());
+
// TODO(enne): could skip the clear on new textures, as the service side has
// to do that anyway. resource_has_previous_content implies that the texture
// is not new, but the reverse does not hold, so more plumbing is needed.
@@ -168,10 +178,10 @@ static void RasterizeSourceOOP(
static void RasterizeSource(
const RasterSource* raster_source,
bool resource_has_previous_content,
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
+ const gpu::SyncToken& sync_token,
GLenum texture_target,
bool texture_is_overlay_candidate,
- bool texture_storage_allocated,
const gfx::Size& resource_size,
viz::ResourceFormat resource_format,
const gfx::ColorSpace& color_space,
@@ -184,17 +194,24 @@ static void RasterizeSource(
bool unpremultiply_and_dither,
const gfx::Size& max_tile_size) {
gpu::raster::RasterInterface* ri = context_provider->RasterInterface();
+ if (mailbox->IsZero()) {
+ auto* sii = context_provider->SharedImageInterface();
+ uint32_t flags = gpu::SHARED_IMAGE_USAGE_GLES2 |
+ gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT;
+ if (texture_is_overlay_candidate)
+ flags |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
+ *mailbox = sii->CreateSharedImage(resource_format, resource_size,
+ color_space, flags);
+ ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
+ } else {
+ // Wait on the SyncToken that was created on the compositor thread after
+ // making the mailbox. This ensures that the mailbox we consume here is
+ // valid by the time the consume command executes.
+ ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
+ }
GLuint texture_id = ri->CreateAndConsumeTexture(
texture_is_overlay_candidate, gfx::BufferUsage::SCANOUT, resource_format,
- mailbox.name);
- if (!texture_storage_allocated) {
- viz::TextureAllocation alloc = {texture_id, texture_target,
- texture_is_overlay_candidate};
- viz::TextureAllocation::AllocateStorage(
- ri, context_provider->ContextCapabilities(), resource_format,
- resource_size, alloc, color_space);
- }
-
+ mailbox->name);
{
ScopedGrContextAccess gr_context_access(context_provider);
base::Optional<viz::ClientResourceProvider::ScopedSkSurface> scoped_surface;
@@ -244,13 +261,13 @@ class GpuRasterBufferProvider::GpuRasterBacking
: public ResourcePool::GpuBacking {
public:
~GpuRasterBacking() override {
- gpu::gles2::GLES2Interface* gl = compositor_context_provider->ContextGL();
+ if (mailbox.IsZero())
+ return;
+ auto* sii = worker_context_provider->SharedImageInterface();
if (returned_sync_token.HasData())
- gl->WaitSyncTokenCHROMIUM(returned_sync_token.GetConstData());
- if (mailbox_sync_token.HasData())
- gl->WaitSyncTokenCHROMIUM(mailbox_sync_token.GetConstData());
- if (texture_id)
- gl->DeleteTextures(1, &texture_id);
+ sii->DestroySharedImage(returned_sync_token, mailbox);
+ else if (mailbox_sync_token.HasData())
+ sii->DestroySharedImage(mailbox_sync_token, mailbox);
}
void OnMemoryDump(
@@ -258,23 +275,16 @@ class GpuRasterBufferProvider::GpuRasterBacking
const base::trace_event::MemoryAllocatorDumpGuid& buffer_dump_guid,
uint64_t tracing_process_id,
int importance) const override {
- if (!storage_allocated)
+ if (mailbox.IsZero())
return;
- auto texture_tracing_guid = gl::GetGLTextureClientGUIDForTracing(
- compositor_context_provider->ContextSupport()->ShareGroupTracingGUID(),
- texture_id);
- pmd->CreateSharedGlobalAllocatorDump(texture_tracing_guid);
- pmd->AddOwnershipEdge(buffer_dump_guid, texture_tracing_guid, importance);
+ auto tracing_guid = gpu::GetSharedImageGUIDForTracing(mailbox);
+ pmd->CreateSharedGlobalAllocatorDump(tracing_guid);
+ pmd->AddOwnershipEdge(buffer_dump_guid, tracing_guid, importance);
}
- // The ContextProvider used to clean up the texture id.
- viz::ContextProvider* compositor_context_provider = nullptr;
- // The texture backing of the resource.
- GLuint texture_id = 0;
- // The allocation of storage for the |texture_id| is deferred, and this tracks
- // if it has been done.
- bool storage_allocated = false;
+  // The RasterContextProvider used to clean up the mailbox.
+ viz::RasterContextProvider* worker_context_provider = nullptr;
};
GpuRasterBufferProvider::RasterBufferImpl::RasterBufferImpl(
@@ -289,10 +299,9 @@ GpuRasterBufferProvider::RasterBufferImpl::RasterBufferImpl(
color_space_(in_use_resource.color_space()),
resource_has_previous_content_(resource_has_previous_content),
before_raster_sync_token_(backing->returned_sync_token),
- mailbox_(backing->mailbox),
texture_target_(backing->texture_target),
texture_is_overlay_candidate_(backing->overlay_candidate),
- texture_storage_allocated_(backing->storage_allocated) {}
+ mailbox_(backing->mailbox) {}
GpuRasterBufferProvider::RasterBufferImpl::~RasterBufferImpl() {
// This SyncToken was created on the worker context after rastering the
@@ -303,7 +312,7 @@ GpuRasterBufferProvider::RasterBufferImpl::~RasterBufferImpl() {
// happened if the |after_raster_sync_token_| was set.
backing_->returned_sync_token = gpu::SyncToken();
}
- backing_->storage_allocated = texture_storage_allocated_;
+ backing_->mailbox = mailbox_;
}
void GpuRasterBufferProvider::RasterBufferImpl::Playback(
@@ -320,12 +329,10 @@ void GpuRasterBufferProvider::RasterBufferImpl::Playback(
// returns another SyncToken generated on the worker thread to synchronize
// with after the raster is complete.
after_raster_sync_token_ = client_->PlaybackOnWorkerThread(
- mailbox_, texture_target_, texture_is_overlay_candidate_,
- texture_storage_allocated_, before_raster_sync_token_, resource_size_,
- resource_format_, color_space_, resource_has_previous_content_,
- raster_source, raster_full_rect, raster_dirty_rect, new_content_id,
- transform, playback_settings, url);
- texture_storage_allocated_ = true;
+ &mailbox_, texture_target_, texture_is_overlay_candidate_,
+ before_raster_sync_token_, resource_size_, resource_format_, color_space_,
+ resource_has_previous_content_, raster_source, raster_full_rect,
+ raster_dirty_rect, new_content_id, transform, playback_settings, url);
}
GpuRasterBufferProvider::GpuRasterBufferProvider(
@@ -336,7 +343,8 @@ GpuRasterBufferProvider::GpuRasterBufferProvider(
viz::ResourceFormat tile_format,
const gfx::Size& max_tile_size,
bool unpremultiply_and_dither_low_bit_depth_tiles,
- bool enable_oop_rasterization)
+ bool enable_oop_rasterization,
+ int raster_metric_frequency)
: compositor_context_provider_(compositor_context_provider),
worker_context_provider_(worker_context_provider),
use_gpu_memory_buffer_resources_(use_gpu_memory_buffer_resources),
@@ -345,7 +353,10 @@ GpuRasterBufferProvider::GpuRasterBufferProvider(
max_tile_size_(max_tile_size),
unpremultiply_and_dither_low_bit_depth_tiles_(
unpremultiply_and_dither_low_bit_depth_tiles),
- enable_oop_rasterization_(enable_oop_rasterization) {
+ enable_oop_rasterization_(enable_oop_rasterization),
+ raster_metric_frequency_(raster_metric_frequency),
+ random_generator_(base::RandUint64()),
+ uniform_distribution_(1, raster_metric_frequency) {
DCHECK(compositor_context_provider);
DCHECK(worker_context_provider);
}
@@ -359,24 +370,10 @@ std::unique_ptr<RasterBuffer> GpuRasterBufferProvider::AcquireBufferForRaster(
uint64_t previous_content_id) {
if (!resource.gpu_backing()) {
auto backing = std::make_unique<GpuRasterBacking>();
- backing->compositor_context_provider = compositor_context_provider_;
-
- gpu::gles2::GLES2Interface* gl = compositor_context_provider_->ContextGL();
- const auto& caps = compositor_context_provider_->ContextCapabilities();
-
- viz::TextureAllocation alloc = viz::TextureAllocation::MakeTextureId(
- gl, caps, resource.format(), use_gpu_memory_buffer_resources_,
- /*for_framebuffer_attachment=*/true);
- backing->texture_id = alloc.texture_id;
- backing->texture_target = alloc.texture_target;
- backing->overlay_candidate = alloc.overlay_candidate;
- gl->ProduceTextureDirectCHROMIUM(backing->texture_id,
- backing->mailbox.name);
- // Save a sync token in the backing so that we always wait on it even if
- // this task is cancelled between being scheduled and running.
- backing->returned_sync_token =
- viz::ClientResourceProvider::GenerateSyncTokenHelper(gl);
-
+ backing->worker_context_provider = worker_context_provider_;
+ backing->InitOverlayCandidateAndTextureTarget(
+ resource.format(), compositor_context_provider_->ContextCapabilities(),
+ use_gpu_memory_buffer_resources_);
resource.set_gpu_backing(std::move(backing));
}
GpuRasterBacking* backing =
@@ -454,10 +451,9 @@ void GpuRasterBufferProvider::Shutdown() {
}
gpu::SyncToken GpuRasterBufferProvider::PlaybackOnWorkerThread(
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
GLenum texture_target,
bool texture_is_overlay_candidate,
- bool texture_storage_allocated,
const gpu::SyncToken& sync_token,
const gfx::Size& resource_size,
viz::ResourceFormat resource_format,
@@ -470,15 +466,50 @@ gpu::SyncToken GpuRasterBufferProvider::PlaybackOnWorkerThread(
const gfx::AxisTransform2d& transform,
const RasterSource::PlaybackSettings& playback_settings,
const GURL& url) {
+ PendingRasterQuery query;
+ gpu::SyncToken raster_finished_token = PlaybackOnWorkerThreadInternal(
+ mailbox, texture_target, texture_is_overlay_candidate, sync_token,
+ resource_size, resource_format, color_space,
+ resource_has_previous_content, raster_source, raster_full_rect,
+ raster_dirty_rect, new_content_id, transform, playback_settings, url,
+ &query);
+
+ if (query.query_id != 0u) {
+ // Note that it is important to scope the raster context lock to
+ // PlaybackOnWorkerThreadInternal and release it before acquiring this lock
+ // to avoid a deadlock in CheckRasterFinishedQueries which acquires the
+ // raster context lock while holding this lock.
+ base::AutoLock hold(pending_raster_queries_lock_);
+ pending_raster_queries_.push_back(query);
+ }
+
+ return raster_finished_token;
+}
+
+gpu::SyncToken GpuRasterBufferProvider::PlaybackOnWorkerThreadInternal(
+ gpu::Mailbox* mailbox,
+ GLenum texture_target,
+ bool texture_is_overlay_candidate,
+ const gpu::SyncToken& sync_token,
+ const gfx::Size& resource_size,
+ viz::ResourceFormat resource_format,
+ const gfx::ColorSpace& color_space,
+ bool resource_has_previous_content,
+ const RasterSource* raster_source,
+ const gfx::Rect& raster_full_rect,
+ const gfx::Rect& raster_dirty_rect,
+ uint64_t new_content_id,
+ const gfx::AxisTransform2d& transform,
+ const RasterSource::PlaybackSettings& playback_settings,
+ const GURL& url,
+ PendingRasterQuery* query) {
viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
worker_context_provider_, url.possibly_invalid_spec().c_str());
gpu::raster::RasterInterface* ri = scoped_context.RasterInterface();
DCHECK(ri);
- // Wait on the SyncToken that was created on the compositor thread after
- // making the mailbox. This ensures that the mailbox we consume here is valid
- // by the time the consume command executes.
- ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
+ const bool measure_raster_metric =
+ uniform_distribution_(random_generator_) == raster_metric_frequency_;
gfx::Rect playback_rect = raster_full_rect;
if (resource_has_previous_content) {
@@ -501,23 +532,39 @@ gpu::SyncToken GpuRasterBufferProvider::PlaybackOnWorkerThread(
100.0f * fraction_saved);
}
- if (enable_oop_rasterization_) {
- RasterizeSourceOOP(raster_source, resource_has_previous_content, mailbox,
- texture_target, texture_is_overlay_candidate,
- texture_storage_allocated, resource_size,
- resource_format, color_space, raster_full_rect,
- playback_rect, transform, playback_settings,
- worker_context_provider_, msaa_sample_count_);
- } else {
- RasterizeSource(
- raster_source, resource_has_previous_content, mailbox, texture_target,
- texture_is_overlay_candidate, texture_storage_allocated, resource_size,
- resource_format, color_space, raster_full_rect, playback_rect,
- transform, playback_settings, worker_context_provider_,
- msaa_sample_count_,
- ShouldUnpremultiplyAndDitherResource(resource_format), max_tile_size_);
+ if (measure_raster_metric) {
+ // Use a query to time the GPU side work for rasterizing this tile.
+ ri->GenQueriesEXT(1, &query->query_id);
+ ri->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, query->query_id);
+ }
+
+ {
+ base::Optional<base::ElapsedTimer> timer;
+ if (measure_raster_metric)
+ timer.emplace();
+ if (enable_oop_rasterization_) {
+ RasterizeSourceOOP(raster_source, resource_has_previous_content, mailbox,
+ sync_token, texture_target,
+ texture_is_overlay_candidate, resource_size,
+ resource_format, color_space, raster_full_rect,
+ playback_rect, transform, playback_settings,
+ worker_context_provider_, msaa_sample_count_);
+ } else {
+ RasterizeSource(raster_source, resource_has_previous_content, mailbox,
+ sync_token, texture_target, texture_is_overlay_candidate,
+ resource_size, resource_format, color_space,
+ raster_full_rect, playback_rect, transform,
+ playback_settings, worker_context_provider_,
+ msaa_sample_count_,
+ ShouldUnpremultiplyAndDitherResource(resource_format),
+ max_tile_size_);
+ }
+ if (measure_raster_metric)
+ query->worker_duration = timer->Elapsed();
}
+ ri->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+
// Generate sync token for cross context synchronization.
return viz::ClientResourceProvider::GenerateSyncTokenHelper(ri);
}
@@ -532,4 +579,56 @@ bool GpuRasterBufferProvider::ShouldUnpremultiplyAndDitherResource(
}
}
+#define UMA_HISTOGRAM_RASTER_TIME_CUSTOM_MICROSECONDS(name, total_time) \
+ UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES( \
+ name, total_time, base::TimeDelta::FromMicroseconds(1), \
+ base::TimeDelta::FromMilliseconds(100), 100);
+
+bool GpuRasterBufferProvider::CheckRasterFinishedQueries() {
+ base::AutoLock hold(pending_raster_queries_lock_);
+ if (pending_raster_queries_.empty())
+ return false;
+
+ viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
+ worker_context_provider_);
+ auto* ri = scoped_context.RasterInterface();
+
+ auto it = pending_raster_queries_.begin();
+ while (it != pending_raster_queries_.end()) {
+ GLuint complete = 1;
+ ri->GetQueryObjectuivEXT(it->query_id,
+ GL_QUERY_RESULT_AVAILABLE_NO_FLUSH_CHROMIUM_EXT,
+ &complete);
+ if (!complete)
+ break;
+
+ GLuint gpu_duration = 0u;
+ ri->GetQueryObjectuivEXT(it->query_id, GL_QUERY_RESULT_EXT, &gpu_duration);
+ ri->DeleteQueriesEXT(1, &it->query_id);
+
+ base::TimeDelta total_time =
+ it->worker_duration + base::TimeDelta::FromMicroseconds(gpu_duration);
+
+ // It is safe to use the UMA macros here with runtime generated strings
+ // because the client name should be initialized once in the process, before
+ // recording any metrics here.
+ const char* client_name = GetClientNameForMetrics();
+ if (enable_oop_rasterization_) {
+ UMA_HISTOGRAM_RASTER_TIME_CUSTOM_MICROSECONDS(
+ base::StringPrintf("Renderer4.%s.RasterTaskTotalDuration.Oop",
+ client_name),
+ total_time);
+ } else {
+ UMA_HISTOGRAM_RASTER_TIME_CUSTOM_MICROSECONDS(
+ base::StringPrintf("Renderer4.%s.RasterTaskTotalDuration.Gpu",
+ client_name),
+ total_time);
+ }
+
+ it = pending_raster_queries_.erase(it);
+ }
+
+ return pending_raster_queries_.size() > 0u;
+}
+
} // namespace cc
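
The raster paths touched above (RasterizeSourceOOP, RasterizeSource, and OneCopyRasterBufferProvider::CopyOnWorkerThread further down) now share the same lazy-allocation pattern for the backing mailbox: it is created as a shared image on the worker context the first time a tile is rastered, and on reuse the worker waits on the sync token generated when the mailbox was produced. A condensed sketch of that pattern, using the names from the hunks above (the usage flags differ per path, e.g. SHARED_IMAGE_USAGE_GLES2 for non-OOP GPU raster); this is an illustration, not a drop-in for any of the functions:

  // Sketch only: |ri|, |sii|, |mailbox|, |sync_token| and the resource
  // parameters are assumed to be set up as in the functions above.
  if (mailbox->IsZero()) {
    // First raster into this backing: allocate a shared image on the worker
    // context and order later raster commands after its creation.
    uint32_t flags = gpu::SHARED_IMAGE_USAGE_RASTER;
    if (texture_is_overlay_candidate)
      flags |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
    *mailbox = sii->CreateSharedImage(resource_format, resource_size,
                                      color_space, flags);
    ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
  } else {
    // Reused backing: wait for the token from the mailbox's last producer.
    ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
  }
  GLuint texture_id = ri->CreateAndConsumeTexture(
      texture_is_overlay_candidate, gfx::BufferUsage::SCANOUT, resource_format,
      mailbox->name);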
diff --git a/chromium/cc/raster/gpu_raster_buffer_provider.h b/chromium/cc/raster/gpu_raster_buffer_provider.h
index 54c4fc9db4b..0ef41dd0bea 100644
--- a/chromium/cc/raster/gpu_raster_buffer_provider.h
+++ b/chromium/cc/raster/gpu_raster_buffer_provider.h
@@ -6,11 +6,18 @@
#define CC_RASTER_GPU_RASTER_BUFFER_PROVIDER_H_
#include <stdint.h>
+#include <random>
#include "base/macros.h"
#include "cc/raster/raster_buffer_provider.h"
#include "gpu/command_buffer/common/sync_token.h"
+namespace gpu {
+namespace raster {
+class RasterInterface;
+} // namespace raster
+} // namespace gpu
+
namespace viz {
class ContextProvider;
class RasterContextProvider;
@@ -20,6 +27,7 @@ namespace cc {
class CC_EXPORT GpuRasterBufferProvider : public RasterBufferProvider {
public:
+ static constexpr int kRasterMetricFrequency = 100;
GpuRasterBufferProvider(viz::ContextProvider* compositor_context_provider,
viz::RasterContextProvider* worker_context_provider,
bool use_gpu_memory_buffer_resources,
@@ -27,7 +35,8 @@ class CC_EXPORT GpuRasterBufferProvider : public RasterBufferProvider {
viz::ResourceFormat tile_format,
const gfx::Size& max_tile_size,
bool unpremultiply_and_dither_low_bit_depth_tiles,
- bool enable_oop_rasterization);
+ bool enable_oop_rasterization,
+ int raster_metric_frequency = kRasterMetricFrequency);
~GpuRasterBufferProvider() override;
// Overridden from RasterBufferProvider:
@@ -47,12 +56,12 @@ class CC_EXPORT GpuRasterBufferProvider : public RasterBufferProvider {
const base::Closure& callback,
uint64_t pending_callback_id) const override;
void Shutdown() override;
+ bool CheckRasterFinishedQueries() override;
gpu::SyncToken PlaybackOnWorkerThread(
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
GLenum texture_target,
bool texture_is_overlay_candidate,
- bool texture_storage_allocated,
const gpu::SyncToken& sync_token,
const gfx::Size& resource_size,
viz::ResourceFormat resource_format,
@@ -97,11 +106,10 @@ class CC_EXPORT GpuRasterBufferProvider : public RasterBufferProvider {
const gfx::ColorSpace color_space_;
const bool resource_has_previous_content_;
const gpu::SyncToken before_raster_sync_token_;
- const gpu::Mailbox mailbox_;
const GLenum texture_target_;
const bool texture_is_overlay_candidate_;
- // Set to true once allocation is done in the worker thread.
- bool texture_storage_allocated_;
+
+ gpu::Mailbox mailbox_;
// A SyncToken to be returned from the worker thread, and waited on before
// using the rastered resource.
gpu::SyncToken after_raster_sync_token_;
@@ -109,7 +117,32 @@ class CC_EXPORT GpuRasterBufferProvider : public RasterBufferProvider {
DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};
+ struct PendingRasterQuery {
+ // The id for querying the duration in executing the GPU side work.
+ GLuint query_id = 0u;
+
+ // The duration for executing the work on the raster worker thread.
+ base::TimeDelta worker_duration;
+ };
+
bool ShouldUnpremultiplyAndDitherResource(viz::ResourceFormat format) const;
+ gpu::SyncToken PlaybackOnWorkerThreadInternal(
+ gpu::Mailbox* mailbox,
+ GLenum texture_target,
+ bool texture_is_overlay_candidate,
+ const gpu::SyncToken& sync_token,
+ const gfx::Size& resource_size,
+ viz::ResourceFormat resource_format,
+ const gfx::ColorSpace& color_space,
+ bool resource_has_previous_content,
+ const RasterSource* raster_source,
+ const gfx::Rect& raster_full_rect,
+ const gfx::Rect& raster_dirty_rect,
+ uint64_t new_content_id,
+ const gfx::AxisTransform2d& transform,
+ const RasterSource::PlaybackSettings& playback_settings,
+ const GURL& url,
+ PendingRasterQuery* query);
viz::ContextProvider* const compositor_context_provider_;
viz::RasterContextProvider* const worker_context_provider_;
@@ -119,6 +152,17 @@ class CC_EXPORT GpuRasterBufferProvider : public RasterBufferProvider {
const gfx::Size max_tile_size_;
const bool unpremultiply_and_dither_low_bit_depth_tiles_;
const bool enable_oop_rasterization_;
+ const int raster_metric_frequency_;
+
+ // Note that this lock should never be acquired while holding the raster
+ // context lock.
+ base::Lock pending_raster_queries_lock_;
+ base::circular_deque<PendingRasterQuery> pending_raster_queries_
+ GUARDED_BY(pending_raster_queries_lock_);
+
+ // Accessed with the worker context lock acquired.
+ std::mt19937 random_generator_;
+ std::uniform_int_distribution<int> uniform_distribution_;
DISALLOW_COPY_AND_ASSIGN(GpuRasterBufferProvider);
};
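
The raster-duration metric declared above is sampled rather than recorded per tile: the provider seeds a std::mt19937 from base::RandUint64() and compares a draw from uniform_int_distribution(1, raster_metric_frequency) against the frequency itself, so with the default of 100 roughly one task in a hundred is timed. A minimal, self-contained illustration of that 1-in-N decision in plain C++ (not Chromium code; ShouldSample and kFrequency are illustrative names):

  #include <random>

  // Returns true for roughly one call out of every |frequency| calls.
  bool ShouldSample(std::mt19937& rng, int frequency) {
    std::uniform_int_distribution<int> dist(1, frequency);
    // Comparing against |frequency| matches the provider; any fixed value in
    // the range gives the same 1/frequency probability.
    return dist(rng) == frequency;
  }

  int main() {
    std::mt19937 rng{std::random_device{}()};
    constexpr int kFrequency = 100;
    int sampled = 0;
    for (int i = 0; i < 100000; ++i)
      sampled += ShouldSample(rng, kFrequency);
    // |sampled| should land near 1000, i.e. about 1% of the iterations.
    return sampled > 0 ? 0 : 1;
  }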
diff --git a/chromium/cc/raster/one_copy_raster_buffer_provider.cc b/chromium/cc/raster/one_copy_raster_buffer_provider.cc
index bf284018663..562d2e21bfe 100644
--- a/chromium/cc/raster/one_copy_raster_buffer_provider.cc
+++ b/chromium/cc/raster/one_copy_raster_buffer_provider.cc
@@ -13,21 +13,23 @@
#include "base/debug/alias.h"
#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
+#include "base/strings/stringprintf.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "cc/base/histograms.h"
#include "cc/base/math_util.h"
#include "components/viz/common/gpu/context_provider.h"
#include "components/viz/common/gpu/raster_context_provider.h"
-#include "components/viz/common/gpu/texture_allocation.h"
#include "components/viz/common/resources/platform_color.h"
#include "components/viz/common/resources/resource_format.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/context_support.h"
-#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/client/raster_interface.h"
+#include "gpu/command_buffer/client/shared_image_interface.h"
+#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/trace_util.h"
@@ -46,13 +48,13 @@ class OneCopyRasterBufferProvider::OneCopyGpuBacking
: public ResourcePool::GpuBacking {
public:
~OneCopyGpuBacking() override {
- gpu::gles2::GLES2Interface* gl = compositor_context_provider->ContextGL();
+ if (mailbox.IsZero())
+ return;
+ auto* sii = worker_context_provider->SharedImageInterface();
if (returned_sync_token.HasData())
- gl->WaitSyncTokenCHROMIUM(returned_sync_token.GetConstData());
- if (mailbox_sync_token.HasData())
- gl->WaitSyncTokenCHROMIUM(mailbox_sync_token.GetConstData());
- if (texture_id)
- gl->DeleteTextures(1, &texture_id);
+ sii->DestroySharedImage(returned_sync_token, mailbox);
+ else if (mailbox_sync_token.HasData())
+ sii->DestroySharedImage(mailbox_sync_token, mailbox);
}
void OnMemoryDump(
@@ -60,23 +62,16 @@ class OneCopyRasterBufferProvider::OneCopyGpuBacking
const base::trace_event::MemoryAllocatorDumpGuid& buffer_dump_guid,
uint64_t tracing_process_id,
int importance) const override {
- if (!storage_allocated)
+ if (mailbox.IsZero())
return;
- auto texture_tracing_guid = gl::GetGLTextureClientGUIDForTracing(
- compositor_context_provider->ContextSupport()->ShareGroupTracingGUID(),
- texture_id);
- pmd->CreateSharedGlobalAllocatorDump(texture_tracing_guid);
- pmd->AddOwnershipEdge(buffer_dump_guid, texture_tracing_guid, importance);
+ auto tracing_guid = gpu::GetSharedImageGUIDForTracing(mailbox);
+ pmd->CreateSharedGlobalAllocatorDump(tracing_guid);
+ pmd->AddOwnershipEdge(buffer_dump_guid, tracing_guid, importance);
}
- // The ContextProvider used to clean up the texture id.
- viz::ContextProvider* compositor_context_provider = nullptr;
- // The texture backing of the resource.
- GLuint texture_id = 0;
- // The allocation of storage for the |texture_id| is deferred, and this tracks
- // if it has been done.
- bool storage_allocated = false;
+  // The RasterContextProvider used to clean up the mailbox.
+ viz::RasterContextProvider* worker_context_provider = nullptr;
};
OneCopyRasterBufferProvider::RasterBufferImpl::RasterBufferImpl(
@@ -94,8 +89,7 @@ OneCopyRasterBufferProvider::RasterBufferImpl::RasterBufferImpl(
before_raster_sync_token_(backing->returned_sync_token),
mailbox_(backing->mailbox),
mailbox_texture_target_(backing->texture_target),
- mailbox_texture_is_overlay_candidate_(backing->overlay_candidate),
- mailbox_texture_storage_allocated_(backing->storage_allocated) {}
+ mailbox_texture_is_overlay_candidate_(backing->overlay_candidate) {}
OneCopyRasterBufferProvider::RasterBufferImpl::~RasterBufferImpl() {
// This SyncToken was created on the worker context after uploading the
@@ -106,7 +100,7 @@ OneCopyRasterBufferProvider::RasterBufferImpl::~RasterBufferImpl() {
// happened if the |after_raster_sync_token_| was set.
backing_->returned_sync_token = gpu::SyncToken();
}
- backing_->storage_allocated = mailbox_texture_storage_allocated_;
+ backing_->mailbox = mailbox_;
}
void OneCopyRasterBufferProvider::RasterBufferImpl::Playback(
@@ -123,12 +117,10 @@ void OneCopyRasterBufferProvider::RasterBufferImpl::Playback(
// returns another SyncToken generated on the worker thread to synchronize
// with after the raster is complete.
after_raster_sync_token_ = client_->PlaybackAndCopyOnWorkerThread(
- mailbox_, mailbox_texture_target_, mailbox_texture_is_overlay_candidate_,
- mailbox_texture_storage_allocated_, before_raster_sync_token_,
- raster_source, raster_full_rect, raster_dirty_rect, transform,
- resource_size_, resource_format_, color_space_, playback_settings,
- previous_content_id_, new_content_id);
- mailbox_texture_storage_allocated_ = true;
+ &mailbox_, mailbox_texture_target_, mailbox_texture_is_overlay_candidate_,
+ before_raster_sync_token_, raster_source, raster_full_rect,
+ raster_dirty_rect, transform, resource_size_, resource_format_,
+ color_space_, playback_settings, previous_content_id_, new_content_id);
}
OneCopyRasterBufferProvider::OneCopyRasterBufferProvider(
@@ -159,6 +151,7 @@ OneCopyRasterBufferProvider::OneCopyRasterBufferProvider(
max_staging_buffer_usage_in_bytes) {
DCHECK(compositor_context_provider);
DCHECK(worker_context_provider);
+ DCHECK(!IsResourceFormatCompressed(tile_format));
}
OneCopyRasterBufferProvider::~OneCopyRasterBufferProvider() {}
@@ -170,24 +163,10 @@ OneCopyRasterBufferProvider::AcquireBufferForRaster(
uint64_t previous_content_id) {
if (!resource.gpu_backing()) {
auto backing = std::make_unique<OneCopyGpuBacking>();
- backing->compositor_context_provider = compositor_context_provider_;
-
- gpu::gles2::GLES2Interface* gl = compositor_context_provider_->ContextGL();
- const auto& caps = compositor_context_provider_->ContextCapabilities();
-
- viz::TextureAllocation alloc = viz::TextureAllocation::MakeTextureId(
- gl, caps, resource.format(), use_gpu_memory_buffer_resources_,
- /*for_framebuffer_attachment=*/false);
- backing->texture_id = alloc.texture_id;
- backing->texture_target = alloc.texture_target;
- backing->overlay_candidate = alloc.overlay_candidate;
- gl->ProduceTextureDirectCHROMIUM(backing->texture_id,
- backing->mailbox.name);
- // Save a sync token in the backing so that we always wait on it even if
- // this task is cancelled between being scheduled and running.
- backing->returned_sync_token =
- viz::ClientResourceProvider::GenerateSyncTokenHelper(gl);
-
+ backing->worker_context_provider = worker_context_provider_;
+ backing->InitOverlayCandidateAndTextureTarget(
+ resource.format(), compositor_context_provider_->ContextCapabilities(),
+ use_gpu_memory_buffer_resources_);
resource.set_gpu_backing(std::move(backing));
}
OneCopyGpuBacking* backing =
@@ -272,10 +251,9 @@ void OneCopyRasterBufferProvider::Shutdown() {
}
gpu::SyncToken OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread(
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
GLenum mailbox_texture_target,
bool mailbox_texture_is_overlay_candidate,
- bool mailbox_texture_storage_allocated,
const gpu::SyncToken& sync_token,
const RasterSource* raster_source,
const gfx::Rect& raster_full_rect,
@@ -299,8 +277,7 @@ gpu::SyncToken OneCopyRasterBufferProvider::PlaybackAndCopyOnWorkerThread(
gpu::SyncToken sync_token_after_upload = CopyOnWorkerThread(
staging_buffer.get(), raster_source, raster_full_rect, resource_format,
resource_size, mailbox, mailbox_texture_target,
- mailbox_texture_is_overlay_candidate, mailbox_texture_storage_allocated,
- sync_token, color_space);
+ mailbox_texture_is_overlay_candidate, sync_token, color_space);
staging_pool_.ReleaseStagingBuffer(std::move(staging_buffer));
return sync_token_after_upload;
}
@@ -384,10 +361,9 @@ gpu::SyncToken OneCopyRasterBufferProvider::CopyOnWorkerThread(
const gfx::Rect& rect_to_copy,
viz::ResourceFormat resource_format,
const gfx::Size& resource_size,
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
GLenum mailbox_texture_target,
bool mailbox_texture_is_overlay_candidate,
- bool mailbox_texture_storage_allocated,
const gpu::SyncToken& sync_token,
const gfx::ColorSpace& color_space) {
viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
@@ -395,21 +371,21 @@ gpu::SyncToken OneCopyRasterBufferProvider::CopyOnWorkerThread(
gpu::raster::RasterInterface* ri = scoped_context.RasterInterface();
DCHECK(ri);
- // Wait on the SyncToken that was created on the compositor thread after
- // making the mailbox. This ensures that the mailbox we consume here is valid
- // by the time the consume command executes.
- ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
+ if (mailbox->IsZero()) {
+ auto* sii = worker_context_provider_->SharedImageInterface();
+ uint32_t flags = gpu::SHARED_IMAGE_USAGE_RASTER;
+ if (mailbox_texture_is_overlay_candidate)
+ flags |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
+ *mailbox = sii->CreateSharedImage(resource_format, resource_size,
+ color_space, flags);
+ ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
+ } else {
+ ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
+ }
+
GLuint mailbox_texture_id = ri->CreateAndConsumeTexture(
mailbox_texture_is_overlay_candidate, gfx::BufferUsage::SCANOUT,
- resource_format, mailbox.name);
-
- if (!mailbox_texture_storage_allocated) {
- viz::TextureAllocation alloc = {mailbox_texture_id, mailbox_texture_target,
- mailbox_texture_is_overlay_candidate};
- viz::TextureAllocation::AllocateStorage(
- ri, worker_context_provider_->ContextCapabilities(), resource_format,
- resource_size, alloc, color_space);
- }
+ resource_format, mailbox->name);
// Create and bind staging texture.
if (!staging_buffer->texture_id) {
@@ -446,61 +422,68 @@ gpu::SyncToken OneCopyRasterBufferProvider::CopyOnWorkerThread(
// TODO(vmiura): Need a way to ensure we don't hold onto bindings?
// ri->BindTexture(image_target, 0);
+ // Do not use queries unless COMMANDS_COMPLETED queries are supported, or
+ // COMMANDS_ISSUED queries are sufficient.
+ GLenum query_target = GL_NONE;
+
if (worker_context_provider_->ContextCapabilities().sync_query) {
- if (!staging_buffer->query_id)
- ri->GenQueriesEXT(1, &staging_buffer->query_id);
+ // Use GL_COMMANDS_COMPLETED_CHROMIUM when supported because native
+ // GpuMemoryBuffers can be accessed by the GPU after commands are issued
+ // until GPU reads are done.
+ query_target = GL_COMMANDS_COMPLETED_CHROMIUM;
+ }
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
- // TODO(reveman): This avoids a performance problem on ARM ChromeOS
- // devices. crbug.com/580166
- ri->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
-#else
- ri->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, staging_buffer->query_id);
+ // TODO(reveman): This avoids a performance problem on ARM ChromeOS devices.
+ // https://crbug.com/580166
+ query_target = GL_COMMANDS_ISSUED_CHROMIUM;
#endif
+
+ // COMMANDS_ISSUED is sufficient for shared memory GpuMemoryBuffers because
+ // they're uploaded using glTexImage2D (see gl::GLImageMemory::CopyTexImage).
+ const auto* buffer = staging_buffer->gpu_memory_buffer.get();
+ if (buffer &&
+ buffer->GetType() == gfx::GpuMemoryBufferType::SHARED_MEMORY_BUFFER) {
+ query_target = GL_COMMANDS_ISSUED_CHROMIUM;
}
- // Since compressed texture's cannot be pre-allocated we might have an
- // unallocated resource in which case we need to perform a full size copy.
- if (IsResourceFormatCompressed(staging_buffer->format)) {
- ri->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id,
- mailbox_texture_id);
- } else {
- int bytes_per_row = viz::ResourceSizes::UncheckedWidthInBytes<int>(
- rect_to_copy.width(), staging_buffer->format);
- int chunk_size_in_rows =
- std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
- // Align chunk size to 4. Required to support compressed texture formats.
- chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
- int y = 0;
- int height = rect_to_copy.height();
- while (y < height) {
- // Copy at most |chunk_size_in_rows|.
- int rows_to_copy = std::min(chunk_size_in_rows, height - y);
- DCHECK_GT(rows_to_copy, 0);
-
- ri->CopySubTexture(staging_buffer->texture_id, mailbox_texture_id, 0, y,
- 0, y, rect_to_copy.width(), rows_to_copy);
- y += rows_to_copy;
-
- // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
- // used for this copy operation.
- bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
-
- if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
- ri->ShallowFlushCHROMIUM();
- bytes_scheduled_since_last_flush_ = 0;
- }
- }
+ if (query_target != GL_NONE) {
+ if (!staging_buffer->query_id)
+ ri->GenQueriesEXT(1, &staging_buffer->query_id);
+
+ ri->BeginQueryEXT(query_target, staging_buffer->query_id);
}
- if (worker_context_provider_->ContextCapabilities().sync_query) {
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
- ri->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
-#else
- ri->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
-#endif
+ int bytes_per_row = viz::ResourceSizes::UncheckedWidthInBytes<int>(
+ rect_to_copy.width(), staging_buffer->format);
+ int chunk_size_in_rows =
+ std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
+ // Align chunk size to 4. Required to support compressed texture formats.
+ chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
+ int y = 0;
+ int height = rect_to_copy.height();
+ while (y < height) {
+ // Copy at most |chunk_size_in_rows|.
+ int rows_to_copy = std::min(chunk_size_in_rows, height - y);
+ DCHECK_GT(rows_to_copy, 0);
+
+ ri->CopySubTexture(staging_buffer->texture_id, mailbox_texture_id, 0, y, 0,
+ y, rect_to_copy.width(), rows_to_copy);
+ y += rows_to_copy;
+
+ // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
+ // used for this copy operation.
+ bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
+
+ if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
+ ri->ShallowFlushCHROMIUM();
+ bytes_scheduled_since_last_flush_ = 0;
+ }
}
+ if (query_target != GL_NONE)
+ ri->EndQueryEXT(query_target);
+
ri->DeleteTextures(1, &mailbox_texture_id);
// Generate sync token on the worker context that will be sent to and waited
@@ -514,4 +497,8 @@ gfx::BufferUsage OneCopyRasterBufferProvider::StagingBufferUsage() const {
: gfx::BufferUsage::GPU_READ_CPU_READ_WRITE;
}
+bool OneCopyRasterBufferProvider::CheckRasterFinishedQueries() {
+ return false;
+}
+
} // namespace cc
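
The copy path above folds the old sync_query/#ifdef logic into a single query_target choice. Restated in one place (same constants and capabilities as in the hunk above; a summary, not replacement code):

  // GL_NONE means "no query"; AcquireStagingBuffer then falls back to
  // ri->Finish() when it has to wait for such a buffer.
  GLenum query_target = GL_NONE;
  if (worker_context_provider_->ContextCapabilities().sync_query) {
    // Native GpuMemoryBuffers may still be read by the GPU after commands
    // are issued, so by default wait for command completion.
    query_target = GL_COMMANDS_COMPLETED_CHROMIUM;
  }
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
  // COMMANDS_COMPLETED queries are slow on ARM Chrome OS devices
  // (https://crbug.com/580166), so settle for "issued" there.
  query_target = GL_COMMANDS_ISSUED_CHROMIUM;
#endif
  const auto* buffer = staging_buffer->gpu_memory_buffer.get();
  if (buffer &&
      buffer->GetType() == gfx::GpuMemoryBufferType::SHARED_MEMORY_BUFFER) {
    // Shared-memory buffers are uploaded via glTexImage2D, so an "issued"
    // query is already sufficient.
    query_target = GL_COMMANDS_ISSUED_CHROMIUM;
  }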
diff --git a/chromium/cc/raster/one_copy_raster_buffer_provider.h b/chromium/cc/raster/one_copy_raster_buffer_provider.h
index e23dc8ddb88..6766cae7a40 100644
--- a/chromium/cc/raster/one_copy_raster_buffer_provider.h
+++ b/chromium/cc/raster/one_copy_raster_buffer_provider.h
@@ -58,13 +58,13 @@ class CC_EXPORT OneCopyRasterBufferProvider : public RasterBufferProvider {
const base::Closure& callback,
uint64_t pending_callback_id) const override;
void Shutdown() override;
+ bool CheckRasterFinishedQueries() override;
// Playback raster source and copy result into |resource|.
gpu::SyncToken PlaybackAndCopyOnWorkerThread(
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
GLenum mailbox_texture_target,
bool mailbox_texture_is_overlay_candidate,
- bool mailbox_texture_storage_allocated,
const gpu::SyncToken& sync_token,
const RasterSource* raster_source,
const gfx::Rect& raster_full_rect,
@@ -109,11 +109,9 @@ class CC_EXPORT OneCopyRasterBufferProvider : public RasterBufferProvider {
const gfx::ColorSpace color_space_;
const uint64_t previous_content_id_;
const gpu::SyncToken before_raster_sync_token_;
- const gpu::Mailbox mailbox_;
+ gpu::Mailbox mailbox_;
const GLenum mailbox_texture_target_;
const bool mailbox_texture_is_overlay_candidate_;
- // Set to true once allocation is done in the worker thread.
- bool mailbox_texture_storage_allocated_;
// A SyncToken to be returned from the worker thread, and waited on before
// using the rastered resource.
gpu::SyncToken after_raster_sync_token_;
@@ -137,10 +135,9 @@ class CC_EXPORT OneCopyRasterBufferProvider : public RasterBufferProvider {
const gfx::Rect& rect_to_copy,
viz::ResourceFormat resource_format,
const gfx::Size& resource_size,
- const gpu::Mailbox& mailbox,
+ gpu::Mailbox* mailbox,
GLenum mailbox_texture_target,
bool mailbox_texture_is_overlay_candidate,
- bool mailbox_texture_storage_allocated,
const gpu::SyncToken& sync_token,
const gfx::ColorSpace& color_space);
gfx::BufferUsage StagingBufferUsage() const;
diff --git a/chromium/cc/raster/raster_buffer_provider.cc b/chromium/cc/raster/raster_buffer_provider.cc
index 57f2a6d01f3..991688679f7 100644
--- a/chromium/cc/raster/raster_buffer_provider.cc
+++ b/chromium/cc/raster/raster_buffer_provider.cc
@@ -8,7 +8,6 @@
#include "base/trace_event/trace_event.h"
#include "cc/raster/raster_source.h"
-#include "cc/raster/texture_compressor.h"
#include "components/viz/common/resources/platform_color.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "third_party/skia/include/core/SkCanvas.h"
@@ -29,11 +28,11 @@ bool IsSupportedPlaybackToMemoryFormat(viz::ResourceFormat format) {
case viz::RGBA_4444:
case viz::RGBA_8888:
case viz::BGRA_8888:
- case viz::ETC1:
return true;
case viz::ALPHA_8:
case viz::LUMINANCE_8:
case viz::RGB_565:
+ case viz::ETC1:
case viz::RED_8:
case viz::LUMINANCE_F16:
case viz::RGBA_F16:
@@ -115,27 +114,12 @@ void RasterBufferProvider::PlaybackToMemory(
surface->getCanvas(), target_color_space, content_size,
canvas_bitmap_rect, canvas_bitmap_rect, transform, playback_settings);
- if (format == viz::ETC1) {
- TRACE_EVENT0("cc",
- "RasterBufferProvider::PlaybackToMemory::CompressETC1");
- DCHECK_EQ(size.width() % 4, 0);
- DCHECK_EQ(size.height() % 4, 0);
- std::unique_ptr<TextureCompressor> texture_compressor =
- TextureCompressor::Create(TextureCompressor::kFormatETC1);
- SkPixmap pixmap;
- surface->peekPixels(&pixmap);
- texture_compressor->Compress(
- reinterpret_cast<const uint8_t*>(pixmap.addr()),
- reinterpret_cast<uint8_t*>(memory), size.width(), size.height(),
- TextureCompressor::kQualityHigh);
- } else {
- TRACE_EVENT0("cc",
- "RasterBufferProvider::PlaybackToMemory::ConvertRGBA4444");
- SkImageInfo dst_info = info.makeColorType(
- ResourceFormatToClosestSkColorType(gpu_compositing, format));
- bool rv = surface->readPixels(dst_info, memory, stride, 0, 0);
- DCHECK(rv);
- }
+ TRACE_EVENT0("cc",
+ "RasterBufferProvider::PlaybackToMemory::ConvertRGBA4444");
+ SkImageInfo dst_info = info.makeColorType(
+ ResourceFormatToClosestSkColorType(gpu_compositing, format));
+ bool rv = surface->readPixels(dst_info, memory, stride, 0, 0);
+ DCHECK(rv);
return;
}
case viz::ETC1:
diff --git a/chromium/cc/raster/raster_buffer_provider.h b/chromium/cc/raster/raster_buffer_provider.h
index 3e2b1aaeb50..74354c7aceb 100644
--- a/chromium/cc/raster/raster_buffer_provider.h
+++ b/chromium/cc/raster/raster_buffer_provider.h
@@ -88,6 +88,14 @@ class CC_EXPORT RasterBufferProvider {
// Shutdown for doing cleanup.
virtual void Shutdown() = 0;
+
+  // Checks whether GPU-side queries issued for previous raster work have
+  // finished. Note that this acquires the worker context lock, so it can be
+  // called from any thread; usage from the compositor thread should still be
+  // avoided to prevent contention with worker threads.
+  // Returns true if there are pending queries that could not be completed in
+  // this check.
+ virtual bool CheckRasterFinishedQueries() = 0;
};
} // namespace cc
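
How often CheckRasterFinishedQueries() is polled is left to the caller; the new test in raster_buffer_provider_unittest.cc below simply calls it once after the GPU work has signalled completion. A hypothetical polling loop on a non-compositor task runner could look like the sketch below (the function name, task runner, and 16 ms delay are illustrative and not part of this patch):

  // Hypothetical helper: keep reposting while queries are still pending.
  void PollRasterQueries(RasterBufferProvider* provider,
                         scoped_refptr<base::SequencedTaskRunner> task_runner) {
    // CheckRasterFinishedQueries() returns true while some previously issued
    // queries are still incomplete.
    if (provider->CheckRasterFinishedQueries()) {
      task_runner->PostDelayedTask(
          FROM_HERE,
          base::BindOnce(&PollRasterQueries, provider, task_runner),
          base::TimeDelta::FromMilliseconds(16));  // Delay is illustrative.
    }
  }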
diff --git a/chromium/cc/raster/raster_buffer_provider_perftest.cc b/chromium/cc/raster/raster_buffer_provider_perftest.cc
index 36db4d2c575..bbdd6f57bda 100644
--- a/chromium/cc/raster/raster_buffer_provider_perftest.cc
+++ b/chromium/cc/raster/raster_buffer_provider_perftest.cc
@@ -117,6 +117,12 @@ class PerfContextProvider
}
return test_context_provider_->GrContext();
}
+ gpu::SharedImageInterface* SharedImageInterface() override {
+ if (!test_context_provider_) {
+ test_context_provider_ = viz::TestContextProvider::Create();
+ }
+ return test_context_provider_->SharedImageInterface();
+ }
viz::ContextCacheController* CacheController() override {
return &cache_controller_;
}
@@ -305,7 +311,7 @@ class RasterBufferProviderPerfTestBase {
for (auto& decode_task : raster_task->dependencies()) {
// Add decode task if it doesn't already exist in graph.
- TaskGraph::Node::Vector::iterator decode_it =
+ auto decode_it =
std::find_if(graph->nodes.begin(), graph->nodes.end(),
[decode_task](const TaskGraph::Node& node) {
return node.task == decode_task;
diff --git a/chromium/cc/raster/raster_buffer_provider_unittest.cc b/chromium/cc/raster/raster_buffer_provider_unittest.cc
index cbe334d7eab..0cbd9467744 100644
--- a/chromium/cc/raster/raster_buffer_provider_unittest.cc
+++ b/chromium/cc/raster/raster_buffer_provider_unittest.cc
@@ -16,6 +16,7 @@
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/test/metrics/histogram_tester.h"
#include "base/threading/thread_task_runner_handle.h"
#include "cc/base/unique_notifier.h"
#include "cc/paint/draw_image.h"
@@ -171,7 +172,7 @@ class RasterBufferProviderTest
Create3dResourceProvider();
raster_buffer_provider_ = std::make_unique<GpuRasterBufferProvider>(
context_provider_.get(), worker_context_provider_.get(), false, 0,
- viz::RGBA_8888, gfx::Size(), true, false);
+ viz::RGBA_8888, gfx::Size(), true, false, 1);
break;
case RASTER_BUFFER_PROVIDER_TYPE_BITMAP:
CreateSoftwareResourceProvider();
@@ -487,6 +488,37 @@ TEST_P(RasterBufferProviderTest, WaitOnSyncTokenAfterReschedulingTask) {
EXPECT_FALSE(completed_tasks()[1].canceled);
}
+TEST_P(RasterBufferProviderTest, MeasureGpuRasterDuration) {
+ if (GetParam() != RASTER_BUFFER_PROVIDER_TYPE_GPU)
+ return;
+
+ // Schedule a task.
+ AppendTask(0u);
+ ScheduleTasks();
+ RunMessageLoopUntilAllTasksHaveCompleted();
+
+ // Wait for the GPU side work to finish.
+ base::RunLoop run_loop;
+ std::vector<const ResourcePool::InUsePoolResource*> array;
+ for (const auto& resource : resources_)
+ array.push_back(&resource);
+ uint64_t callback_id = raster_buffer_provider_->SetReadyToDrawCallback(
+ array,
+ base::Bind([](base::RunLoop* run_loop) { run_loop->Quit(); }, &run_loop),
+ 0);
+ ASSERT_TRUE(callback_id);
+ run_loop.Run();
+
+  // Poll the raster queries and make sure a histogram is logged.
+ base::HistogramTester histogram_tester;
+ std::string histogram("Renderer4.Renderer.RasterTaskTotalDuration.Gpu");
+ histogram_tester.ExpectTotalCount(histogram, 0);
+ bool has_pending_queries =
+ raster_buffer_provider_->CheckRasterFinishedQueries();
+ EXPECT_FALSE(has_pending_queries);
+ histogram_tester.ExpectTotalCount(histogram, 1);
+}
+
INSTANTIATE_TEST_CASE_P(
RasterBufferProviderTests,
RasterBufferProviderTest,
diff --git a/chromium/cc/raster/raster_source.cc b/chromium/cc/raster/raster_source.cc
index be0cb4fbb3f..8f510fadc4c 100644
--- a/chromium/cc/raster/raster_source.cc
+++ b/chromium/cc/raster/raster_source.cc
@@ -94,16 +94,16 @@ void RasterSource::ClearForOpaqueRaster(
if (device_column.intersect(playback_device_rect)) {
clear_type = RasterSourceClearType::kBorder;
raster_canvas->save();
- raster_canvas->clipRect(SkRect::MakeFromIRect(device_column),
- SkClipOp::kIntersect, false);
+ raster_canvas->clipRect(SkRect::Make(device_column), SkClipOp::kIntersect,
+ false);
raster_canvas->drawColor(background_color_, SkBlendMode::kSrc);
raster_canvas->restore();
}
if (device_row.intersect(playback_device_rect)) {
clear_type = RasterSourceClearType::kBorder;
raster_canvas->save();
- raster_canvas->clipRect(SkRect::MakeFromIRect(device_row),
- SkClipOp::kIntersect, false);
+ raster_canvas->clipRect(SkRect::Make(device_row), SkClipOp::kIntersect,
+ false);
raster_canvas->drawColor(background_color_, SkBlendMode::kSrc);
raster_canvas->restore();
}
@@ -154,7 +154,7 @@ void RasterSource::PlaybackToCanvas(
raster_canvas->save();
raster_canvas->translate(-canvas_bitmap_rect.x(), -canvas_bitmap_rect.y());
- raster_canvas->clipRect(SkRect::MakeFromIRect(raster_bounds));
+ raster_canvas->clipRect(SkRect::Make(raster_bounds));
raster_canvas->translate(raster_transform.translation().x(),
raster_transform.translation().y());
raster_canvas->scale(raster_transform.scale() / recording_scale_factor_,
diff --git a/chromium/cc/raster/staging_buffer_pool.cc b/chromium/cc/raster/staging_buffer_pool.cc
index eada8b18943..a24fe9a16fb 100644
--- a/chromium/cc/raster/staging_buffer_pool.cc
+++ b/chromium/cc/raster/staging_buffer_pool.cc
@@ -6,7 +6,6 @@
#include <memory>
-#include "base/memory/memory_coordinator_client_registry.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
@@ -35,14 +34,16 @@ const int kMaxCheckForQueryResultAvailableAttempts = 256;
// Delay before a staging buffer might be released.
const int kStagingBufferExpirationDelayMs = 1000;
-bool CheckForQueryResult(gpu::raster::RasterInterface* ri, unsigned query_id) {
- unsigned complete = 1;
+bool CheckForQueryResult(gpu::raster::RasterInterface* ri, GLuint query_id) {
+ DCHECK(query_id);
+ GLuint complete = 1;
ri->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
return !!complete;
}
-void WaitForQueryResult(gpu::raster::RasterInterface* ri, unsigned query_id) {
+void WaitForQueryResult(gpu::raster::RasterInterface* ri, GLuint query_id) {
TRACE_EVENT0("cc", "WaitForQueryResult");
+ DCHECK(query_id);
int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
while (attempts_left--) {
@@ -57,19 +58,14 @@ void WaitForQueryResult(gpu::raster::RasterInterface* ri, unsigned query_id) {
kCheckForQueryResultAvailableTickRateMs));
}
- unsigned result = 0;
+ GLuint result = 0;
ri->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
}
} // namespace
StagingBuffer::StagingBuffer(const gfx::Size& size, viz::ResourceFormat format)
- : size(size),
- format(format),
- texture_id(0),
- image_id(0),
- query_id(0),
- content_id(0) {}
+ : size(size), format(format) {}
StagingBuffer::~StagingBuffer() {
DCHECK_EQ(texture_id, 0u);
@@ -139,7 +135,6 @@ StagingBufferPool::StagingBufferPool(
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "cc::StagingBufferPool", base::ThreadTaskRunnerHandle::Get());
- base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
memory_pressure_listener_.reset(new base::MemoryPressureListener(
base::BindRepeating(&StagingBufferPool::OnMemoryPressure,
weak_ptr_factory_.GetWeakPtr())));
@@ -149,7 +144,6 @@ StagingBufferPool::StagingBufferPool(
}
StagingBufferPool::~StagingBufferPool() {
- base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
this);
}
@@ -257,14 +251,15 @@ std::unique_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer(
DCHECK(ri);
// Check if any busy buffers have become available.
- if (worker_context_provider_->ContextCapabilities().sync_query) {
- while (!busy_buffers_.empty()) {
- if (!CheckForQueryResult(ri, busy_buffers_.front()->query_id))
- break;
+ while (!busy_buffers_.empty()) {
+ // Early out if query isn't used, or if query isn't complete yet. Query is
+ // created in OneCopyRasterBufferProvider::CopyOnWorkerThread().
+ if (!busy_buffers_.front()->query_id ||
+ !CheckForQueryResult(ri, busy_buffers_.front()->query_id))
+ break;
- MarkStagingBufferAsFree(busy_buffers_.front().get());
- free_buffers_.push_back(PopFront(&busy_buffers_));
- }
+ MarkStagingBufferAsFree(busy_buffers_.front().get());
+ free_buffers_.push_back(PopFront(&busy_buffers_));
}
// Wait for memory usage of non-free buffers to become less than the limit.
@@ -275,12 +270,12 @@ std::unique_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer(
if (busy_buffers_.empty())
break;
- if (worker_context_provider_->ContextCapabilities().sync_query) {
+ if (busy_buffers_.front()->query_id) {
WaitForQueryResult(ri, busy_buffers_.front()->query_id);
MarkStagingBufferAsFree(busy_buffers_.front().get());
free_buffers_.push_back(PopFront(&busy_buffers_));
} else {
- // Fall-back to glFinish if CHROMIUM_sync_query is not available.
+ // Fall back to glFinish if query isn't used.
ri->Finish();
while (!busy_buffers_.empty()) {
MarkStagingBufferAsFree(busy_buffers_.front().get());
@@ -424,12 +419,6 @@ void StagingBufferPool::ReleaseBuffersNotUsedSince(base::TimeTicks time) {
}
}
-void StagingBufferPool::OnPurgeMemory() {
- base::AutoLock lock(lock_);
- // Release all buffers, regardless of how recently they were used.
- ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
-}
-
void StagingBufferPool::OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level) {
base::AutoLock lock(lock_);
@@ -438,6 +427,7 @@ void StagingBufferPool::OnMemoryPressure(
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
break;
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+ // Release all buffers, regardless of how recently they were used.
ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
break;
}
diff --git a/chromium/cc/raster/staging_buffer_pool.h b/chromium/cc/raster/staging_buffer_pool.h
index 881c34e0acd..687b0066588 100644
--- a/chromium/cc/raster/staging_buffer_pool.h
+++ b/chromium/cc/raster/staging_buffer_pool.h
@@ -12,7 +12,6 @@
#include "base/containers/circular_deque.h"
#include "base/macros.h"
-#include "base/memory/memory_coordinator_client.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/weak_ptr.h"
#include "base/sequenced_task_runner.h"
@@ -22,6 +21,7 @@
#include "base/trace_event/trace_event.h"
#include "cc/cc_export.h"
#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/common/gl2_types.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -51,17 +51,31 @@ struct StagingBuffer {
const gfx::Size size;
const viz::ResourceFormat format;
- std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer;
base::TimeTicks last_usage;
- unsigned texture_id;
- unsigned image_id;
- unsigned query_id;
- uint64_t content_id;
+
+ // The following fields are initialized by OneCopyRasterBufferProvider.
+ // Storage for the staging buffer. This can be a GPU native or shared memory
+ // GpuMemoryBuffer.
+ std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer;
+
+ // Id for image used to import the GpuMemoryBuffer to command buffer.
+ GLuint image_id = 0;
+
+ // Id for texture that's bound to the GpuMemoryBuffer image.
+ GLuint texture_id = 0;
+
+ // Id of command buffer query that tracks use of this staging buffer by the
+ // GPU. In general, GPU synchronization is necessary for native
+ // GpuMemoryBuffers.
+ GLuint query_id = 0;
+
+ // Id of the content that's rastered into this staging buffer. Used to
+ // retrieve staging buffer with known content for reuse for partial raster.
+ uint64_t content_id = 0;
};
class CC_EXPORT StagingBufferPool
- : public base::trace_event::MemoryDumpProvider,
- public base::MemoryCoordinatorClient {
+ : public base::trace_event::MemoryDumpProvider {
public:
~StagingBufferPool() final;
@@ -98,11 +112,6 @@ class CC_EXPORT StagingBufferPool
void StagingStateAsValueInto(
base::trace_event::TracedValue* staging_state) const;
- // Overriden from base::MemoryCoordinatorClient.
- void OnPurgeMemory() override;
-
- // TODO(gyuyoung): OnMemoryPressure is deprecated. So this should be removed
- // when the memory coordinator is enabled by default.
void OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level);
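
With the MemoryCoordinatorClient hooks gone, critical memory pressure is the remaining trigger for dropping every staging buffer regardless of age. The wiring, condensed from the staging_buffer_pool.cc hunks above (member names as in this class):

  // Constructor: route pressure notifications to OnMemoryPressure().
  memory_pressure_listener_.reset(new base::MemoryPressureListener(
      base::BindRepeating(&StagingBufferPool::OnMemoryPressure,
                          weak_ptr_factory_.GetWeakPtr())));

  void StagingBufferPool::OnMemoryPressure(
      base::MemoryPressureListener::MemoryPressureLevel level) {
    base::AutoLock lock(lock_);
    if (level ==
        base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
      // Release all buffers, regardless of how recently they were used.
      ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
    }
  }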
diff --git a/chromium/cc/raster/staging_buffer_pool_unittest.cc b/chromium/cc/raster/staging_buffer_pool_unittest.cc
index 7c7c79aec61..5fc5dac4d2c 100644
--- a/chromium/cc/raster/staging_buffer_pool_unittest.cc
+++ b/chromium/cc/raster/staging_buffer_pool_unittest.cc
@@ -4,8 +4,6 @@
#include "cc/raster/staging_buffer_pool.h"
-#include "base/memory/memory_coordinator_client.h"
-#include "base/memory/memory_coordinator_client_registry.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -37,9 +35,10 @@ TEST(StagingBufferPoolTest, ShutdownImmediatelyAfterCreation) {
flush_message_loop();
// Now, destroy the pool, and trigger a notification from the
- // MemoryCoordinatorClientRegistry.
+ // MemoryPressureListener.
pool = nullptr;
- base::MemoryCoordinatorClientRegistry::GetInstance()->PurgeMemory();
+ base::MemoryPressureListener::SimulatePressureNotification(
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
// Allow the callbacks in the observers to run.
flush_message_loop();
// No crash.
diff --git a/chromium/cc/raster/task_graph_work_queue.cc b/chromium/cc/raster/task_graph_work_queue.cc
index 06b1f54e293..2d04f52e6aa 100644
--- a/chromium/cc/raster/task_graph_work_queue.cc
+++ b/chromium/cc/raster/task_graph_work_queue.cc
@@ -80,11 +80,11 @@ class DependentIterator {
} while (graph_->edges[current_index_].task != task_);
// Now find the node for the dependent of this edge.
- TaskGraph::Node::Vector::iterator it = std::find_if(
- graph_->nodes.begin(), graph_->nodes.end(),
- [this](const TaskGraph::Node& node) {
- return node.task == graph_->edges[current_index_].dependent;
- });
+ auto it = std::find_if(graph_->nodes.begin(), graph_->nodes.end(),
+ [this](const TaskGraph::Node& node) {
+ return node.task ==
+ graph_->edges[current_index_].dependent;
+ });
DCHECK(it != graph_->nodes.end());
current_node_ = &(*it);
@@ -152,11 +152,11 @@ void TaskGraphWorkQueue::ScheduleTasks(NamespaceToken token, TaskGraph* graph) {
// Remove any old nodes that are associated with this task. The result is
// that the old graph is left with all nodes not present in this graph,
// which we use below to determine what tasks need to be canceled.
- TaskGraph::Node::Vector::iterator old_it = std::find_if(
- task_namespace.graph.nodes.begin(), task_namespace.graph.nodes.end(),
- [&node](const TaskGraph::Node& other) {
- return node.task == other.task;
- });
+ auto old_it = std::find_if(task_namespace.graph.nodes.begin(),
+ task_namespace.graph.nodes.end(),
+ [&node](const TaskGraph::Node& other) {
+ return node.task == other.task;
+ });
if (old_it != task_namespace.graph.nodes.end()) {
std::swap(*old_it, task_namespace.graph.nodes.back());
// If old task is scheduled to run again and not yet started running,
@@ -200,8 +200,7 @@ void TaskGraphWorkQueue::ScheduleTasks(NamespaceToken token, TaskGraph* graph) {
task_namespace.graph.Swap(graph);
// Determine what tasks in old graph need to be canceled.
- for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
- it != graph->nodes.end(); ++it) {
+ for (auto it = graph->nodes.begin(); it != graph->nodes.end(); ++it) {
TaskGraph::Node& node = *it;
// Skip if already finished running task.
@@ -353,7 +352,7 @@ void TaskGraphWorkQueue::CompleteTask(PrioritizedTask completed_task) {
void TaskGraphWorkQueue::CollectCompletedTasks(NamespaceToken token,
Task::Vector* completed_tasks) {
- TaskNamespaceMap::iterator it = namespaces_.find(token);
+ auto it = namespaces_.find(token);
if (it == namespaces_.end())
return;
diff --git a/chromium/cc/raster/task_graph_work_queue_unittest.cc b/chromium/cc/raster/task_graph_work_queue_unittest.cc
index 123364ff813..93455db7e23 100644
--- a/chromium/cc/raster/task_graph_work_queue_unittest.cc
+++ b/chromium/cc/raster/task_graph_work_queue_unittest.cc
@@ -4,6 +4,7 @@
#include "cc/raster/task_graph_work_queue.h"
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cc {
@@ -58,5 +59,161 @@ TEST(TaskGraphWorkQueueTest, TestChangingDependency) {
EXPECT_FALSE(work_queue.HasReadyToRunTasks());
}
+// Tasks with the same priority but in different categories.
+TEST(TaskGraphWorkQueueTest, TestTaskWithDifferentCategory) {
+ TaskGraphWorkQueue work_queue;
+ NamespaceToken token = work_queue.GenerateNamespaceToken();
+
+  // Create a graph where |task| has dependencies.
+ TaskGraph graph;
+ scoped_refptr<FakeTaskImpl> task(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task1(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task2(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task3(new FakeTaskImpl());
+
+ graph.nodes.push_back(TaskGraph::Node(task.get(), 0u, 0u, 3u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task1.get(), 0u, 0u, 0u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task2.get(), 1u, 0u, 0u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task3.get(), 2u, 0u, 0u));
+
+ graph.edges.push_back(TaskGraph::Edge(dependency_task1.get(), task.get()));
+ graph.edges.push_back(TaskGraph::Edge(dependency_task2.get(), task.get()));
+ graph.edges.push_back(TaskGraph::Edge(dependency_task3.get(), task.get()));
+
+ // Schedule the graph.
+ work_queue.ScheduleTasks(token, &graph);
+
+  // Run the |dependency_task1| from category 0.
+ TaskGraphWorkQueue::PrioritizedTask prioritized_dependency_task =
+ work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task1.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+ EXPECT_FALSE(work_queue.HasReadyToRunTasksForCategory(0u));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasksForCategory(1u));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasksForCategory(2u));
+
+  // Run the |dependency_task2| from category 1.
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(1u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task2.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+ EXPECT_FALSE(work_queue.HasReadyToRunTasksForCategory(0u));
+ EXPECT_FALSE(work_queue.HasReadyToRunTasksForCategory(1u));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasksForCategory(2u));
+
+  // Run the |dependency_task3| from category 2.
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(2u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task3.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+  // Once all dependencies from the different categories have completed,
+  // |task| becomes ready to run.
+ EXPECT_TRUE(work_queue.HasReadyToRunTasksForCategory(0u));
+ EXPECT_FALSE(work_queue.HasReadyToRunTasksForCategory(1u));
+ EXPECT_FALSE(work_queue.HasReadyToRunTasksForCategory(2u));
+
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), task.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_FALSE(work_queue.HasReadyToRunTasks());
+}
+
+// Tasks with different priorities run in priority order, but a task's
+// dependencies must still complete before it becomes ready to run.
+TEST(TaskGraphWorkQueueTest, TestTaskWithDifferentPriority) {
+ TaskGraphWorkQueue work_queue;
+ NamespaceToken token = work_queue.GenerateNamespaceToken();
+ {
+    // Create a graph where |task| has dependencies.
+ TaskGraph graph;
+ scoped_refptr<FakeTaskImpl> task(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task1(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task2(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task3(new FakeTaskImpl());
+
+    // |task| has low priority and 3 dependencies, so it will run last.
+ graph.nodes.push_back(TaskGraph::Node(task.get(), 0u, 2u, 3u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task1.get(), 0u, 3u, 0u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task2.get(), 0u, 2u, 0u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task3.get(), 0u, 1u, 0u));
+
+ graph.edges.push_back(TaskGraph::Edge(dependency_task1.get(), task.get()));
+ graph.edges.push_back(TaskGraph::Edge(dependency_task2.get(), task.get()));
+ graph.edges.push_back(TaskGraph::Edge(dependency_task3.get(), task.get()));
+
+ // Schedule the graph.
+ work_queue.ScheduleTasks(token, &graph);
+
+    // Run the dependency tasks; they are returned in priority order.
+ TaskGraphWorkQueue::PrioritizedTask prioritized_dependency_task =
+ work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task3.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task2.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task1.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+
+    // |task| runs last.
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), task.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_FALSE(work_queue.HasReadyToRunTasks());
+ }
+
+ {
+    // Create a graph where |task| has dependencies.
+ TaskGraph graph;
+ scoped_refptr<FakeTaskImpl> task(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task1(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task2(new FakeTaskImpl());
+ scoped_refptr<FakeTaskImpl> dependency_task3(new FakeTaskImpl());
+
+    // |task| has the highest priority and 3 dependencies, yet it still runs last.
+ graph.nodes.push_back(TaskGraph::Node(task.get(), 0u, 0u, 3u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task1.get(), 0u, 3u, 0u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task2.get(), 0u, 2u, 0u));
+ graph.nodes.push_back(TaskGraph::Node(dependency_task3.get(), 0u, 1u, 0u));
+
+ graph.edges.push_back(TaskGraph::Edge(dependency_task1.get(), task.get()));
+ graph.edges.push_back(TaskGraph::Edge(dependency_task2.get(), task.get()));
+ graph.edges.push_back(TaskGraph::Edge(dependency_task3.get(), task.get()));
+
+ // Schedule the graph.
+ work_queue.ScheduleTasks(token, &graph);
+
+    // Run the dependency tasks; they are returned in priority order.
+ TaskGraphWorkQueue::PrioritizedTask prioritized_dependency_task =
+ work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task3.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task2.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), dependency_task1.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_TRUE(work_queue.HasReadyToRunTasks());
+
+    // |task| runs last.
+ prioritized_dependency_task = work_queue.GetNextTaskToRun(0u);
+ EXPECT_EQ(prioritized_dependency_task.task.get(), task.get());
+ work_queue.CompleteTask(std::move(prioritized_dependency_task));
+ EXPECT_FALSE(work_queue.HasReadyToRunTasks());
+ }
+}
+
} // namespace
} // namespace cc
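The tests added above exercise TaskGraphWorkQueue's category and priority handling. Reading them, the four TaskGraph::Node arguments appear to be (task, category, priority, dependency count), and GetNextTaskToRun(category) returns the highest-priority ready task for that category. A minimal usage sketch under those assumptions, reusing the FakeTaskImpl helper from the test file:

  // Sketch only: the Node argument order (task, category, priority,
  // dependencies) is inferred from the tests above.
  TaskGraphWorkQueue work_queue;
  NamespaceToken token = work_queue.GenerateNamespaceToken();

  TaskGraph graph;
  scoped_refptr<FakeTaskImpl> parent(new FakeTaskImpl());
  scoped_refptr<FakeTaskImpl> child(new FakeTaskImpl());

  // |parent| waits on one dependency; both use category 0, priority 0.
  graph.nodes.push_back(TaskGraph::Node(parent.get(), 0u, 0u, 1u));
  graph.nodes.push_back(TaskGraph::Node(child.get(), 0u, 0u, 0u));
  graph.edges.push_back(TaskGraph::Edge(child.get(), parent.get()));
  work_queue.ScheduleTasks(token, &graph);

  // |child| has no dependencies, so it is returned first; completing it
  // makes |parent| ready.
  auto prioritized = work_queue.GetNextTaskToRun(0u);
  work_queue.CompleteTask(std::move(prioritized));
  prioritized = work_queue.GetNextTaskToRun(0u);
  work_queue.CompleteTask(std::move(prioritized));

  Task::Vector completed;
  work_queue.CollectCompletedTasks(token, &completed);  // Both tasks.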
diff --git a/chromium/cc/raster/texture_compressor.cc b/chromium/cc/raster/texture_compressor.cc
deleted file mode 100644
index 6aabf6792df..00000000000
--- a/chromium/cc/raster/texture_compressor.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "cc/raster/texture_compressor.h"
-
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "cc/raster/texture_compressor_etc1.h"
-
-#if defined(ARCH_CPU_X86_FAMILY)
-#include "base/cpu.h"
-#include "cc/raster/texture_compressor_etc1_sse.h"
-#endif
-
-namespace cc {
-
-std::unique_ptr<TextureCompressor> TextureCompressor::Create(Format format) {
- switch (format) {
- case kFormatETC1: {
-#if defined(ARCH_CPU_X86_FAMILY)
- base::CPU cpu;
- if (cpu.has_sse2()) {
- return base::WrapUnique(new TextureCompressorETC1SSE());
- }
-#endif
- return base::WrapUnique(new TextureCompressorETC1());
- }
- }
-
- NOTREACHED();
- return nullptr;
-}
-
-} // namespace cc
diff --git a/chromium/cc/raster/texture_compressor.h b/chromium/cc/raster/texture_compressor.h
deleted file mode 100644
index 709fc264516..00000000000
--- a/chromium/cc/raster/texture_compressor.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CC_RASTER_TEXTURE_COMPRESSOR_H_
-#define CC_RASTER_TEXTURE_COMPRESSOR_H_
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "base/macros.h"
-#include "cc/cc_export.h"
-
-namespace cc {
-
-class CC_EXPORT TextureCompressor {
- public:
- enum Format {
- kFormatETC1,
- };
-
- enum Quality {
- kQualityLow,
- kQualityMedium,
- kQualityHigh,
- };
-
- static std::unique_ptr<TextureCompressor> Create(Format format);
- virtual ~TextureCompressor() {}
-
- virtual void Compress(const uint8_t* src,
- uint8_t* dst,
- int width,
- int height,
- Quality quality) = 0;
-
- protected:
- TextureCompressor() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TextureCompressor);
-};
-
-} // namespace cc
-
-#endif // CC_RASTER_TEXTURE_COMPRESSOR_H_
diff --git a/chromium/cc/raster/texture_compressor_etc1.cc b/chromium/cc/raster/texture_compressor_etc1.cc
deleted file mode 100644
index 55b0ca4640c..00000000000
--- a/chromium/cc/raster/texture_compressor_etc1.cc
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// See the following specification for details on the ETC1 format:
-// https://www.khronos.org/registry/gles/extensions/OES/OES_compressed_ETC1_RGB8_texture.txt
-
-#include "cc/raster/texture_compressor_etc1.h"
-
-#include <stdint.h>
-#include <string.h>
-#include <limits>
-
-#include "base/logging.h"
-
-// Defining the following macro will cause the error metric function to weigh
-// each color channel differently depending on how the human eye can perceive
-// them. This can give a slight improvement in image quality at the cost of a
-// performance hit.
-// #define USE_PERCEIVED_ERROR_METRIC
-
-namespace cc {
-
-namespace {
-
-// Constructs a color from a given base color and luminance value.
-inline Color MakeColor(const Color& base, int16_t lum) {
- int b = static_cast<int>(base.channels.b) + lum;
- int g = static_cast<int>(base.channels.g) + lum;
- int r = static_cast<int>(base.channels.r) + lum;
- Color color;
- color.channels.b = static_cast<uint8_t>(clamp(b, 0, 255));
- color.channels.g = static_cast<uint8_t>(clamp(g, 0, 255));
- color.channels.r = static_cast<uint8_t>(clamp(r, 0, 255));
- return color;
-}
-
-// Calculates the error metric for two colors. A small error signals that the
-// colors are similar to each other, a large error signals the opposite.
-inline uint32_t GetColorError(const Color& u, const Color& v) {
-#ifdef USE_PERCEIVED_ERROR_METRIC
- float delta_b = static_cast<float>(u.channels.b) - v.channels.b;
- float delta_g = static_cast<float>(u.channels.g) - v.channels.g;
- float delta_r = static_cast<float>(u.channels.r) - v.channels.r;
- return static_cast<uint32_t>(0.299f * delta_b * delta_b +
- 0.587f * delta_g * delta_g +
- 0.114f * delta_r * delta_r);
-#else
- int delta_b = static_cast<int>(u.channels.b) - v.channels.b;
- int delta_g = static_cast<int>(u.channels.g) - v.channels.g;
- int delta_r = static_cast<int>(u.channels.r) - v.channels.r;
- return delta_b * delta_b + delta_g * delta_g + delta_r * delta_r;
-#endif
-}
-
-void GetAverageColor(const Color* src, float* avg_color) {
- uint32_t sum_b = 0, sum_g = 0, sum_r = 0;
-
- for (unsigned int i = 0; i < 8; ++i) {
- sum_b += src[i].channels.b;
- sum_g += src[i].channels.g;
- sum_r += src[i].channels.r;
- }
-
- const float kInv8 = 1.0f / 8.0f;
- avg_color[0] = static_cast<float>(sum_b) * kInv8;
- avg_color[1] = static_cast<float>(sum_g) * kInv8;
- avg_color[2] = static_cast<float>(sum_r) * kInv8;
-}
-
-void ComputeLuminance(uint8_t* block,
- const Color* src,
- const Color& base,
- int sub_block_id,
- const uint8_t* idx_to_num_tab) {
- uint32_t best_tbl_err = std::numeric_limits<uint32_t>::max();
- uint8_t best_tbl_idx = 0;
- uint8_t best_mod_idx[8][8]; // [table][texel]
-
- // Try all codeword tables to find the one giving the best results for this
- // block.
- for (unsigned int tbl_idx = 0; tbl_idx < 8; ++tbl_idx) {
- // Pre-compute all the candidate colors; combinations of the base color and
- // all available luminance values.
- Color candidate_color[4]; // [modifier]
- for (unsigned int mod_idx = 0; mod_idx < 4; ++mod_idx) {
- int16_t lum = g_codeword_tables[tbl_idx][mod_idx];
- candidate_color[mod_idx] = MakeColor(base, lum);
- }
-
- uint32_t tbl_err = 0;
-
- for (unsigned int i = 0; i < 8; ++i) {
- // Try all modifiers in the current table to find which one gives the
- // smallest error.
- uint32_t best_mod_err = std::numeric_limits<uint32_t>::max();
- for (unsigned int mod_idx = 0; mod_idx < 4; ++mod_idx) {
- const Color& color = candidate_color[mod_idx];
-
- uint32_t mod_err = GetColorError(src[i], color);
- if (mod_err < best_mod_err) {
- best_mod_idx[tbl_idx][i] = mod_idx;
- best_mod_err = mod_err;
-
- if (mod_err == 0)
- break; // We cannot do any better than this.
- }
- }
-
- tbl_err += best_mod_err;
- if (tbl_err > best_tbl_err)
- break; // We're already doing worse than the best table so skip.
- }
-
- if (tbl_err < best_tbl_err) {
- best_tbl_err = tbl_err;
- best_tbl_idx = tbl_idx;
-
- if (tbl_err == 0)
- break; // We cannot do any better than this.
- }
- }
-
- WriteCodewordTable(block, sub_block_id, best_tbl_idx);
-
- uint32_t pix_data = 0;
-
- for (unsigned int i = 0; i < 8; ++i) {
- uint8_t mod_idx = best_mod_idx[best_tbl_idx][i];
- uint8_t pix_idx = g_mod_to_pix[mod_idx];
-
- uint32_t lsb = pix_idx & 0x1;
- uint32_t msb = pix_idx >> 1;
-
- // Obtain the texel number as specified in the standard.
- int texel_num = idx_to_num_tab[i];
- pix_data |= msb << (texel_num + 16);
- pix_data |= lsb << (texel_num);
- }
-
- WritePixelData(block, pix_data);
-}
-
-/**
- * Tries to compress the block under the assumption that it's a single color
- * block. If it's not the function will bail out without writing anything to
- * the destination buffer.
- */
-bool TryCompressSolidBlock(uint8_t* dst, const Color* src) {
- for (unsigned int i = 1; i < 16; ++i) {
- if (src[i].bits != src[0].bits)
- return false;
- }
-
- // Clear destination buffer so that we can "or" in the results.
- memset(dst, 0, 8);
-
- float src_color_float[3] = {static_cast<float>(src->channels.b),
- static_cast<float>(src->channels.g),
- static_cast<float>(src->channels.r)};
- Color base = MakeColor555(src_color_float);
-
- WriteDiff(dst, true);
- WriteFlip(dst, false);
- WriteColors555(dst, base, base);
-
- uint8_t best_tbl_idx = 0;
- uint8_t best_mod_idx = 0;
- uint32_t best_mod_err = std::numeric_limits<uint32_t>::max();
-
- // Try all codeword tables to find the one giving the best results for this
- // block.
- for (unsigned int tbl_idx = 0; tbl_idx < 8; ++tbl_idx) {
- // Try all modifiers in the current table to find which one gives the
- // smallest error.
- for (unsigned int mod_idx = 0; mod_idx < 4; ++mod_idx) {
- int16_t lum = g_codeword_tables[tbl_idx][mod_idx];
- const Color& color = MakeColor(base, lum);
-
- uint32_t mod_err = GetColorError(*src, color);
- if (mod_err < best_mod_err) {
- best_tbl_idx = tbl_idx;
- best_mod_idx = mod_idx;
- best_mod_err = mod_err;
-
- if (mod_err == 0)
- break; // We cannot do any better than this.
- }
- }
-
- if (best_mod_err == 0)
- break;
- }
-
- WriteCodewordTable(dst, 0, best_tbl_idx);
- WriteCodewordTable(dst, 1, best_tbl_idx);
-
- uint8_t pix_idx = g_mod_to_pix[best_mod_idx];
- uint32_t lsb = pix_idx & 0x1;
- uint32_t msb = pix_idx >> 1;
-
- uint32_t pix_data = 0;
- for (unsigned int i = 0; i < 2; ++i) {
- for (unsigned int j = 0; j < 8; ++j) {
- // Obtain the texel number as specified in the standard.
- int texel_num = g_idx_to_num[i][j];
- pix_data |= msb << (texel_num + 16);
- pix_data |= lsb << (texel_num);
- }
- }
-
- WritePixelData(dst, pix_data);
- return true;
-}
-
-void CompressBlock(uint8_t* dst, const Color* ver_src, const Color* hor_src) {
- if (TryCompressSolidBlock(dst, ver_src))
- return;
-
- const Color* sub_block_src[4] = {ver_src, ver_src + 8, hor_src, hor_src + 8};
-
- Color sub_block_avg[4];
- bool use_differential[2] = {true, true};
-
- // Compute the average color for each sub block and determine if differential
- // coding can be used.
- for (unsigned int i = 0, j = 1; i < 4; i += 2, j += 2) {
- float avg_color_0[3];
- GetAverageColor(sub_block_src[i], avg_color_0);
- Color avg_color_555_0 = MakeColor555(avg_color_0);
-
- float avg_color_1[3];
- GetAverageColor(sub_block_src[j], avg_color_1);
- Color avg_color_555_1 = MakeColor555(avg_color_1);
-
- for (unsigned int light_idx = 0; light_idx < 3; ++light_idx) {
- int u = avg_color_555_0.components[light_idx] >> 3;
- int v = avg_color_555_1.components[light_idx] >> 3;
-
- int component_diff = v - u;
- if (component_diff < -4 || component_diff > 3) {
- use_differential[i / 2] = false;
- sub_block_avg[i] = MakeColor444(avg_color_0);
- sub_block_avg[j] = MakeColor444(avg_color_1);
- } else {
- sub_block_avg[i] = avg_color_555_0;
- sub_block_avg[j] = avg_color_555_1;
- }
- }
- }
-
- // Compute the error of each sub block before adjusting for luminance. These
- // error values are later used for determining if we should flip the sub
- // block or not.
- uint32_t sub_block_err[4] = {0};
- for (unsigned int i = 0; i < 4; ++i) {
- for (unsigned int j = 0; j < 8; ++j) {
- sub_block_err[i] += GetColorError(sub_block_avg[i], sub_block_src[i][j]);
- }
- }
-
- bool flip =
- sub_block_err[2] + sub_block_err[3] < sub_block_err[0] + sub_block_err[1];
-
- // Clear destination buffer so that we can "or" in the results.
- memset(dst, 0, 8);
-
- WriteDiff(dst, use_differential[!!flip]);
- WriteFlip(dst, flip);
-
- uint8_t sub_block_off_0 = flip ? 2 : 0;
- uint8_t sub_block_off_1 = sub_block_off_0 + 1;
-
- if (use_differential[!!flip]) {
- WriteColors555(dst, sub_block_avg[sub_block_off_0],
- sub_block_avg[sub_block_off_1]);
- } else {
- WriteColors444(dst, sub_block_avg[sub_block_off_0],
- sub_block_avg[sub_block_off_1]);
- }
-
- // Compute luminance for the first sub block.
- ComputeLuminance(dst, sub_block_src[sub_block_off_0],
- sub_block_avg[sub_block_off_0], 0,
- g_idx_to_num[sub_block_off_0]);
- // Compute luminance for the second sub block.
- ComputeLuminance(dst, sub_block_src[sub_block_off_1],
- sub_block_avg[sub_block_off_1], 1,
- g_idx_to_num[sub_block_off_1]);
-}
-
-} // namespace
-
-void TextureCompressorETC1::Compress(const uint8_t* src,
- uint8_t* dst,
- int width,
- int height,
- Quality quality) {
- DCHECK_GE(width, 4);
- DCHECK_EQ((width & 3), 0);
- DCHECK_GE(height, 4);
- DCHECK_EQ((height & 3), 0);
-
- Color ver_blocks[16];
- Color hor_blocks[16];
-
- for (int y = 0; y < height; y += 4, src += width * 4 * 4) {
- for (int x = 0; x < width; x += 4, dst += 8) {
- const Color* row0 = reinterpret_cast<const Color*>(src + x * 4);
- const Color* row1 = row0 + width;
- const Color* row2 = row1 + width;
- const Color* row3 = row2 + width;
-
- memcpy(ver_blocks, row0, 8);
- memcpy(ver_blocks + 2, row1, 8);
- memcpy(ver_blocks + 4, row2, 8);
- memcpy(ver_blocks + 6, row3, 8);
- memcpy(ver_blocks + 8, row0 + 2, 8);
- memcpy(ver_blocks + 10, row1 + 2, 8);
- memcpy(ver_blocks + 12, row2 + 2, 8);
- memcpy(ver_blocks + 14, row3 + 2, 8);
-
- memcpy(hor_blocks, row0, 16);
- memcpy(hor_blocks + 4, row1, 16);
- memcpy(hor_blocks + 8, row2, 16);
- memcpy(hor_blocks + 12, row3, 16);
-
- CompressBlock(dst, ver_blocks, hor_blocks);
- }
- }
-}
-
-} // namespace cc
diff --git a/chromium/cc/raster/texture_compressor_etc1.h b/chromium/cc/raster/texture_compressor_etc1.h
deleted file mode 100644
index 6e85313cab0..00000000000
--- a/chromium/cc/raster/texture_compressor_etc1.h
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CC_RASTER_TEXTURE_COMPRESSOR_ETC1_H_
-#define CC_RASTER_TEXTURE_COMPRESSOR_ETC1_H_
-
-#include "cc/raster/texture_compressor.h"
-
-#include <stdint.h>
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/macros.h"
-
-namespace cc {
-
-template <typename T>
-inline T clamp(T val, T min, T max) {
- return val < min ? min : (val > max ? max : val);
-}
-
-inline uint8_t round_to_5_bits(float val) {
- return clamp<uint8_t>(val * 31.0f / 255.0f + 0.5f, 0, 31);
-}
-
-inline uint8_t round_to_4_bits(float val) {
- return clamp<uint8_t>(val * 15.0f / 255.0f + 0.5f, 0, 15);
-}
-
-union Color {
- struct BgraColorType {
- uint8_t b;
- uint8_t g;
- uint8_t r;
- uint8_t a;
- } channels;
- uint8_t components[4];
- uint32_t bits;
-};
-
-// Codeword tables.
-// See: Table 3.17.2
-alignas(16) static const int16_t g_codeword_tables[8][4] = {
- {-8, -2, 2, 8}, {-17, -5, 5, 17}, {-29, -9, 9, 29},
- {-42, -13, 13, 42}, {-60, -18, 18, 60}, {-80, -24, 24, 80},
- {-106, -33, 33, 106}, {-183, -47, 47, 183}};
-
-// Maps modifier indices to pixel index values.
-// See: Table 3.17.3
-static const uint8_t g_mod_to_pix[4] = {3, 2, 0, 1};
-
-// The ETC1 specification indexes texels as follows:
-// [a][e][i][m] [ 0][ 4][ 8][12]
-// [b][f][j][n] <-> [ 1][ 5][ 9][13]
-// [c][g][k][o] [ 2][ 6][10][14]
-// [d][h][l][p] [ 3][ 7][11][15]
-
-// [ 0][ 1][ 2][ 3] [ 0][ 1][ 4][ 5]
-// [ 4][ 5][ 6][ 7] <-> [ 8][ 9][12][13]
-// [ 8][ 9][10][11] [ 2][ 3][ 6][ 7]
-// [12][13][14][15] [10][11][14][15]
-
-// However, when extracting sub blocks from BGRA data the natural array
-// indexing order ends up different:
-// vertical0: [a][e][b][f] horizontal0: [a][e][i][m]
-// [c][g][d][h] [b][f][j][n]
-// vertical1: [i][m][j][n] horizontal1: [c][g][k][o]
-// [k][o][l][p] [d][h][l][p]
-
-// In order to translate from the natural array indices in a sub block to the
-// indices (number) used by specification and hardware we use this table.
-static const uint8_t g_idx_to_num[4][8] = {
- {0, 4, 1, 5, 2, 6, 3, 7}, // Vertical block 0.
- {8, 12, 9, 13, 10, 14, 11, 15}, // Vertical block 1.
- {0, 4, 8, 12, 1, 5, 9, 13}, // Horizontal block 0.
- {2, 6, 10, 14, 3, 7, 11, 15} // Horizontal block 1.
-};
-
-inline void WriteColors444(uint8_t* block,
- const Color& color0,
- const Color& color1) {
- // Write output color for BGRA textures.
- block[0] = (color0.channels.r & 0xf0) | (color1.channels.r >> 4);
- block[1] = (color0.channels.g & 0xf0) | (color1.channels.g >> 4);
- block[2] = (color0.channels.b & 0xf0) | (color1.channels.b >> 4);
-}
-
-inline void WriteColors555(uint8_t* block,
- const Color& color0,
- const Color& color1) {
- // Table for conversion to 3-bit two complement format.
- static const uint8_t two_compl_trans_table[8] = {
- 4, // -4 (100b)
- 5, // -3 (101b)
- 6, // -2 (110b)
- 7, // -1 (111b)
- 0, // 0 (000b)
- 1, // 1 (001b)
- 2, // 2 (010b)
- 3, // 3 (011b)
- };
-
- int16_t delta_r =
- static_cast<int16_t>(color1.channels.r >> 3) - (color0.channels.r >> 3);
- int16_t delta_g =
- static_cast<int16_t>(color1.channels.g >> 3) - (color0.channels.g >> 3);
- int16_t delta_b =
- static_cast<int16_t>(color1.channels.b >> 3) - (color0.channels.b >> 3);
- DCHECK_GE(delta_r, -4);
- DCHECK_LE(delta_r, 3);
- DCHECK_GE(delta_g, -4);
- DCHECK_LE(delta_g, 3);
- DCHECK_GE(delta_b, -4);
- DCHECK_LE(delta_b, 3);
-
- // Write output color for BGRA textures.
- block[0] = (color0.channels.r & 0xf8) | two_compl_trans_table[delta_r + 4];
- block[1] = (color0.channels.g & 0xf8) | two_compl_trans_table[delta_g + 4];
- block[2] = (color0.channels.b & 0xf8) | two_compl_trans_table[delta_b + 4];
-}
-
-inline void WriteCodewordTable(uint8_t* block,
- uint8_t sub_block_id,
- uint8_t table) {
- DCHECK_LT(sub_block_id, 2);
- DCHECK_LT(table, 8);
-
- uint8_t shift = (2 + (3 - sub_block_id * 3));
- block[3] &= ~(0x07 << shift);
- block[3] |= table << shift;
-}
-
-inline void WritePixelData(uint8_t* block, uint32_t pixel_data) {
- block[4] |= pixel_data >> 24;
- block[5] |= (pixel_data >> 16) & 0xff;
- block[6] |= (pixel_data >> 8) & 0xff;
- block[7] |= pixel_data & 0xff;
-}
-
-inline void WriteFlip(uint8_t* block, bool flip) {
- block[3] &= ~0x01;
- block[3] |= static_cast<uint8_t>(flip);
-}
-
-inline void WriteDiff(uint8_t* block, bool diff) {
- block[3] &= ~0x02;
- block[3] |= static_cast<uint8_t>(diff) << 1;
-}
-
-// Compresses and rounds BGR888 into BGR444. The resulting BGR444 color is
-// expanded to BGR888 as it would be in hardware after decompression. The
-// actual 444-bit data is available in the four most significant bits of each
-// channel.
-inline Color MakeColor444(const float* bgr) {
- uint8_t b4 = round_to_4_bits(bgr[0]);
- uint8_t g4 = round_to_4_bits(bgr[1]);
- uint8_t r4 = round_to_4_bits(bgr[2]);
- Color bgr444;
- bgr444.channels.b = (b4 << 4) | b4;
- bgr444.channels.g = (g4 << 4) | g4;
- bgr444.channels.r = (r4 << 4) | r4;
- // Added to distinguish between expanded 555 and 444 colors.
- bgr444.channels.a = 0x44;
- return bgr444;
-}
-
-// Compresses and rounds BGR888 into BGR555. The resulting BGR555 color is
-// expanded to BGR888 as it would be in hardware after decompression. The
-// actual 555-bit data is available in the five most significant bits of each
-// channel.
-inline Color MakeColor555(const float* bgr) {
- uint8_t b5 = round_to_5_bits(bgr[0]);
- uint8_t g5 = round_to_5_bits(bgr[1]);
- uint8_t r5 = round_to_5_bits(bgr[2]);
- Color bgr555;
- bgr555.channels.b = (b5 << 3) | (b5 >> 2);
- bgr555.channels.g = (g5 << 3) | (g5 >> 2);
- bgr555.channels.r = (r5 << 3) | (r5 >> 2);
- // Added to distinguish between expanded 555 and 444 colors.
- bgr555.channels.a = 0x55;
- return bgr555;
-}
-
-class CC_EXPORT TextureCompressorETC1 : public TextureCompressor {
- public:
- TextureCompressorETC1() {}
-
- // Compress a texture using ETC1. Note that the |quality| parameter is
- // ignored. The current implementation does not support different quality
- // settings.
- void Compress(const uint8_t* src,
- uint8_t* dst,
- int width,
- int height,
- Quality quality) override;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TextureCompressorETC1);
-};
-
-} // namespace cc
-
-#endif // CC_RASTER_TEXTURE_COMPRESSOR_ETC1_H_
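The header deleted above documents how ETC1 derives a sub-block's candidate colors: the base color plus one of the four luminance modifiers from the selected codeword table (g_codeword_tables), clamped to [0, 255] per channel, as MakeColor in the deleted texture_compressor_etc1.cc does. A small self-contained sketch of that step; the base color below is an arbitrary example, not a value taken from the patch:

  // Sketch of the candidate-color construction described in the deleted
  // header: base color + codeword-table luminance, clamped per channel.
  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  struct Bgr { uint8_t b, g, r; };

  Bgr MakeCandidate(const Bgr& base, int lum) {
    auto clamp255 = [](int v) {
      return static_cast<uint8_t>(std::min(255, std::max(0, v)));
    };
    return {clamp255(base.b + lum), clamp255(base.g + lum),
            clamp255(base.r + lum)};
  }

  int main() {
    const Bgr base{16, 200, 250};  // Arbitrary example base color.
    // Table 0 of g_codeword_tables above is {-8, -2, 2, 8}.
    for (int lum : {-8, -2, 2, 8}) {
      Bgr c = MakeCandidate(base, lum);
      std::printf("(%d, %d, %d)\n", c.b, c.g, c.r);  // Last red clamps at 255.
    }
  }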
diff --git a/chromium/cc/raster/texture_compressor_etc1_sse.cc b/chromium/cc/raster/texture_compressor_etc1_sse.cc
deleted file mode 100644
index f0936885d13..00000000000
--- a/chromium/cc/raster/texture_compressor_etc1_sse.cc
+++ /dev/null
@@ -1,818 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "cc/raster/texture_compressor_etc1_sse.h"
-
-#include <emmintrin.h>
-#include <stdint.h>
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-// Using this header for common functions such as Color handling
-// and codeword table.
-#include "cc/raster/texture_compressor_etc1.h"
-
-namespace cc {
-
-namespace {
-
-inline uint32_t SetETC1MaxError(uint32_t avg_error) {
- // ETC1 codeword table is sorted in ascending order.
- // Our algorithm will try to identify the index that generates the minimum
- // error.
- // The min error calculated during ComputeLuminance main loop will converge
- // towards that value.
- // We use this threshold to determine when it doesn't make sense to iterate
- // further through the array.
- return avg_error + avg_error / 2 + 384;
-}
-
-struct __sse_data {
- // This is used to store raw data.
- uint8_t* block;
- // This is used to store 8 bit packed values.
- __m128i* packed;
- // This is used to store 32 bit zero extended values into 4x4 arrays.
- __m128i* blue;
- __m128i* green;
- __m128i* red;
-};
-
-inline __m128i AddAndClamp(const __m128i x, const __m128i y) {
- static const __m128i color_max = _mm_set1_epi32(0xFF);
- return _mm_max_epi16(_mm_setzero_si128(),
- _mm_min_epi16(_mm_add_epi16(x, y), color_max));
-}
-
-inline __m128i GetColorErrorSSE(const __m128i x, const __m128i y) {
- // Changed from _mm_mullo_epi32 (SSE4) to _mm_mullo_epi16 (SSE2).
- __m128i ret = _mm_sub_epi16(x, y);
- return _mm_mullo_epi16(ret, ret);
-}
-
-inline __m128i AddChannelError(const __m128i x,
- const __m128i y,
- const __m128i z) {
- return _mm_add_epi32(x, _mm_add_epi32(y, z));
-}
-
-inline uint32_t SumSSE(const __m128i x) {
- __m128i sum = _mm_add_epi32(x, _mm_shuffle_epi32(x, 0x4E));
- sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1));
-
- return _mm_cvtsi128_si32(sum);
-}
-
-inline uint32_t GetVerticalError(const __sse_data* data,
- const __m128i* blue_avg,
- const __m128i* green_avg,
- const __m128i* red_avg,
- uint32_t* verror) {
- __m128i error = _mm_setzero_si128();
-
- for (int i = 0; i < 4; i++) {
- error = _mm_add_epi32(error, GetColorErrorSSE(data->blue[i], blue_avg[0]));
- error =
- _mm_add_epi32(error, GetColorErrorSSE(data->green[i], green_avg[0]));
- error = _mm_add_epi32(error, GetColorErrorSSE(data->red[i], red_avg[0]));
- }
-
- error = _mm_add_epi32(error, _mm_shuffle_epi32(error, 0x4E));
-
- verror[0] = _mm_cvtsi128_si32(error);
- verror[1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(error, 0xB1));
-
- return verror[0] + verror[1];
-}
-
-inline uint32_t GetHorizontalError(const __sse_data* data,
- const __m128i* blue_avg,
- const __m128i* green_avg,
- const __m128i* red_avg,
- uint32_t* verror) {
- __m128i error = _mm_setzero_si128();
- int first_index, second_index;
-
- for (int i = 0; i < 2; i++) {
- first_index = 2 * i;
- second_index = first_index + 1;
-
- error = _mm_add_epi32(
- error, GetColorErrorSSE(data->blue[first_index], blue_avg[i]));
- error = _mm_add_epi32(
- error, GetColorErrorSSE(data->blue[second_index], blue_avg[i]));
- error = _mm_add_epi32(
- error, GetColorErrorSSE(data->green[first_index], green_avg[i]));
- error = _mm_add_epi32(
- error, GetColorErrorSSE(data->green[second_index], green_avg[i]));
- error = _mm_add_epi32(error,
- GetColorErrorSSE(data->red[first_index], red_avg[i]));
- error = _mm_add_epi32(
- error, GetColorErrorSSE(data->red[second_index], red_avg[i]));
- }
-
- error = _mm_add_epi32(error, _mm_shuffle_epi32(error, 0x4E));
-
- verror[0] = _mm_cvtsi128_si32(error);
- verror[1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(error, 0xB1));
-
- return verror[0] + verror[1];
-}
-
-inline void GetAvgColors(const __sse_data* data,
- float* output,
- bool* __sse_use_diff) {
- __m128i sum[2], tmp;
-
- // TODO(radu.velea): _mm_avg_epu8 on packed data maybe.
-
- // Compute avg red value.
- // [S0 S0 S1 S1]
- sum[0] = _mm_add_epi32(data->red[0], data->red[1]);
- sum[0] = _mm_add_epi32(sum[0], _mm_shuffle_epi32(sum[0], 0xB1));
-
- // [S2 S2 S3 S3]
- sum[1] = _mm_add_epi32(data->red[2], data->red[3]);
- sum[1] = _mm_add_epi32(sum[1], _mm_shuffle_epi32(sum[1], 0xB1));
-
- float hred[2], vred[2];
- hred[0] = (_mm_cvtsi128_si32(
- _mm_add_epi32(sum[0], _mm_shuffle_epi32(sum[0], 0x4E)))) /
- 8.0f;
- hred[1] = (_mm_cvtsi128_si32(
- _mm_add_epi32(sum[1], _mm_shuffle_epi32(sum[1], 0x4E)))) /
- 8.0f;
-
- tmp = _mm_add_epi32(sum[0], sum[1]);
- vred[0] = (_mm_cvtsi128_si32(tmp)) / 8.0f;
- vred[1] = (_mm_cvtsi128_si32(_mm_shuffle_epi32(tmp, 0x2))) / 8.0f;
-
- // Compute avg green value.
- // [S0 S0 S1 S1]
- sum[0] = _mm_add_epi32(data->green[0], data->green[1]);
- sum[0] = _mm_add_epi32(sum[0], _mm_shuffle_epi32(sum[0], 0xB1));
-
- // [S2 S2 S3 S3]
- sum[1] = _mm_add_epi32(data->green[2], data->green[3]);
- sum[1] = _mm_add_epi32(sum[1], _mm_shuffle_epi32(sum[1], 0xB1));
-
- float hgreen[2], vgreen[2];
- hgreen[0] = (_mm_cvtsi128_si32(
- _mm_add_epi32(sum[0], _mm_shuffle_epi32(sum[0], 0x4E)))) /
- 8.0f;
- hgreen[1] = (_mm_cvtsi128_si32(
- _mm_add_epi32(sum[1], _mm_shuffle_epi32(sum[1], 0x4E)))) /
- 8.0f;
-
- tmp = _mm_add_epi32(sum[0], sum[1]);
- vgreen[0] = (_mm_cvtsi128_si32(tmp)) / 8.0f;
- vgreen[1] = (_mm_cvtsi128_si32(_mm_shuffle_epi32(tmp, 0x2))) / 8.0f;
-
- // Compute avg blue value.
- // [S0 S0 S1 S1]
- sum[0] = _mm_add_epi32(data->blue[0], data->blue[1]);
- sum[0] = _mm_add_epi32(sum[0], _mm_shuffle_epi32(sum[0], 0xB1));
-
- // [S2 S2 S3 S3]
- sum[1] = _mm_add_epi32(data->blue[2], data->blue[3]);
- sum[1] = _mm_add_epi32(sum[1], _mm_shuffle_epi32(sum[1], 0xB1));
-
- float hblue[2], vblue[2];
- hblue[0] = (_mm_cvtsi128_si32(
- _mm_add_epi32(sum[0], _mm_shuffle_epi32(sum[0], 0x4E)))) /
- 8.0f;
- hblue[1] = (_mm_cvtsi128_si32(
- _mm_add_epi32(sum[1], _mm_shuffle_epi32(sum[1], 0x4E)))) /
- 8.0f;
-
- tmp = _mm_add_epi32(sum[0], sum[1]);
- vblue[0] = (_mm_cvtsi128_si32(tmp)) / 8.0f;
- vblue[1] = (_mm_cvtsi128_si32(_mm_shuffle_epi32(tmp, 0x2))) / 8.0f;
-
- // TODO(radu.velea): Return int's instead of floats, based on Quality.
- output[0] = vblue[0];
- output[1] = vgreen[0];
- output[2] = vred[0];
-
- output[3] = vblue[1];
- output[4] = vgreen[1];
- output[5] = vred[1];
-
- output[6] = hblue[0];
- output[7] = hgreen[0];
- output[8] = hred[0];
-
- output[9] = hblue[1];
- output[10] = hgreen[1];
- output[11] = hred[1];
-
- __m128i threshold_upper = _mm_set1_epi32(3);
- __m128i threshold_lower = _mm_set1_epi32(-4);
-
- __m128 factor_v = _mm_set1_ps(31.0f / 255.0f);
- __m128 rounding_v = _mm_set1_ps(0.5f);
- __m128 h_avg_0 = _mm_set_ps(hblue[0], hgreen[0], hred[0], 0);
- __m128 h_avg_1 = _mm_set_ps(hblue[1], hgreen[1], hred[1], 0);
-
- __m128 v_avg_0 = _mm_set_ps(vblue[0], vgreen[0], vred[0], 0);
- __m128 v_avg_1 = _mm_set_ps(vblue[1], vgreen[1], vred[1], 0);
-
- h_avg_0 = _mm_mul_ps(h_avg_0, factor_v);
- h_avg_1 = _mm_mul_ps(h_avg_1, factor_v);
- v_avg_0 = _mm_mul_ps(v_avg_0, factor_v);
- v_avg_1 = _mm_mul_ps(v_avg_1, factor_v);
-
- h_avg_0 = _mm_add_ps(h_avg_0, rounding_v);
- h_avg_1 = _mm_add_ps(h_avg_1, rounding_v);
- v_avg_0 = _mm_add_ps(v_avg_0, rounding_v);
- v_avg_1 = _mm_add_ps(v_avg_1, rounding_v);
-
- __m128i h_avg_0i = _mm_cvttps_epi32(h_avg_0);
- __m128i h_avg_1i = _mm_cvttps_epi32(h_avg_1);
-
- __m128i v_avg_0i = _mm_cvttps_epi32(v_avg_0);
- __m128i v_avg_1i = _mm_cvttps_epi32(v_avg_1);
-
- h_avg_0i = _mm_sub_epi32(h_avg_1i, h_avg_0i);
- v_avg_0i = _mm_sub_epi32(v_avg_1i, v_avg_0i);
-
- __sse_use_diff[0] =
- (0 == _mm_movemask_epi8(_mm_cmplt_epi32(v_avg_0i, threshold_lower)));
- __sse_use_diff[0] &=
- (0 == _mm_movemask_epi8(_mm_cmpgt_epi32(v_avg_0i, threshold_upper)));
-
- __sse_use_diff[1] =
- (0 == _mm_movemask_epi8(_mm_cmplt_epi32(h_avg_0i, threshold_lower)));
- __sse_use_diff[1] &=
- (0 == _mm_movemask_epi8(_mm_cmpgt_epi32(h_avg_0i, threshold_upper)));
-}
-
-void ComputeLuminance(uint8_t* block,
- const Color& base,
- const int sub_block_id,
- const uint8_t* idx_to_num_tab,
- const __sse_data* data,
- const uint32_t expected_error) {
- uint8_t best_tbl_idx = 0;
- uint32_t best_error = 0x7FFFFFFF;
- uint8_t best_mod_idx[8][8]; // [table][texel]
-
- const __m128i base_blue = _mm_set1_epi32(base.channels.b);
- const __m128i base_green = _mm_set1_epi32(base.channels.g);
- const __m128i base_red = _mm_set1_epi32(base.channels.r);
-
- __m128i test_red, test_blue, test_green, tmp, tmp_blue, tmp_green, tmp_red;
- __m128i block_error, mask;
-
- // This will have the minimum errors for each 4 pixels.
- __m128i first_half_min;
- __m128i second_half_min;
-
- // This will have the matching table index combo for each 4 pixels.
- __m128i first_half_pattern;
- __m128i second_half_pattern;
-
- const __m128i first_blue_data_block = data->blue[2 * sub_block_id];
- const __m128i first_green_data_block = data->green[2 * sub_block_id];
- const __m128i first_red_data_block = data->red[2 * sub_block_id];
-
- const __m128i second_blue_data_block = data->blue[2 * sub_block_id + 1];
- const __m128i second_green_data_block = data->green[2 * sub_block_id + 1];
- const __m128i second_red_data_block = data->red[2 * sub_block_id + 1];
-
- uint32_t min;
- // Fail early to increase speed.
- long delta = INT32_MAX;
- uint32_t last_min = INT32_MAX;
-
- const uint8_t shuffle_mask[] = {
- 0x1B, 0x4E, 0xB1, 0xE4}; // Important they are sorted ascending.
-
- for (unsigned int tbl_idx = 0; tbl_idx < 8; ++tbl_idx) {
- tmp = _mm_set_epi32(
- g_codeword_tables[tbl_idx][3], g_codeword_tables[tbl_idx][2],
- g_codeword_tables[tbl_idx][1], g_codeword_tables[tbl_idx][0]);
-
- test_blue = AddAndClamp(tmp, base_blue);
- test_green = AddAndClamp(tmp, base_green);
- test_red = AddAndClamp(tmp, base_red);
-
- first_half_min = _mm_set1_epi32(0x7FFFFFFF);
- second_half_min = _mm_set1_epi32(0x7FFFFFFF);
-
- first_half_pattern = _mm_setzero_si128();
- second_half_pattern = _mm_setzero_si128();
-
- for (uint8_t imm8 : shuffle_mask) {
- switch (imm8) {
- case 0x1B:
- tmp_blue = _mm_shuffle_epi32(test_blue, 0x1B);
- tmp_green = _mm_shuffle_epi32(test_green, 0x1B);
- tmp_red = _mm_shuffle_epi32(test_red, 0x1B);
- break;
- case 0x4E:
- tmp_blue = _mm_shuffle_epi32(test_blue, 0x4E);
- tmp_green = _mm_shuffle_epi32(test_green, 0x4E);
- tmp_red = _mm_shuffle_epi32(test_red, 0x4E);
- break;
- case 0xB1:
- tmp_blue = _mm_shuffle_epi32(test_blue, 0xB1);
- tmp_green = _mm_shuffle_epi32(test_green, 0xB1);
- tmp_red = _mm_shuffle_epi32(test_red, 0xB1);
- break;
- case 0xE4:
- tmp_blue = _mm_shuffle_epi32(test_blue, 0xE4);
- tmp_green = _mm_shuffle_epi32(test_green, 0xE4);
- tmp_red = _mm_shuffle_epi32(test_red, 0xE4);
- break;
- default:
- tmp_blue = test_blue;
- tmp_green = test_green;
- tmp_red = test_red;
- }
-
- tmp = _mm_set1_epi32(imm8);
-
- block_error =
- AddChannelError(GetColorErrorSSE(tmp_blue, first_blue_data_block),
- GetColorErrorSSE(tmp_green, first_green_data_block),
- GetColorErrorSSE(tmp_red, first_red_data_block));
-
- // Save winning pattern.
- first_half_pattern = _mm_max_epi16(
- first_half_pattern,
- _mm_and_si128(tmp, _mm_cmpgt_epi32(first_half_min, block_error)));
- // Should use _mm_min_epi32(first_half_min, block_error); from SSE4
- // otherwise we have a small performance penalty.
- mask = _mm_cmplt_epi32(block_error, first_half_min);
- first_half_min = _mm_add_epi32(_mm_and_si128(mask, block_error),
- _mm_andnot_si128(mask, first_half_min));
-
- // Compute second part of the block.
- block_error =
- AddChannelError(GetColorErrorSSE(tmp_blue, second_blue_data_block),
- GetColorErrorSSE(tmp_green, second_green_data_block),
- GetColorErrorSSE(tmp_red, second_red_data_block));
-
- // Save winning pattern.
- second_half_pattern = _mm_max_epi16(
- second_half_pattern,
- _mm_and_si128(tmp, _mm_cmpgt_epi32(second_half_min, block_error)));
- // Should use _mm_min_epi32(second_half_min, block_error); from SSE4
- // otherwise we have a small performance penalty.
- mask = _mm_cmplt_epi32(block_error, second_half_min);
- second_half_min = _mm_add_epi32(_mm_and_si128(mask, block_error),
- _mm_andnot_si128(mask, second_half_min));
- }
-
- first_half_min = _mm_add_epi32(first_half_min, second_half_min);
- first_half_min =
- _mm_add_epi32(first_half_min, _mm_shuffle_epi32(first_half_min, 0x4E));
- first_half_min =
- _mm_add_epi32(first_half_min, _mm_shuffle_epi32(first_half_min, 0xB1));
-
- min = _mm_cvtsi128_si32(first_half_min);
-
- delta = min - last_min;
- last_min = min;
-
- if (min < best_error) {
- best_tbl_idx = tbl_idx;
- best_error = min;
-
- best_mod_idx[tbl_idx][0] =
- (_mm_cvtsi128_si32(first_half_pattern) >> (0)) & 3;
- best_mod_idx[tbl_idx][4] =
- (_mm_cvtsi128_si32(second_half_pattern) >> (0)) & 3;
-
- best_mod_idx[tbl_idx][1] =
- (_mm_cvtsi128_si32(_mm_shuffle_epi32(first_half_pattern, 0x1)) >>
- (2)) &
- 3;
- best_mod_idx[tbl_idx][5] =
- (_mm_cvtsi128_si32(_mm_shuffle_epi32(second_half_pattern, 0x1)) >>
- (2)) &
- 3;
-
- best_mod_idx[tbl_idx][2] =
- (_mm_cvtsi128_si32(_mm_shuffle_epi32(first_half_pattern, 0x2)) >>
- (4)) &
- 3;
- best_mod_idx[tbl_idx][6] =
- (_mm_cvtsi128_si32(_mm_shuffle_epi32(second_half_pattern, 0x2)) >>
- (4)) &
- 3;
-
- best_mod_idx[tbl_idx][3] =
- (_mm_cvtsi128_si32(_mm_shuffle_epi32(first_half_pattern, 0x3)) >>
- (6)) &
- 3;
- best_mod_idx[tbl_idx][7] =
- (_mm_cvtsi128_si32(_mm_shuffle_epi32(second_half_pattern, 0x3)) >>
- (6)) &
- 3;
-
- if (best_error == 0) {
- break;
- }
- } else if (delta > 0 && expected_error < min) {
- // The error is growing and is well beyond expected threshold.
- break;
- }
- }
-
- WriteCodewordTable(block, sub_block_id, best_tbl_idx);
-
- uint32_t pix_data = 0;
- uint8_t mod_idx;
- uint8_t pix_idx;
- uint32_t lsb;
- uint32_t msb;
- int texel_num;
-
- for (unsigned int i = 0; i < 8; ++i) {
- mod_idx = best_mod_idx[best_tbl_idx][i];
- pix_idx = g_mod_to_pix[mod_idx];
-
- lsb = pix_idx & 0x1;
- msb = pix_idx >> 1;
-
- // Obtain the texel number as specified in the standard.
- texel_num = idx_to_num_tab[i];
- pix_data |= msb << (texel_num + 16);
- pix_data |= lsb << (texel_num);
- }
-
- WritePixelData(block, pix_data);
-}
-
-void CompressBlock(uint8_t* dst, __sse_data* data) {
- // First 3 values are for vertical 1, second 3 vertical 2, third 3 horizontal
- // 1, last 3
- // horizontal 2.
- float __sse_avg_colors[12] = {
- 0,
- };
- bool use_differential[2] = {true, true};
- GetAvgColors(data, __sse_avg_colors, use_differential);
- Color sub_block_avg[4];
-
- // TODO(radu.velea): Remove floating point operations and use only int's +
- // normal rounding and shifts for reduced Quality.
- for (int i = 0, j = 1; i < 4; i += 2, j += 2) {
- if (use_differential[i / 2] == false) {
- sub_block_avg[i] = MakeColor444(&__sse_avg_colors[i * 3]);
- sub_block_avg[j] = MakeColor444(&__sse_avg_colors[j * 3]);
- } else {
- sub_block_avg[i] = MakeColor555(&__sse_avg_colors[i * 3]);
- sub_block_avg[j] = MakeColor555(&__sse_avg_colors[j * 3]);
- }
- }
-
- __m128i red_avg[2], green_avg[2], blue_avg[2];
-
- // TODO(radu.velea): Perfect accuracy, maybe skip floating variables.
- blue_avg[0] = _mm_set_epi32(static_cast<int>(__sse_avg_colors[3]),
- static_cast<int>(__sse_avg_colors[3]),
- static_cast<int>(__sse_avg_colors[0]),
- static_cast<int>(__sse_avg_colors[0]));
-
- green_avg[0] = _mm_set_epi32(static_cast<int>(__sse_avg_colors[4]),
- static_cast<int>(__sse_avg_colors[4]),
- static_cast<int>(__sse_avg_colors[1]),
- static_cast<int>(__sse_avg_colors[1]));
-
- red_avg[0] = _mm_set_epi32(static_cast<int>(__sse_avg_colors[5]),
- static_cast<int>(__sse_avg_colors[5]),
- static_cast<int>(__sse_avg_colors[2]),
- static_cast<int>(__sse_avg_colors[2]));
-
- uint32_t vertical_error[2];
- GetVerticalError(data, blue_avg, green_avg, red_avg, vertical_error);
-
- // TODO(radu.velea): Perfect accuracy, maybe skip floating variables.
- blue_avg[0] = _mm_set1_epi32(static_cast<int>(__sse_avg_colors[6]));
- blue_avg[1] = _mm_set1_epi32(static_cast<int>(__sse_avg_colors[9]));
-
- green_avg[0] = _mm_set1_epi32(static_cast<int>(__sse_avg_colors[7]));
- green_avg[1] = _mm_set1_epi32(static_cast<int>(__sse_avg_colors[10]));
-
- red_avg[0] = _mm_set1_epi32(static_cast<int>(__sse_avg_colors[8]));
- red_avg[1] = _mm_set1_epi32(static_cast<int>(__sse_avg_colors[11]));
-
- uint32_t horizontal_error[2];
- GetHorizontalError(data, blue_avg, green_avg, red_avg, horizontal_error);
-
- bool flip = (horizontal_error[0] + horizontal_error[1]) <
- (vertical_error[0] + vertical_error[1]);
- uint32_t* expected_errors = flip ? horizontal_error : vertical_error;
-
- // Clear destination buffer so that we can "or" in the results.
- memset(dst, 0, 8);
-
- WriteDiff(dst, use_differential[!!flip]);
- WriteFlip(dst, flip);
-
- uint8_t sub_block_off_0 = flip ? 2 : 0;
- uint8_t sub_block_off_1 = sub_block_off_0 + 1;
-
- if (use_differential[!!flip]) {
- WriteColors555(dst, sub_block_avg[sub_block_off_0],
- sub_block_avg[sub_block_off_1]);
- } else {
- WriteColors444(dst, sub_block_avg[sub_block_off_0],
- sub_block_avg[sub_block_off_1]);
- }
-
- if (!flip) {
- // Transpose vertical data into horizontal lines.
- __m128i tmp;
- for (int i = 0; i < 4; i += 2) {
- tmp = data->blue[i];
- data->blue[i] = _mm_add_epi32(
- _mm_move_epi64(data->blue[i]),
- _mm_shuffle_epi32(_mm_move_epi64(data->blue[i + 1]), 0x4E));
- data->blue[i + 1] = _mm_add_epi32(
- _mm_move_epi64(_mm_shuffle_epi32(tmp, 0x4E)),
- _mm_shuffle_epi32(
- _mm_move_epi64(_mm_shuffle_epi32(data->blue[i + 1], 0x4E)),
- 0x4E));
-
- tmp = data->green[i];
- data->green[i] = _mm_add_epi32(
- _mm_move_epi64(data->green[i]),
- _mm_shuffle_epi32(_mm_move_epi64(data->green[i + 1]), 0x4E));
- data->green[i + 1] = _mm_add_epi32(
- _mm_move_epi64(_mm_shuffle_epi32(tmp, 0x4E)),
- _mm_shuffle_epi32(
- _mm_move_epi64(_mm_shuffle_epi32(data->green[i + 1], 0x4E)),
- 0x4E));
-
- tmp = data->red[i];
- data->red[i] = _mm_add_epi32(
- _mm_move_epi64(data->red[i]),
- _mm_shuffle_epi32(_mm_move_epi64(data->red[i + 1]), 0x4E));
- data->red[i + 1] = _mm_add_epi32(
- _mm_move_epi64(_mm_shuffle_epi32(tmp, 0x4E)),
- _mm_shuffle_epi32(
- _mm_move_epi64(_mm_shuffle_epi32(data->red[i + 1], 0x4E)), 0x4E));
- }
-
- tmp = data->blue[1];
- data->blue[1] = data->blue[2];
- data->blue[2] = tmp;
-
- tmp = data->green[1];
- data->green[1] = data->green[2];
- data->green[2] = tmp;
-
- tmp = data->red[1];
- data->red[1] = data->red[2];
- data->red[2] = tmp;
- }
-
- // Compute luminance for the first sub block.
- ComputeLuminance(dst, sub_block_avg[sub_block_off_0], 0,
- g_idx_to_num[sub_block_off_0], data,
- SetETC1MaxError(expected_errors[0]));
- // Compute luminance for the second sub block.
- ComputeLuminance(dst, sub_block_avg[sub_block_off_1], 1,
- g_idx_to_num[sub_block_off_1], data,
- SetETC1MaxError(expected_errors[1]));
-}
-
-static void ExtractBlock(uint8_t* dst, const uint8_t* src, int width) {
- for (int j = 0; j < 4; ++j) {
- memcpy(&dst[j * 4 * 4], src, 4 * 4);
- src += width * 4;
- }
-}
-
-inline bool TransposeBlock(uint8_t* block, __m128i* transposed) {
-  // This function transforms an incoming block of RGBA or BGRA pixels into 4
- // registers, each containing the data corresponding for a single channel.
- // Ex: transposed[0] will have all the R values for a RGBA block,
- // transposed[1] will have G, etc.
- // The values are packed as 8 bit unsigned values in the SSE registers.
-
- // Before doing any work we check if the block is solid.
- __m128i tmp3, tmp2, tmp1, tmp0;
- __m128i test_solid = _mm_set1_epi32(*((uint32_t*)block));
- uint16_t mask = 0xFFFF;
-
- // a0,a1,a2,...a7, ...a15
- transposed[0] = _mm_loadu_si128((__m128i*)(block));
- // b0, b1,b2,...b7.... b15
- transposed[1] = _mm_loadu_si128((__m128i*)(block + 16));
- // c0, c1,c2,...c7....c15
- transposed[2] = _mm_loadu_si128((__m128i*)(block + 32));
- // d0,d1,d2,...d7....d15
- transposed[3] = _mm_loadu_si128((__m128i*)(block + 48));
-
- for (int i = 0; i < 4; i++) {
- mask &= _mm_movemask_epi8(_mm_cmpeq_epi8(transposed[i], test_solid));
- }
-
- if (mask == 0xFFFF) {
- // Block is solid, no need to do any more work.
- return false;
- }
-
- // a0,b0, a1,b1, a2,b2, a3,b3,....a7,b7
- tmp0 = _mm_unpacklo_epi8(transposed[0], transposed[1]);
- // c0,d0, c1,d1, c2,d2, c3,d3,... c7,d7
- tmp1 = _mm_unpacklo_epi8(transposed[2], transposed[3]);
- // a8,b8, a9,b9, a10,b10, a11,b11,...a15,b15
- tmp2 = _mm_unpackhi_epi8(transposed[0], transposed[1]);
- // c8,d8, c9,d9, c10,d10, c11,d11,...c15,d15
- tmp3 = _mm_unpackhi_epi8(transposed[2], transposed[3]);
-
- // a0,a8, b0,b8, a1,a9, b1,b9, ....a3,a11, b3,b11
- transposed[0] = _mm_unpacklo_epi8(tmp0, tmp2);
- // a4,a12, b4,b12, a5,a13, b5,b13,....a7,a15,b7,b15
- transposed[1] = _mm_unpackhi_epi8(tmp0, tmp2);
- // c0,c8, d0,d8, c1,c9, d1,d9.....d3,d11
- transposed[2] = _mm_unpacklo_epi8(tmp1, tmp3);
- // c4,c12,d4,d12, c5,c13, d5,d13,....d7,d15
- transposed[3] = _mm_unpackhi_epi8(tmp1, tmp3);
-
- // a0,a8, b0,b8, c0,c8, d0,d8, a1,a9, b1,b9, c1,c9, d1,d9
- tmp0 = _mm_unpacklo_epi32(transposed[0], transposed[2]);
- // a2,a10, b2,b10, c2,c10, d2,d10, a3,a11, b3,b11, c3,c11, d3,d11
- tmp1 = _mm_unpackhi_epi32(transposed[0], transposed[2]);
- // a4,a12, b4,b12, c4,c12, d4,d12, a5,a13, b5,b13, c5,c13, d5,d13
- tmp2 = _mm_unpacklo_epi32(transposed[1], transposed[3]);
- // a6,a14, b6,b14, c6,c14, d6,d14, a7,a15, b7,b15, c7,c15, d7,d15
- tmp3 = _mm_unpackhi_epi32(transposed[1], transposed[3]);
-
- // a0,a4, a8,a12, b0,b4, b8,b12, c0,c4, c8,c12, d0,d4, d8,d12
- transposed[0] = _mm_unpacklo_epi8(tmp0, tmp2);
- // a1,a5, a9,a13, b1,b5, b9,b13, c1,c5, c9,c13, d1,d5, d9,d13
- transposed[1] = _mm_unpackhi_epi8(tmp0, tmp2);
- // a2,a6, a10,a14, b2,b6, b10,b14, c2,c6, c10,c14, d2,d6, d10,d14
- transposed[2] = _mm_unpacklo_epi8(tmp1, tmp3);
- // a3,a7, a11,a15, b3,b7, b11,b15, c3,c7, c11,c15, d3,d7, d11,d15
- transposed[3] = _mm_unpackhi_epi8(tmp1, tmp3);
-
- return true;
-}
-
-inline void UnpackBlock(__m128i* packed,
- __m128i* red,
- __m128i* green,
- __m128i* blue,
- __m128i* alpha) {
- const __m128i zero = _mm_set1_epi8(0);
- __m128i tmp_low, tmp_high;
-
- // Unpack red.
- tmp_low = _mm_unpacklo_epi8(packed[0], zero);
- tmp_high = _mm_unpackhi_epi8(packed[0], zero);
-
- red[0] = _mm_unpacklo_epi16(tmp_low, zero);
- red[1] = _mm_unpackhi_epi16(tmp_low, zero);
-
- red[2] = _mm_unpacklo_epi16(tmp_high, zero);
- red[3] = _mm_unpackhi_epi16(tmp_high, zero);
-
- // Unpack green.
- tmp_low = _mm_unpacklo_epi8(packed[1], zero);
- tmp_high = _mm_unpackhi_epi8(packed[1], zero);
-
- green[0] = _mm_unpacklo_epi16(tmp_low, zero);
- green[1] = _mm_unpackhi_epi16(tmp_low, zero);
-
- green[2] = _mm_unpacklo_epi16(tmp_high, zero);
- green[3] = _mm_unpackhi_epi16(tmp_high, zero);
-
- // Unpack blue.
- tmp_low = _mm_unpacklo_epi8(packed[2], zero);
- tmp_high = _mm_unpackhi_epi8(packed[2], zero);
-
- blue[0] = _mm_unpacklo_epi16(tmp_low, zero);
- blue[1] = _mm_unpackhi_epi16(tmp_low, zero);
-
- blue[2] = _mm_unpacklo_epi16(tmp_high, zero);
- blue[3] = _mm_unpackhi_epi16(tmp_high, zero);
-
- // Unpack alpha - unused for ETC1.
- tmp_low = _mm_unpacklo_epi8(packed[3], zero);
- tmp_high = _mm_unpackhi_epi8(packed[3], zero);
-
- alpha[0] = _mm_unpacklo_epi16(tmp_low, zero);
- alpha[1] = _mm_unpackhi_epi16(tmp_low, zero);
-
- alpha[2] = _mm_unpacklo_epi16(tmp_high, zero);
- alpha[3] = _mm_unpackhi_epi16(tmp_high, zero);
-}
-
-inline void CompressSolid(uint8_t* dst, uint8_t* block) {
- // Clear destination buffer so that we can "or" in the results.
- memset(dst, 0, 8);
-
- const float src_color_float[3] = {static_cast<float>(block[0]),
- static_cast<float>(block[1]),
- static_cast<float>(block[2])};
- const Color base = MakeColor555(src_color_float);
- const __m128i base_v =
- _mm_set_epi32(0, base.channels.r, base.channels.g, base.channels.b);
-
- const __m128i constant = _mm_set_epi32(0, block[2], block[1], block[0]);
- __m128i lum;
- __m128i colors[4];
- static const __m128i rgb =
- _mm_set_epi32(0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
-
- WriteDiff(dst, true);
- WriteFlip(dst, false);
-
- WriteColors555(dst, base, base);
-
- uint8_t best_tbl_idx = 0;
- uint8_t best_mod_idx = 0;
- uint32_t best_mod_err = INT32_MAX;
-
- for (unsigned int tbl_idx = 0; tbl_idx < 8; ++tbl_idx) {
- lum = _mm_set_epi32(
- g_codeword_tables[tbl_idx][3], g_codeword_tables[tbl_idx][2],
- g_codeword_tables[tbl_idx][1], g_codeword_tables[tbl_idx][0]);
- colors[0] = AddAndClamp(base_v, _mm_shuffle_epi32(lum, 0x0));
- colors[1] = AddAndClamp(base_v, _mm_shuffle_epi32(lum, 0x55));
- colors[2] = AddAndClamp(base_v, _mm_shuffle_epi32(lum, 0xAA));
- colors[3] = AddAndClamp(base_v, _mm_shuffle_epi32(lum, 0xFF));
-
- for (int i = 0; i < 4; i++) {
- uint32_t mod_err =
- SumSSE(GetColorErrorSSE(constant, _mm_and_si128(colors[i], rgb)));
- colors[i] = _mm_and_si128(colors[i], rgb);
- if (mod_err < best_mod_err) {
- best_tbl_idx = tbl_idx;
- best_mod_idx = i;
- best_mod_err = mod_err;
-
- if (mod_err == 0) {
- break; // We cannot do any better than this.
- }
- }
- }
- }
-
- WriteCodewordTable(dst, 0, best_tbl_idx);
- WriteCodewordTable(dst, 1, best_tbl_idx);
-
- uint8_t pix_idx = g_mod_to_pix[best_mod_idx];
- uint32_t lsb = pix_idx & 0x1;
- uint32_t msb = pix_idx >> 1;
-
- uint32_t pix_data = 0;
- for (unsigned int i = 0; i < 2; ++i) {
- for (unsigned int j = 0; j < 8; ++j) {
- // Obtain the texel number as specified in the standard.
- int texel_num = g_idx_to_num[i][j];
- pix_data |= msb << (texel_num + 16);
- pix_data |= lsb << (texel_num);
- }
- }
-
- WritePixelData(dst, pix_data);
-}
-
-} // namespace
-
-void TextureCompressorETC1SSE::Compress(const uint8_t* src,
- uint8_t* dst,
- int width,
- int height,
- Quality quality) {
- DCHECK_GE(width, 4);
- DCHECK_EQ((width & 3), 0);
- DCHECK_GE(height, 4);
- DCHECK_EQ((height & 3), 0);
-
- alignas(16) uint8_t block[64];
- __m128i packed[4];
- __m128i red[4], green[4], blue[4], alpha[4];
- __sse_data data;
-
- for (int y = 0; y < height; y += 4, src += width * 4 * 4) {
- for (int x = 0; x < width; x += 4, dst += 8) {
- ExtractBlock(block, src + x * 4, width);
- if (TransposeBlock(block, packed) == false) {
- CompressSolid(dst, block);
- } else {
- UnpackBlock(packed, blue, green, red, alpha);
-
- data.block = block;
- data.packed = packed;
- data.red = red;
- data.blue = blue;
- data.green = green;
-
- CompressBlock(dst, &data);
- }
- }
- }
-}
-
-} // namespace cc
diff --git a/chromium/cc/raster/texture_compressor_etc1_sse.h b/chromium/cc/raster/texture_compressor_etc1_sse.h
deleted file mode 100644
index 3c186c63ce7..00000000000
--- a/chromium/cc/raster/texture_compressor_etc1_sse.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CC_RASTER_TEXTURE_COMPRESSOR_ETC1_SSE_H_
-#define CC_RASTER_TEXTURE_COMPRESSOR_ETC1_SSE_H_
-
-#include <stdint.h>
-
-#include "base/macros.h"
-#include "cc/raster/texture_compressor.h"
-
-namespace cc {
-
-class CC_EXPORT TextureCompressorETC1SSE : public TextureCompressor {
- public:
- TextureCompressorETC1SSE() {}
-
- // Compress a texture using ETC1. Note that the |quality| parameter is
- // ignored. The current implementation does not support different quality
- // settings.
- void Compress(const uint8_t* src,
- uint8_t* dst,
- int width,
- int height,
- Quality quality) override;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TextureCompressorETC1SSE);
-};
-
-} // namespace cc
-
-#endif // CC_RASTER_TEXTURE_COMPRESSOR_ETC1_SSE_H_
diff --git a/chromium/cc/raster/texture_compressor_etc1_unittest.cc b/chromium/cc/raster/texture_compressor_etc1_unittest.cc
deleted file mode 100644
index 4857b692612..00000000000
--- a/chromium/cc/raster/texture_compressor_etc1_unittest.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "cc/raster/texture_compressor.h"
-
-#include <stdint.h>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace cc {
-namespace {
-
-const int kImageWidth = 256;
-const int kImageHeight = 256;
-const int kImageChannels = 4;
-const int kImageSizeInBytes = kImageWidth * kImageHeight * kImageChannels;
-
-TEST(TextureCompressorETC1Test, Compress256x256Ratio) {
- std::unique_ptr<TextureCompressor> compressor =
- TextureCompressor::Create(TextureCompressor::kFormatETC1);
- uint8_t src[kImageSizeInBytes];
- uint8_t dst[kImageSizeInBytes];
- const unsigned int kImagePoison = 0xDEADBEEF;
-
- // Poison destination bytes so we can see how much has been
- // overwritten by compression algorithm.
- uint32_t* dst_32 = reinterpret_cast<uint32_t*>(dst);
- for (int i = 0; i < kImageWidth * kImageHeight; i++) {
- dst_32[i] = kImagePoison;
- }
-
- // Generate test texture.
- for (int i = 0; i < kImageSizeInBytes; i++) {
- src[i] = i % 256;
- }
-
- compressor->Compress(src, dst, kImageWidth, kImageHeight,
- TextureCompressor::kQualityLow);
-
- int compressed_size = 0;
- for (compressed_size = 0; compressed_size < kImageWidth * kImageHeight;
- compressed_size++) {
- if (dst_32[compressed_size] == kImagePoison) {
- // Represents size in bytes of the compressed block.
- compressed_size = compressed_size * 4;
- break;
- }
- }
-
- // Check if compression ratio is 8:1 for RGBA or BGRA images, after discarding
- // alpha channel.
- EXPECT_EQ(kImageSizeInBytes, compressed_size * 8);
-}
-
-} // namespace
-} // namespace cc
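For reference, the 8:1 ratio the deleted test checks follows directly from the sizes involved: the 256x256 RGBA source is 256 * 256 * 4 = 262,144 bytes, while ETC1 (as in the deleted Compress loops above, which emit 8 bytes per 4x4 block) produces (256/4) * (256/4) * 8 = 32,768 bytes, and 262,144 / 32,768 = 8.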
diff --git a/chromium/cc/raster/texture_compressor_perftest.cc b/chromium/cc/raster/texture_compressor_perftest.cc
deleted file mode 100644
index e444c30b33f..00000000000
--- a/chromium/cc/raster/texture_compressor_perftest.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "cc/base/lap_timer.h"
-#include "cc/raster/texture_compressor.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
-
-namespace cc {
-namespace {
-
-const int kTimeLimitMillis = 2000;
-const int kWarmupRuns = 5;
-const int kTimeCheckInterval = 10;
-
-const int kImageWidth = 256;
-const int kImageHeight = 256;
-const int kImageChannels = 4;
-const int kImageSizeInBytes = kImageWidth * kImageHeight * kImageChannels;
-
-std::string FormatName(TextureCompressor::Format format) {
- switch (format) {
- case TextureCompressor::kFormatETC1:
- return "ETC1";
- }
-
- NOTREACHED();
- return "";
-}
-
-std::string QualityName(TextureCompressor::Quality quality) {
- switch (quality) {
- case TextureCompressor::kQualityLow:
- return "Low";
- case TextureCompressor::kQualityMedium:
- return "Medium";
- case TextureCompressor::kQualityHigh:
- return "High";
- }
-
- NOTREACHED();
- return "";
-}
-
-class TextureCompressorPerfTest
- : public testing::TestWithParam<
- ::testing::tuple<TextureCompressor::Quality,
- TextureCompressor::Format>> {
- public:
- TextureCompressorPerfTest()
- : timer_(kWarmupRuns,
- base::TimeDelta::FromMilliseconds(kTimeLimitMillis),
- kTimeCheckInterval) {}
-
- void SetUp() override {
- TextureCompressor::Format format = ::testing::get<1>(GetParam());
- compressor_ = TextureCompressor::Create(format);
- }
-
- void RunTest(const std::string& name) {
- TextureCompressor::Quality quality = ::testing::get<0>(GetParam());
- timer_.Reset();
- do {
- compressor_->Compress(src_, dst_, kImageWidth, kImageHeight, quality);
- timer_.NextLap();
- } while (!timer_.HasTimeLimitExpired());
-
- TextureCompressor::Format format = ::testing::get<1>(GetParam());
- std::string str = FormatName(format) + " " + QualityName(quality);
- perf_test::PrintResult("Compress256x256", name, str, timer_.MsPerLap(),
- "us", true);
- }
-
- protected:
- LapTimer timer_;
- std::unique_ptr<TextureCompressor> compressor_;
- uint8_t src_[kImageSizeInBytes];
- uint8_t dst_[kImageSizeInBytes];
-};
-
-TEST_P(TextureCompressorPerfTest, Compress256x256BlackAndWhiteGradientImage) {
- for (int i = 0; i < kImageSizeInBytes; ++i)
- src_[i] = i % 256;
-
- RunTest("BlackAndWhiteGradientImage");
-}
-
-TEST_P(TextureCompressorPerfTest, Compress256x256SolidBlackImage) {
- memset(src_, 0, kImageSizeInBytes);
-
- RunTest("SolidBlackImage");
-}
-
-TEST_P(TextureCompressorPerfTest, Compress256x256SolidColorImage) {
- for (int i = 0; i < kImageSizeInBytes; ++i)
- src_[i] = (4 - i % 4) * 50;
-
- RunTest("SolidColorImage");
-}
-
-TEST_P(TextureCompressorPerfTest, Compress256x256RandomColorImage) {
- unsigned int kImageSeed = 1234567890;
- srand(kImageSeed);
- for (int i = 0; i < kImageSizeInBytes; ++i)
- src_[i] = rand() % 256; // NOLINT
-
- RunTest("RandomColorImage");
-}
-
-INSTANTIATE_TEST_CASE_P(
- TextureCompressorPerfTests,
- TextureCompressorPerfTest,
- ::testing::Combine(::testing::Values(TextureCompressor::kQualityLow,
- TextureCompressor::kQualityMedium,
- TextureCompressor::kQualityHigh),
- ::testing::Values(TextureCompressor::kFormatETC1)));
-
-} // namespace
-} // namespace cc
diff --git a/chromium/cc/raster/zero_copy_raster_buffer_provider.cc b/chromium/cc/raster/zero_copy_raster_buffer_provider.cc
index 037e8a366ce..21010d76efd 100644
--- a/chromium/cc/raster/zero_copy_raster_buffer_provider.cc
+++ b/chromium/cc/raster/zero_copy_raster_buffer_provider.cc
@@ -271,4 +271,8 @@ uint64_t ZeroCopyRasterBufferProvider::SetReadyToDrawCallback(
void ZeroCopyRasterBufferProvider::Shutdown() {}
+bool ZeroCopyRasterBufferProvider::CheckRasterFinishedQueries() {
+ return false;
+}
+
} // namespace cc
diff --git a/chromium/cc/raster/zero_copy_raster_buffer_provider.h b/chromium/cc/raster/zero_copy_raster_buffer_provider.h
index 68f78bfb2c7..106f7c9bbc7 100644
--- a/chromium/cc/raster/zero_copy_raster_buffer_provider.h
+++ b/chromium/cc/raster/zero_copy_raster_buffer_provider.h
@@ -49,6 +49,7 @@ class CC_EXPORT ZeroCopyRasterBufferProvider : public RasterBufferProvider {
const base::Closure& callback,
uint64_t pending_callback_id) const override;
void Shutdown() override;
+ bool CheckRasterFinishedQueries() override;
private:
std::unique_ptr<base::trace_event::ConvertableToTraceFormat> StateAsValue()
diff --git a/chromium/cc/resources/resource_pool.cc b/chromium/cc/resources/resource_pool.cc
index 9d8c3dacaad..05d6fd97386 100644
--- a/chromium/cc/resources/resource_pool.cc
+++ b/chromium/cc/resources/resource_pool.cc
@@ -12,7 +12,6 @@
#include "base/atomic_sequence_num.h"
#include "base/format_macros.h"
-#include "base/memory/memory_coordinator_client_registry.h"
#include "base/memory/shared_memory_handle.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
@@ -23,7 +22,10 @@
#include "components/viz/client/client_resource_provider.h"
#include "components/viz/common/gpu/context_provider.h"
#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
using base::trace_event::MemoryAllocatorDump;
using base::trace_event::MemoryDumpLevelOfDetail;
@@ -66,6 +68,21 @@ bool ResourceMeetsSizeRequirements(const gfx::Size& requested_size,
constexpr base::TimeDelta ResourcePool::kDefaultExpirationDelay;
+void ResourcePool::GpuBacking::InitOverlayCandidateAndTextureTarget(
+ const viz::ResourceFormat format,
+ const gpu::Capabilities& caps,
+ bool use_gpu_memory_buffer_resources) {
+ overlay_candidate = use_gpu_memory_buffer_resources &&
+ caps.texture_storage_image &&
+ IsGpuMemoryBufferFormatSupported(format);
+ if (overlay_candidate) {
+ texture_target = gpu::GetBufferTextureTarget(gfx::BufferUsage::SCANOUT,
+ BufferFormat(format), caps);
+ } else {
+ texture_target = GL_TEXTURE_2D;
+ }
+}
+
ResourcePool::ResourcePool(
viz::ClientResourceProvider* resource_provider,
viz::ContextProvider* context_provider,
@@ -81,8 +98,6 @@ ResourcePool::ResourcePool(
weak_ptr_factory_(this) {
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "cc::ResourcePool", task_runner_.get());
- // Register this component with base::MemoryCoordinatorClientRegistry.
- base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
memory_pressure_listener_.reset(
new base::MemoryPressureListener(base::BindRepeating(
&ResourcePool::OnMemoryPressure, weak_ptr_factory_.GetWeakPtr())));
@@ -91,8 +106,6 @@ ResourcePool::ResourcePool(
ResourcePool::~ResourcePool() {
base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
this);
- // Unregister this component with memory_coordinator::ClientRegistry.
- base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
DCHECK_EQ(0u, in_use_resources_.size());
@@ -488,8 +501,11 @@ void ResourcePool::EvictExpiredResources() {
// If nothing is evictable, we have deleted one (and possibly more)
// resources without any new activity. Flush to ensure these deletions are
// processed.
- if (context_provider_)
- context_provider_->ContextGL()->ShallowFlushCHROMIUM();
+ if (context_provider_) {
+ // Flush any ContextGL work as well as any SharedImageInterface work.
+ context_provider_->ContextGL()->OrderingBarrierCHROMIUM();
+ context_provider_->ContextSupport()->FlushPendingWork();
+ }
return;
}
@@ -548,17 +564,6 @@ bool ResourcePool::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
return true;
}
-void ResourcePool::OnPurgeMemory() {
- // Release all resources, regardless of how recently they were used.
- EvictResourcesNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
-}
-
-void ResourcePool::OnMemoryStateChange(base::MemoryState state) {
- // While in a SUSPENDED state, we don't put resources back into the pool
- // when they become available. Instead we free them immediately.
- evict_busy_resources_when_unused_ = state == base::MemoryState::SUSPENDED;
-}
-
void ResourcePool::OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level) {
switch (level) {
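
A minimal sketch of the listener-based purge pattern the changes above adopt, assuming only base::MemoryPressureListener and base::BindRepeating as already used in this diff; PressureAwarePool and PurgeEverything() are hypothetical stand-ins, not cc classes.

#include "base/bind.h"
#include "base/memory/memory_pressure_listener.h"

class PressureAwarePool {
 public:
  PressureAwarePool()
      : memory_pressure_listener_(base::BindRepeating(
            &PressureAwarePool::OnMemoryPressure, base::Unretained(this))) {}

 private:
  void OnMemoryPressure(
      base::MemoryPressureListener::MemoryPressureLevel level) {
    // Only CRITICAL triggers a purge in this sketch; the ResourcePool tests
    // above exercise exactly this level to drop unused resources.
    if (level ==
        base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
      PurgeEverything();  // Hypothetical stand-in for the pool's eviction call.
    }
  }

  void PurgeEverything() {}

  base::MemoryPressureListener memory_pressure_listener_;
};
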
diff --git a/chromium/cc/resources/resource_pool.h b/chromium/cc/resources/resource_pool.h
index 0fe162d74f7..b7928fcae8c 100644
--- a/chromium/cc/resources/resource_pool.h
+++ b/chromium/cc/resources/resource_pool.h
@@ -13,7 +13,6 @@
#include "base/containers/circular_deque.h"
#include "base/macros.h"
-#include "base/memory/memory_coordinator_client.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
@@ -34,6 +33,10 @@ namespace base {
class SingleThreadTaskRunner;
}
+namespace gpu {
+struct Capabilities;
+}
+
namespace viz {
class ClientResourceProvider;
class ContextProvider;
@@ -41,8 +44,7 @@ class ContextProvider;
namespace cc {
-class CC_EXPORT ResourcePool : public base::trace_event::MemoryDumpProvider,
- public base::MemoryCoordinatorClient {
+class CC_EXPORT ResourcePool : public base::trace_event::MemoryDumpProvider {
class PoolResource;
public:
@@ -68,6 +70,11 @@ class CC_EXPORT ResourcePool : public base::trace_event::MemoryDumpProvider,
uint64_t tracing_process_id,
int importance) const = 0;
+ void InitOverlayCandidateAndTextureTarget(
+ const viz::ResourceFormat format,
+ const gpu::Capabilities& caps,
+ bool use_gpu_memory_buffer_resources);
+
gpu::Mailbox mailbox;
gpu::SyncToken mailbox_sync_token;
GLenum texture_target = 0;
@@ -235,12 +242,6 @@ class CC_EXPORT ResourcePool : public base::trace_event::MemoryDumpProvider,
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
- // Overriden from base::MemoryCoordinatorClient.
- void OnPurgeMemory() override;
- void OnMemoryStateChange(base::MemoryState state) override;
-
- // TODO(gyuyoung): OnMemoryPressure is deprecated. So this should be removed
- // when the memory coordinator is enabled by default.
void OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level);
diff --git a/chromium/cc/resources/resource_pool_unittest.cc b/chromium/cc/resources/resource_pool_unittest.cc
index 8b6cf746e70..ef0ba34c2db 100644
--- a/chromium/cc/resources/resource_pool_unittest.cc
+++ b/chromium/cc/resources/resource_pool_unittest.cc
@@ -469,11 +469,10 @@ TEST_F(ResourcePoolTest, PurgedMemory) {
EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
// Purging and suspending should not impact an in-use resource.
- resource_pool_->OnPurgeMemory();
- resource_pool_->OnMemoryStateChange(base::MemoryState::SUSPENDED);
+ resource_pool_->OnMemoryPressure(
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
- resource_pool_->OnMemoryStateChange(base::MemoryState::NORMAL);
// Export the resource to the display compositor, so it will be busy once
// released.
@@ -482,77 +481,25 @@ TEST_F(ResourcePoolTest, PurgedMemory) {
&transfers, context_provider_.get());
// Release the resource making it busy.
- resource_pool_->OnMemoryStateChange(base::MemoryState::NORMAL);
resource_pool_->ReleaseResource(std::move(resource));
EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
EXPECT_EQ(1u, resource_pool_->GetBusyResourceCountForTesting());
// Purging and suspending should not impact a busy resource either.
- resource_pool_->OnPurgeMemory();
- resource_pool_->OnMemoryStateChange(base::MemoryState::SUSPENDED);
+ resource_pool_->OnMemoryPressure(
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
EXPECT_EQ(1u, resource_pool_->GetBusyResourceCountForTesting());
// The resource moves from busy to available.
- resource_pool_->OnMemoryStateChange(base::MemoryState::NORMAL);
resource_provider_->ReceiveReturnsFromParent(
viz::TransferableResource::ReturnResources(transfers));
EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
// Purging and suspending should drop unused resources.
- resource_pool_->OnPurgeMemory();
- resource_pool_->OnMemoryStateChange(base::MemoryState::SUSPENDED);
- EXPECT_EQ(0u, resource_pool_->GetTotalResourceCountForTesting());
- EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
-}
-
-TEST_F(ResourcePoolTest, MemoryStateSuspended) {
- // Limits high enough to not be hit by this test.
- size_t bytes_limit = 10 * 1024 * 1024;
- size_t count_limit = 100;
- resource_pool_->SetResourceUsageLimits(bytes_limit, count_limit);
-
- gfx::Size size(100, 100);
- viz::ResourceFormat format = viz::RGBA_8888;
- gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
- ResourcePool::InUsePoolResource resource =
- resource_pool_->AcquireResource(size, format, color_space);
- SetBackingOnResource(resource);
- resource_pool_->PrepareForExport(resource);
-
- EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
- EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
-
- // Purging and suspending should not impact an in-use resource.
- resource_pool_->OnPurgeMemory();
- resource_pool_->OnMemoryStateChange(base::MemoryState::SUSPENDED);
- EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
- EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
- resource_pool_->OnMemoryStateChange(base::MemoryState::NORMAL);
-
- // Export the resource to the display compositor, so it will be busy once
- // released.
- std::vector<viz::TransferableResource> transfers;
- resource_provider_->PrepareSendToParent({resource.resource_id_for_export()},
- &transfers, context_provider_.get());
-
- // Release the resource making it busy.
- resource_pool_->OnMemoryStateChange(base::MemoryState::NORMAL);
- resource_pool_->ReleaseResource(std::move(resource));
- EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
- EXPECT_EQ(1u, resource_pool_->GetBusyResourceCountForTesting());
-
- // Purging and suspending should not impact a busy resource either.
- resource_pool_->OnPurgeMemory();
- resource_pool_->OnMemoryStateChange(base::MemoryState::SUSPENDED);
- EXPECT_EQ(1u, resource_pool_->GetTotalResourceCountForTesting());
- EXPECT_EQ(1u, resource_pool_->GetBusyResourceCountForTesting());
-
- // The resource moves from busy to available, but since we are SUSPENDED
- // it is not kept.
- resource_provider_->ReceiveReturnsFromParent(
- viz::TransferableResource::ReturnResources(transfers));
+ resource_pool_->OnMemoryPressure(
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
EXPECT_EQ(0u, resource_pool_->GetTotalResourceCountForTesting());
EXPECT_EQ(0u, resource_pool_->GetBusyResourceCountForTesting());
}
@@ -668,7 +615,7 @@ TEST_F(ResourcePoolTest, MetadataSentToDisplayCompositor) {
// These values are all non-default values so we can tell they are propagated.
gfx::Size size(100, 101);
- viz::ResourceFormat format = viz::ETC1;
+ viz::ResourceFormat format = viz::RGBA_4444;
EXPECT_NE(gfx::BufferFormat::RGBA_8888, viz::BufferFormat(format));
gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
uint32_t target = 5;
diff --git a/chromium/cc/scheduler/scheduler.cc b/chromium/cc/scheduler/scheduler.cc
index f2be14bef49..fcdbde8a7c0 100644
--- a/chromium/cc/scheduler/scheduler.cc
+++ b/chromium/cc/scheduler/scheduler.cc
@@ -265,7 +265,7 @@ void Scheduler::CancelPendingBeginFrameTask() {
if (pending_begin_frame_args_.IsValid()) {
TRACE_EVENT_INSTANT0("cc", "Scheduler::BeginFrameDropped",
TRACE_EVENT_SCOPE_THREAD);
- SendBeginFrameAck(pending_begin_frame_args_, kBeginFrameSkipped);
+ SendDidNotProduceFrame(pending_begin_frame_args_);
// Make pending begin frame invalid so that we don't accidentally use it.
pending_begin_frame_args_ = viz::BeginFrameArgs();
}
@@ -312,7 +312,7 @@ bool Scheduler::OnBeginFrameDerivedImpl(const viz::BeginFrameArgs& args) {
// Since we don't use the BeginFrame, we may later receive the same
// BeginFrame again. Thus, we can't confirm it at this point, even though we
// don't have any updates right now.
- SendBeginFrameAck(args, kBeginFrameSkipped);
+ SendDidNotProduceFrame(args);
return false;
}
@@ -340,7 +340,7 @@ bool Scheduler::OnBeginFrameDerivedImpl(const viz::BeginFrameArgs& args) {
if (pending_begin_frame_args_.IsValid()) {
TRACE_EVENT_INSTANT0("cc", "Scheduler::BeginFrameDropped",
TRACE_EVENT_SCOPE_THREAD);
- SendBeginFrameAck(pending_begin_frame_args_, kBeginFrameSkipped);
+ SendDidNotProduceFrame(pending_begin_frame_args_);
}
pending_begin_frame_args_ = args;
// ProcessScheduledActions() will post the previous frame's deadline if it
@@ -415,7 +415,7 @@ void Scheduler::BeginImplFrameWithDeadline(const viz::BeginFrameArgs& args) {
TRACE_EVENT_INSTANT0("cc", "Scheduler::MissedBeginFrameDropped",
TRACE_EVENT_SCOPE_THREAD);
skipped_last_frame_missed_exceeded_deadline_ = true;
- SendBeginFrameAck(args, kBeginFrameSkipped);
+ SendDidNotProduceFrame(args);
return;
}
skipped_last_frame_missed_exceeded_deadline_ = false;
@@ -515,7 +515,7 @@ void Scheduler::BeginImplFrameWithDeadline(const viz::BeginFrameArgs& args) {
TRACE_EVENT_INSTANT0("cc", "SkipBeginImplFrameToReduceLatency",
TRACE_EVENT_SCOPE_THREAD);
skipped_last_frame_to_reduce_latency_ = true;
- SendBeginFrameAck(begin_main_frame_args_, kBeginFrameSkipped);
+ SendDidNotProduceFrame(args);
return;
}
@@ -545,7 +545,9 @@ void Scheduler::FinishImplFrame() {
// Send ack before calling ProcessScheduledActions() because it might send an
// ack for any pending begin frame if we are going idle after this. This
// ensures that the acks are sent in order.
- SendBeginFrameAck(begin_main_frame_args_, kBeginFrameFinished);
+ if (!state_machine_.did_submit_in_last_frame())
+ SendDidNotProduceFrame(begin_impl_frame_tracker_.Current());
+
begin_impl_frame_tracker_.Finish();
ProcessScheduledActions();
@@ -554,24 +556,19 @@ void Scheduler::FinishImplFrame() {
base::AutoReset<bool> mark_inside(&inside_scheduled_action_, true);
client_->DidFinishImplFrame();
}
-}
-
-void Scheduler::SendBeginFrameAck(const viz::BeginFrameArgs& args,
- BeginFrameResult result) {
- bool did_submit = false;
- if (result == kBeginFrameFinished)
- did_submit = state_machine_.did_submit_in_last_frame();
-
- if (!did_submit) {
- DCHECK(!inside_scheduled_action_);
- base::AutoReset<bool> mark_inside(&inside_scheduled_action_, true);
- client_->DidNotProduceFrame(viz::BeginFrameAck(args, did_submit));
- }
if (begin_frame_source_)
begin_frame_source_->DidFinishFrame(this);
}
+void Scheduler::SendDidNotProduceFrame(const viz::BeginFrameArgs& args) {
+ if (last_begin_frame_ack_.source_id == args.source_id &&
+ last_begin_frame_ack_.sequence_number == args.sequence_number)
+ return;
+ last_begin_frame_ack_ = viz::BeginFrameAck(args, false /* has_damage */);
+ client_->DidNotProduceFrame(last_begin_frame_ack_);
+}
+
// BeginImplFrame starts a compositor frame that will wait up until a deadline
// for a BeginMainFrame+activation to complete before it times out and draws
// any asynchronous animation and scroll/pinch updates.
@@ -605,21 +602,18 @@ void Scheduler::BeginImplFrame(const viz::BeginFrameArgs& args,
}
void Scheduler::ScheduleBeginImplFrameDeadline() {
- base::TimeTicks new_deadline;
+ using DeadlineMode = SchedulerStateMachine::BeginImplFrameDeadlineMode;
+ deadline_mode_ = state_machine_.CurrentBeginImplFrameDeadlineMode();
- begin_impl_frame_deadline_mode_ =
- state_machine_.CurrentBeginImplFrameDeadlineMode();
- // Avoid using Now() for immediate deadlines because it's expensive, and this
- // method is called in every ProcessScheduledActions() call. Using
- // base::TimeTicks() achieves the same result.
- switch (begin_impl_frame_deadline_mode_) {
- case SchedulerStateMachine::BeginImplFrameDeadlineMode::NONE:
+ base::TimeTicks new_deadline;
+ switch (deadline_mode_) {
+ case DeadlineMode::NONE:
// NONE is returned when deadlines aren't used (synchronous compositor),
// or when outside a begin frame. In either case deadline task shouldn't
// be posted or should be cancelled already.
DCHECK(begin_impl_frame_deadline_task_.IsCancelled());
return;
- case SchedulerStateMachine::BeginImplFrameDeadlineMode::BLOCKED: {
+ case DeadlineMode::BLOCKED: {
// TODO(sunnyps): Posting the deadline for pending begin frame is required
// for browser compositor (commit_to_active_tree) to make progress in some
// cases. Change browser compositor deadline to LATE in state machine to
@@ -631,37 +625,44 @@ void Scheduler::ScheduleBeginImplFrameDeadline() {
bool has_pending_begin_frame = pending_begin_frame_args_.IsValid();
if (has_pending_begin_frame) {
new_deadline = base::TimeTicks();
+ break;
} else {
begin_impl_frame_deadline_task_.Cancel();
return;
}
- break;
}
- case SchedulerStateMachine::BeginImplFrameDeadlineMode::LATE:
- // We are waiting for a commit without needing active tree draw or we have
- // nothing to do.
+ case DeadlineMode::LATE: {
+ // We are waiting for a commit without needing active tree draw or we
+ // have nothing to do.
new_deadline = begin_impl_frame_tracker_.Current().frame_time +
begin_impl_frame_tracker_.Current().interval;
+ // Send early DidNotProduceFrame if we don't expect to produce a frame
+ // soon so that display scheduler doesn't wait unnecessarily.
+ // Note: This will only send one DidNotProduceFrame ack per begin frame.
+ if (!state_machine_.NewActiveTreeLikely())
+ SendDidNotProduceFrame(begin_impl_frame_tracker_.Current());
break;
- case SchedulerStateMachine::BeginImplFrameDeadlineMode::REGULAR:
+ }
+ case DeadlineMode::REGULAR:
// We are animating the active tree but we're also waiting for commit.
new_deadline = begin_impl_frame_tracker_.Current().deadline;
break;
- case SchedulerStateMachine::BeginImplFrameDeadlineMode::IMMEDIATE:
+ case DeadlineMode::IMMEDIATE:
+ // Avoid using Now() for immediate deadlines because it's expensive, and
+ // this method is called in every ProcessScheduledActions() call. Using
+ // base::TimeTicks() achieves the same result.
new_deadline = base::TimeTicks();
break;
}
- bool has_no_deadline_task = begin_impl_frame_deadline_task_.IsCancelled();
// Post deadline task only if we didn't have one already or something caused
- // us to change the deadline. Comparing deadline mode is not sufficient
- // because the calculated deadline also depends on whether we have a pending
- // begin frame which the state machine doesn't know about.
+ // us to change the deadline.
+ bool has_no_deadline_task = begin_impl_frame_deadline_task_.IsCancelled();
if (has_no_deadline_task || new_deadline != deadline_) {
TRACE_EVENT2("cc", "Scheduler::ScheduleBeginImplFrameDeadline",
"new deadline", new_deadline, "deadline mode",
SchedulerStateMachine::BeginImplFrameDeadlineModeToString(
- begin_impl_frame_deadline_mode_));
+ deadline_mode_));
deadline_ = new_deadline;
deadline_scheduled_at_ = Now();
@@ -862,9 +863,9 @@ void Scheduler::AsValueInto(base::trace_event::TracedValue* state) const {
skipped_last_frame_to_reduce_latency_);
state->SetString("inside_action",
SchedulerStateMachine::ActionToString(inside_action_));
- state->SetString("begin_impl_frame_deadline_mode",
+ state->SetString("deadline_mode",
SchedulerStateMachine::BeginImplFrameDeadlineModeToString(
- begin_impl_frame_deadline_mode_));
+ deadline_mode_));
state->SetDouble("deadline_ms", deadline_.since_origin().InMillisecondsF());
state->SetDouble("deadline_scheduled_at_ms",
diff --git a/chromium/cc/scheduler/scheduler.h b/chromium/cc/scheduler/scheduler.h
index 4368060c6af..d042ec39883 100644
--- a/chromium/cc/scheduler/scheduler.h
+++ b/chromium/cc/scheduler/scheduler.h
@@ -194,13 +194,12 @@ class CC_EXPORT Scheduler : public viz::BeginFrameObserverBase {
std::unique_ptr<CompositorTimingHistory> compositor_timing_history_;
// What the latest deadline was, and when it was scheduled.
- SchedulerStateMachine::BeginImplFrameDeadlineMode
- begin_impl_frame_deadline_mode_ =
- SchedulerStateMachine::BeginImplFrameDeadlineMode::NONE;
base::TimeTicks deadline_;
base::TimeTicks deadline_scheduled_at_;
+ SchedulerStateMachine::BeginImplFrameDeadlineMode deadline_mode_;
BeginFrameTracker begin_impl_frame_tracker_;
+ viz::BeginFrameAck last_begin_frame_ack_;
viz::BeginFrameArgs begin_main_frame_args_;
// Task posted for the deadline or drawing phase of the scheduler. This task
@@ -271,9 +270,7 @@ class CC_EXPORT Scheduler : public viz::BeginFrameObserverBase {
void BeginImplFrameSynchronous(const viz::BeginFrameArgs& args);
void BeginImplFrame(const viz::BeginFrameArgs& args, base::TimeTicks now);
void FinishImplFrame();
- enum BeginFrameResult { kBeginFrameSkipped, kBeginFrameFinished };
- void SendBeginFrameAck(const viz::BeginFrameArgs& args,
- BeginFrameResult result);
+ void SendDidNotProduceFrame(const viz::BeginFrameArgs& args);
void OnBeginImplFrameDeadline();
void PollToAdvanceCommitState();
void BeginMainFrameAnimateAndLayoutOnly(const viz::BeginFrameArgs& args);
diff --git a/chromium/cc/scheduler/scheduler_state_machine.cc b/chromium/cc/scheduler/scheduler_state_machine.cc
index 327e907e305..d9711b24a86 100644
--- a/chromium/cc/scheduler/scheduler_state_machine.cc
+++ b/chromium/cc/scheduler/scheduler_state_machine.cc
@@ -565,14 +565,12 @@ bool SchedulerStateMachine::ShouldInvalidateLayerTreeFrameSink() const {
if (begin_impl_frame_state_ != BeginImplFrameState::INSIDE_BEGIN_FRAME)
return false;
- // Don't invalidate if we cannnot draw.
- if (PendingDrawsShouldBeAborted())
- return false;
-
+ // Don't invalidate for draw if we cannot draw.
// TODO(sunnyps): needs_prepare_tiles_ is needed here because PrepareTiles is
// called only inside the deadline / draw phase. We could remove this if we
// allowed PrepareTiles to happen in OnBeginImplFrame.
- return needs_redraw_ || needs_prepare_tiles_;
+ return (needs_redraw_ && !PendingDrawsShouldBeAborted()) ||
+ needs_prepare_tiles_;
}
SchedulerStateMachine::Action SchedulerStateMachine::NextAction() const {
diff --git a/chromium/cc/scheduler/scheduler_unittest.cc b/chromium/cc/scheduler/scheduler_unittest.cc
index 7c768c7c7d1..aff1c76351c 100644
--- a/chromium/cc/scheduler/scheduler_unittest.cc
+++ b/chromium/cc/scheduler/scheduler_unittest.cc
@@ -3026,6 +3026,25 @@ TEST_F(SchedulerTest, InvalidateLayerTreeFrameSinkWhenCannotDraw) {
EXPECT_FALSE(scheduler_->RedrawPending());
}
+TEST_F(SchedulerTest, NeedsPrepareTilesInvalidates) {
+ // This is to test that SetNeedsPrepareTiles causes an invalidation even if
+ // CanDraw is false.
+ scheduler_settings_.using_synchronous_renderer_compositor = true;
+ SetUpScheduler(EXTERNAL_BFS);
+
+ scheduler_->SetCanDraw(false);
+
+ scheduler_->SetNeedsPrepareTiles();
+ EXPECT_ACTIONS("AddObserver(this)");
+ client_->Reset();
+
+ // Should invalidate in the next BeginFrame even though CanDraw is false.
+ EXPECT_SCOPED(AdvanceFrame());
+ EXPECT_ACTIONS("WillBeginImplFrame",
+ "ScheduledActionInvalidateLayerTreeFrameSink");
+ client_->Reset();
+}
+
TEST_F(SchedulerTest, SetNeedsOneBeginImplFrame) {
SetUpScheduler(EXTERNAL_BFS);
@@ -4142,5 +4161,23 @@ TEST_F(SchedulerTest, NoInvalidationForAnimateOnlyFrames) {
EXPECT_ACTIONS();
}
+TEST_F(SchedulerTest, SendEarlyDidNotProduceFrameIfIdle) {
+ SetUpScheduler(EXTERNAL_BFS);
+ scheduler_->SetNeedsBeginMainFrame();
+
+ client_->Reset();
+ EXPECT_SCOPED(AdvanceFrame());
+ EXPECT_ACTIONS("WillBeginImplFrame", "ScheduledActionSendBeginMainFrame");
+ auto begin_main_frame_args = client_->last_begin_main_frame_args();
+ EXPECT_NE(client_->last_begin_frame_ack().sequence_number,
+ begin_main_frame_args.sequence_number);
+
+ client_->Reset();
+ scheduler_->NotifyBeginMainFrameStarted(task_runner_->NowTicks());
+ scheduler_->BeginMainFrameAborted(CommitEarlyOutReason::FINISHED_NO_UPDATES);
+ EXPECT_EQ(client_->last_begin_frame_ack().sequence_number,
+ begin_main_frame_args.sequence_number);
+}
+
} // namespace
} // namespace cc
diff --git a/chromium/cc/tiles/gpu_image_decode_cache.cc b/chromium/cc/tiles/gpu_image_decode_cache.cc
index 3f30649393b..71550802821 100644
--- a/chromium/cc/tiles/gpu_image_decode_cache.cc
+++ b/chromium/cc/tiles/gpu_image_decode_cache.cc
@@ -10,7 +10,6 @@
#include "base/debug/alias.h"
#include "base/hash.h"
#include "base/memory/discardable_memory_allocator.h"
-#include "base/memory/memory_coordinator_client_registry.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
@@ -41,7 +40,6 @@ namespace {
// the system. This limit can be breached by in-use cache items, which cannot
// be deleted.
static const int kNormalMaxItemsInCacheForGpu = 2000;
-static const int kThrottledMaxItemsInCacheForGpu = 100;
static const int kSuspendedMaxItemsInCacheForGpu = 0;
// The maximum number of images that we can lock simultaneously in our working
@@ -193,47 +191,39 @@ bool DrawAndScaleImage(const DrawImage& draw_image,
draw_image.frame_index(), client_id);
}
- // If we can't decode/scale directly, we will handle this in up to 3 steps.
+ // If we can't decode/scale directly, we will handle this in 2 steps.
// Step 1: Decode at the nearest (larger) directly supported size or the
// original size if nearest neighbor quality is requested.
+ // Step 2: Scale to |pixmap| size. If decoded image is half float backed and
+ // the device does not support image resize, decode to N32 color type and
+ // convert to F16 afterward.
SkISize decode_size =
is_nearest_neighbor
? SkISize::Make(paint_image.width(), paint_image.height())
: supported_size;
SkImageInfo decode_info =
- SkImageInfo::MakeN32Premul(decode_size.width(), decode_size.height());
+ pixmap.info().makeWH(decode_size.width(), decode_size.height());
+ SkFilterQuality filter_quality = CalculateDesiredFilterQuality(draw_image);
+ bool decode_to_f16_using_n32_intermediate =
+ decode_info.colorType() == kRGBA_F16_SkColorType &&
+ !ImageDecodeCacheUtils::CanResizeF16Image(filter_quality);
+ if (decode_to_f16_using_n32_intermediate)
+ decode_info = decode_info.makeColorType(kN32_SkColorType);
+
SkBitmap decode_bitmap;
if (!decode_bitmap.tryAllocPixels(decode_info))
return false;
- SkPixmap decode_pixmap(decode_bitmap.info(), decode_bitmap.getPixels(),
- decode_bitmap.rowBytes());
+ SkPixmap decode_pixmap = decode_bitmap.pixmap();
if (!paint_image.Decode(decode_pixmap.writable_addr(), &decode_info,
color_space, draw_image.frame_index(), client_id)) {
return false;
}
- // Step 2a: Scale to |pixmap| directly if kN32_SkColorType.
- if (pixmap.info().colorType() == kN32_SkColorType) {
- return decode_pixmap.scalePixels(pixmap,
- CalculateDesiredFilterQuality(draw_image));
- }
-
- // Step 2b: Scale to temporary pixmap of kN32_SkColorType.
- SkImageInfo scaled_info = pixmap.info().makeColorType(kN32_SkColorType);
- SkBitmap scaled_bitmap;
- if (!scaled_bitmap.tryAllocPixels(scaled_info))
- return false;
- SkPixmap scaled_pixmap(scaled_bitmap.info(), scaled_bitmap.getPixels(),
- scaled_bitmap.rowBytes());
- if (!decode_pixmap.scalePixels(scaled_pixmap,
- CalculateDesiredFilterQuality(draw_image))) {
- return false;
+ if (decode_to_f16_using_n32_intermediate) {
+ return ImageDecodeCacheUtils::ScaleToHalfFloatPixmapUsingN32Intermediate(
+ decode_pixmap, &pixmap, filter_quality);
}
-
- // Step 3: Copy the temporary scaled pixmap to |pixmap|, performing
- // color type conversion. We can't do the color conversion in step 1, as
- // the scale in step 2 must happen in kN32_SkColorType.
- return scaled_pixmap.readPixels(pixmap);
+ return decode_pixmap.scalePixels(pixmap, filter_quality);
}
// Takes ownership of the backing texture of an SkImage. This allows us to
@@ -693,8 +683,9 @@ GpuImageDecodeCache::GpuImageDecodeCache(
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "cc::GpuImageDecodeCache", base::ThreadTaskRunnerHandle::Get());
}
- // Register this component with base::MemoryCoordinatorClientRegistry.
- base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
+ memory_pressure_listener_.reset(
+ new base::MemoryPressureListener(base::BindRepeating(
+ &GpuImageDecodeCache::OnMemoryPressure, base::Unretained(this))));
}
GpuImageDecodeCache::~GpuImageDecodeCache() {
@@ -708,12 +699,6 @@ GpuImageDecodeCache::~GpuImageDecodeCache() {
// It is safe to unregister, even if we didn't register in the constructor.
base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
this);
- // Unregister this component with memory_coordinator::ClientRegistry.
- base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
-
- memory_pressure_listener_.reset(
- new base::MemoryPressureListener(base::BindRepeating(
- &GpuImageDecodeCache::OnMemoryPressure, base::Unretained(this))));
// TODO(vmpstr): If we don't have a client name, it may cause problems in
// unittests, since most tests don't set the name but some do. The UMA system
@@ -1425,13 +1410,8 @@ bool GpuImageDecodeCache::ExceedsPreferredCount() const {
size_t items_limit;
if (aggressively_freeing_resources_) {
items_limit = kSuspendedMaxItemsInCacheForGpu;
- } else if (memory_state_ == base::MemoryState::NORMAL) {
- items_limit = kNormalMaxItemsInCacheForGpu;
- } else if (memory_state_ == base::MemoryState::THROTTLED) {
- items_limit = kThrottledMaxItemsInCacheForGpu;
} else {
- DCHECK_EQ(base::MemoryState::SUSPENDED, memory_state_);
- items_limit = kSuspendedMaxItemsInCacheForGpu;
+ items_limit = kNormalMaxItemsInCacheForGpu;
}
return persistent_cache_.size() > items_limit;
@@ -1958,26 +1938,16 @@ sk_sp<SkImage> GpuImageDecodeCache::GetSWImageDecodeForTesting(
return image_data->decode.ImageForTesting();
}
-void GpuImageDecodeCache::OnMemoryStateChange(base::MemoryState state) {
- memory_state_ = state;
-}
-
-void GpuImageDecodeCache::OnPurgeMemory() {
- base::AutoLock lock(lock_);
- // Temporary changes |memory_state_| to free up cache as much as possible.
- base::AutoReset<base::MemoryState> reset(&memory_state_,
- base::MemoryState::SUSPENDED);
- EnsureCapacity(0);
-}
-
void GpuImageDecodeCache::OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level) {
+ base::AutoLock lock(lock_);
switch (level) {
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
break;
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- OnPurgeMemory();
+ base::AutoReset<bool> reset(&aggressively_freeing_resources_, true);
+ EnsureCapacity(0);
break;
}
}
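
For the half-float fallback described in the two-step decode comment above, a minimal standalone sketch using only public Skia calls of this era (SkPixmap::scalePixels for the resize, SkPixmap::readPixels for the color-type conversion); the function name is illustrative and only mirrors the intent of ImageDecodeCacheUtils::ScaleToHalfFloatPixmapUsingN32Intermediate.

#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkFilterQuality.h"
#include "third_party/skia/include/core/SkPixmap.h"

// Scales an N32 source into an F16 destination by resizing in N32 first and
// converting color types afterwards, for devices that cannot resize F16.
bool ScaleToF16ViaN32(const SkPixmap& src_n32,
                      SkPixmap* dst_f16,
                      SkFilterQuality quality) {
  SkBitmap scaled_n32;
  if (!scaled_n32.tryAllocPixels(
          dst_f16->info().makeColorType(kN32_SkColorType)))
    return false;
  // Resize while still in N32, where scalePixels is supported everywhere.
  if (!src_n32.scalePixels(scaled_n32.pixmap(), quality))
    return false;
  // readPixels performs the N32 -> F16 color-type conversion into |dst_f16|.
  return scaled_n32.pixmap().readPixels(*dst_f16);
}
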
diff --git a/chromium/cc/tiles/gpu_image_decode_cache.h b/chromium/cc/tiles/gpu_image_decode_cache.h
index 634f826bb37..dd21622a83d 100644
--- a/chromium/cc/tiles/gpu_image_decode_cache.h
+++ b/chromium/cc/tiles/gpu_image_decode_cache.h
@@ -11,7 +11,6 @@
#include "base/containers/mru_cache.h"
#include "base/memory/discardable_memory.h"
-#include "base/memory/memory_coordinator_client.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_dump_provider.h"
@@ -98,8 +97,7 @@ namespace cc {
//
class CC_EXPORT GpuImageDecodeCache
: public ImageDecodeCache,
- public base::trace_event::MemoryDumpProvider,
- public base::MemoryCoordinatorClient {
+ public base::trace_event::MemoryDumpProvider {
public:
enum class DecodeTaskType { kPartOfUploadTask, kStandAloneDecodeTask };
@@ -137,10 +135,6 @@ class CC_EXPORT GpuImageDecodeCache
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
- // base::MemoryCoordinatorClient overrides.
- void OnMemoryStateChange(base::MemoryState state) override;
- void OnPurgeMemory() override;
-
// TODO(gyuyoung): OnMemoryPressure is deprecated. So this should be removed
// when the memory coordinator is enabled by default.
void OnMemoryPressure(
@@ -511,7 +505,6 @@ class CC_EXPORT GpuImageDecodeCache
size_t max_working_set_items_ = 0;
size_t working_set_bytes_ = 0;
size_t working_set_items_ = 0;
- base::MemoryState memory_state_ = base::MemoryState::NORMAL;
bool aggressively_freeing_resources_ = false;
// We can't modify GPU backed SkImages without holding the context lock, so
diff --git a/chromium/cc/tiles/gpu_image_decode_cache_unittest.cc b/chromium/cc/tiles/gpu_image_decode_cache_unittest.cc
index 1e59e9664bc..fc570ccbde2 100644
--- a/chromium/cc/tiles/gpu_image_decode_cache_unittest.cc
+++ b/chromium/cc/tiles/gpu_image_decode_cache_unittest.cc
@@ -104,7 +104,8 @@ class FakeGPUImageDecodeTestGLES2Interface : public viz::TestGLES2Interface,
TransferCacheTestHelper* transfer_cache_helper)
: extension_string_(
"GL_EXT_texture_format_BGRA8888 GL_OES_rgb8_rgba8 "
- "GL_OES_texture_npot"),
+ "GL_OES_texture_npot "
+ "GL_OES_texture_half_float GL_OES_texture_half_float_linear"),
discardable_manager_(discardable_manager),
transfer_cache_helper_(transfer_cache_helper) {}
@@ -184,6 +185,9 @@ class FakeGPUImageDecodeTestGLES2Interface : public viz::TestGLES2Interface,
}
void GetIntegerv(GLenum name, GLint* params) override {
switch (name) {
+ case GL_MAX_TEXTURE_IMAGE_UNITS:
+ *params = 8;
+ return;
case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
*params = 8;
return;
@@ -233,10 +237,6 @@ class GPUImageDecodeTestMockContextProvider : public viz::TestContextProvider {
: TestContextProvider(std::move(support), std::move(gl), true) {}
};
-gfx::ColorSpace DefaultColorSpace() {
- return gfx::ColorSpace::CreateSRGB();
-}
-
SkMatrix CreateMatrix(const SkSize& scale, bool is_decomposable) {
SkMatrix matrix;
matrix.setScale(scale.width(), scale.height());
@@ -279,6 +279,26 @@ class GpuImageDecodeCacheTest
PaintImage::kDefaultGeneratorClientId);
}
+ PaintImage CreatePaintImageInternal(
+ const gfx::Size& size,
+ sk_sp<SkColorSpace> color_space = nullptr,
+ PaintImage::Id id = PaintImage::kInvalidId) {
+ const bool allocate_encoded_memory = true;
+ return CreateDiscardablePaintImage(
+ size, color_space, allocate_encoded_memory, id, color_type_);
+ }
+
+ PaintImage CreateBitmapImageInternal(const gfx::Size& size) {
+ return CreateBitmapImage(size, color_type_);
+ }
+
+ gfx::ColorSpace DefaultColorSpace() {
+ if (color_type_ != kRGBA_F16_SkColorType)
+ return gfx::ColorSpace::CreateSRGB();
+ return gfx::ColorSpace(gfx::ColorSpace::PrimaryID::SMPTEST432_1, // P3
+ gfx::ColorSpace::TransferID::LINEAR);
+ }
+
GPUImageDecodeTestMockContextProvider* context_provider() {
return context_provider_.get();
}
@@ -336,7 +356,7 @@ class GpuImageDecodeCacheTest
TEST_P(GpuImageDecodeCacheTest, GetTaskForImageSameImage) {
auto cache = CreateCache();
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
@@ -367,7 +387,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageSameImage) {
TEST_P(GpuImageDecodeCacheTest, GetTaskForImageSmallerScale) {
auto cache = CreateCache();
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
@@ -398,7 +418,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageSmallerScale) {
TEST_P(GpuImageDecodeCacheTest, GetTaskForImageLowerQuality) {
auto cache = CreateCache();
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
bool is_decomposable = true;
SkMatrix matrix = CreateMatrix(SkSize::Make(0.4f, 0.4f), is_decomposable);
@@ -431,7 +451,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageDifferentImage) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -441,7 +461,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageDifferentImage) {
EXPECT_TRUE(first_result.need_unref);
EXPECT_TRUE(first_result.task);
- PaintImage second_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage second_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage second_draw_image(
second_image,
SkIRect::MakeWH(second_image.width(), second_image.height()), quality,
@@ -467,7 +487,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageLargerScale) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -513,7 +533,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageLargerScaleNoReuse) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -557,7 +577,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageHigherQuality) {
bool is_decomposable = true;
SkMatrix matrix = CreateMatrix(SkSize::Make(0.4f, 0.4f), is_decomposable);
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
kLow_SkFilterQuality, matrix, PaintImage::kDefaultFrameIndex,
@@ -579,12 +599,11 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageHigherQuality) {
ImageDecodeCache::TaskResult second_result = cache->GetTaskForImageAndRef(
second_draw_image, ImageDecodeCache::TracingInfo());
EXPECT_TRUE(second_result.need_unref);
+
EXPECT_TRUE(second_result.task);
EXPECT_TRUE(first_result.task.get() != second_result.task.get());
-
TestTileTaskRunner::ProcessTask(second_result.task->dependencies()[0].get());
TestTileTaskRunner::ProcessTask(second_result.task.get());
-
cache->UnrefImage(second_draw_image);
}
@@ -593,7 +612,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageAlreadyDecodedAndLocked) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -635,7 +654,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageAlreadyDecodedNotLocked) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -677,7 +696,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageAlreadyUploaded) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -708,7 +727,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageCanceledGetsNewTask) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -751,7 +770,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageCanceledWhileReffedGetsNewTask) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -798,7 +817,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageUploadCanceledButDecodeRun) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -827,7 +846,7 @@ TEST_P(GpuImageDecodeCacheTest, NoTaskForImageAlreadyFailedDecoding) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -857,7 +876,7 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDraw) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -889,7 +908,7 @@ TEST_P(GpuImageDecodeCacheTest, GetLargeDecodedImageForDraw) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 24000));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(1, 24000));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -925,7 +944,7 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawAtRasterDecode) {
cache->SetWorkingSetLimitsForTesting(0 /* max_bytes */, 0 /* max_items */);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -959,7 +978,7 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawLargerScale) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
kLow_SkFilterQuality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1014,7 +1033,7 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawHigherQuality) {
bool is_decomposable = true;
SkMatrix matrix = CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
kLow_SkFilterQuality, matrix,
PaintImage::kDefaultFrameIndex, DefaultColorSpace());
@@ -1034,7 +1053,6 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawHigherQuality) {
higher_quality_draw_image, ImageDecodeCache::TracingInfo());
EXPECT_TRUE(hq_result.need_unref);
EXPECT_TRUE(hq_result.task);
-
TestTileTaskRunner::ProcessTask(hq_result.task->dependencies()[0].get());
TestTileTaskRunner::ProcessTask(hq_result.task.get());
@@ -1068,7 +1086,7 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawNegative) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(-0.5f, 0.5f), is_decomposable),
@@ -1102,7 +1120,7 @@ TEST_P(GpuImageDecodeCacheTest, GetLargeScaledDecodedImageForDraw) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 48000));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(1, 48000));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1125,7 +1143,9 @@ TEST_P(GpuImageDecodeCacheTest, GetLargeScaledDecodedImageForDraw) {
// The mip level scale should never go below 0 in any dimension.
EXPECT_EQ(1, decoded_draw_image.image()->width());
EXPECT_EQ(24000, decoded_draw_image.image()->height());
+
EXPECT_EQ(decoded_draw_image.filter_quality(), kMedium_SkFilterQuality);
+
EXPECT_FALSE(decoded_draw_image.image()->isTextureBacked());
ExpectIfNotUsingTransferCache(
cache->DiscardableIsLockedForTesting(draw_image));
@@ -1142,7 +1162,7 @@ TEST_P(GpuImageDecodeCacheTest, AtRasterUsedDirectlyIfSpaceAllows) {
cache->SetWorkingSetLimitsForTesting(0 /* max_bytes */, 0 /* max_items */);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1183,7 +1203,7 @@ TEST_P(GpuImageDecodeCacheTest,
cache->SetWorkingSetLimitsForTesting(0 /* max_bytes */, 0 /* max_items */);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1220,7 +1240,7 @@ TEST_P(GpuImageDecodeCacheTest,
SkFilterQuality quality = kHigh_SkFilterQuality;
cache->SetWorkingSetLimitsForTesting(0 /* max_bytes */, 0 /* max_items */);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 24000));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(1, 24000));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1257,7 +1277,7 @@ TEST_P(GpuImageDecodeCacheTest, ZeroSizedImagesAreSkipped) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.f, 0.f), is_decomposable),
@@ -1283,7 +1303,7 @@ TEST_P(GpuImageDecodeCacheTest, NonOverlappingSrcRectImagesAreSkipped) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(
image, SkIRect::MakeXYWH(150, 150, image.width(), image.height()),
quality, CreateMatrix(SkSize::Make(1.f, 1.f), is_decomposable),
@@ -1309,7 +1329,7 @@ TEST_P(GpuImageDecodeCacheTest, CanceledTasksDoNotCountAgainstBudget) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(
image, SkIRect::MakeXYWH(0, 0, image.width(), image.height()), quality,
CreateMatrix(SkSize::Make(1.f, 1.f), is_decomposable),
@@ -1335,7 +1355,7 @@ TEST_P(GpuImageDecodeCacheTest, ShouldAggressivelyFreeResources) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1397,7 +1417,7 @@ TEST_P(GpuImageDecodeCacheTest, OrphanedImagesFreeOnReachingZeroRefs) {
SkFilterQuality quality = kHigh_SkFilterQuality;
// Create a downscaled image.
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1445,7 +1465,7 @@ TEST_P(GpuImageDecodeCacheTest, OrphanedZeroRefImagesImmediatelyDeleted) {
SkFilterQuality quality = kHigh_SkFilterQuality;
// Create a downscaled image.
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -1487,7 +1507,7 @@ TEST_P(GpuImageDecodeCacheTest, OrphanedZeroRefImagesImmediatelyDeleted) {
TEST_P(GpuImageDecodeCacheTest, QualityCappedAtMedium) {
auto cache = CreateCache();
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
bool is_decomposable = true;
SkMatrix matrix = CreateMatrix(SkSize::Make(0.4f, 0.4f), is_decomposable);
@@ -1541,7 +1561,7 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawMipUsageChange) {
SkFilterQuality quality = kHigh_SkFilterQuality;
// Create an image decode task and cache entry that does not need mips.
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(4000, 4000));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(3072, 4096));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1573,70 +1593,10 @@ TEST_P(GpuImageDecodeCacheTest, GetDecodedImageForDrawMipUsageChange) {
cache->DrawWithImageFinished(draw_image_mips, decoded_draw_image);
}
-TEST_P(GpuImageDecodeCacheTest, MemoryStateSuspended) {
- auto cache = CreateCache();
-
- // First Insert an image into our cache.
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 1));
- bool is_decomposable = true;
- SkMatrix matrix = CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable);
- DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
- kLow_SkFilterQuality, matrix,
- PaintImage::kDefaultFrameIndex, DefaultColorSpace());
- ImageDecodeCache::TaskResult result =
- cache->GetTaskForImageAndRef(draw_image, ImageDecodeCache::TracingInfo());
- EXPECT_TRUE(result.need_unref);
- EXPECT_TRUE(result.task);
-
- TestTileTaskRunner::ProcessTask(result.task->dependencies()[0].get());
- TestTileTaskRunner::ProcessTask(result.task.get());
- cache->UnrefImage(draw_image);
-
- // The image should be cached.
- EXPECT_EQ(cache->GetNumCacheEntriesForTesting(), 1u);
-
- // Set us to the SUSPENDED state with purging.
- cache->OnPurgeMemory();
- cache->OnMemoryStateChange(base::MemoryState::SUSPENDED);
-
- // Nothing should be cached.
- EXPECT_EQ(cache->GetWorkingSetBytesForTesting(), 0u);
- EXPECT_EQ(cache->GetNumCacheEntriesForTesting(), 0u);
-
- // Attempts to get a task for the image will still succeed, as SUSPENDED
- // doesn't impact working set size.
- result =
- cache->GetTaskForImageAndRef(draw_image, ImageDecodeCache::TracingInfo());
- EXPECT_TRUE(result.need_unref);
- EXPECT_TRUE(result.task);
-
- TestTileTaskRunner::ProcessTask(result.task->dependencies()[0].get());
- TestTileTaskRunner::ProcessTask(result.task.get());
- cache->UnrefImage(draw_image);
-
- // Nothing should be cached.
- EXPECT_EQ(cache->GetWorkingSetBytesForTesting(), 0u);
- EXPECT_EQ(cache->GetNumCacheEntriesForTesting(), 0u);
-
- // Restore us to visible and NORMAL memory state.
- cache->OnMemoryStateChange(base::MemoryState::NORMAL);
- cache->SetShouldAggressivelyFreeResources(false);
-
- // We should now be able to create a task again (space available).
- result =
- cache->GetTaskForImageAndRef(draw_image, ImageDecodeCache::TracingInfo());
- EXPECT_TRUE(result.need_unref);
- EXPECT_TRUE(result.task);
-
- TestTileTaskRunner::ProcessTask(result.task->dependencies()[0].get());
- TestTileTaskRunner::ProcessTask(result.task.get());
- cache->UnrefImage(draw_image);
-}
-
TEST_P(GpuImageDecodeCacheTest, OutOfRasterDecodeTask) {
auto cache = CreateCache();
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 1));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(1, 1));
bool is_decomposable = true;
SkMatrix matrix = CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable);
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
@@ -1665,7 +1625,7 @@ TEST_P(GpuImageDecodeCacheTest, ZeroCacheNormalWorkingSet) {
// Add an image to the cache-> Due to normal working set, this should produce
// a task and a ref.
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1716,13 +1676,13 @@ TEST_P(GpuImageDecodeCacheTest, SmallCacheNormalWorkingSet) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
PaintImage::kDefaultFrameIndex, DefaultColorSpace());
- PaintImage image2 = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image2 = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image2(
image2, SkIRect::MakeWH(image2.width(), image2.height()), quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1799,7 +1759,7 @@ TEST_P(GpuImageDecodeCacheTest, ClearCache) {
SkFilterQuality quality = kHigh_SkFilterQuality;
for (int i = 0; i < 10; ++i) {
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(
image, SkIRect::MakeWH(image.width(), image.height()), quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1829,7 +1789,7 @@ TEST_P(GpuImageDecodeCacheTest, ClearCacheInUse) {
SkFilterQuality quality = kHigh_SkFilterQuality;
// Create an image but keep it reffed so it can't be immediately freed.
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1866,7 +1826,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForImageDifferentColorSpace) {
gfx::ColorSpace color_space_a = gfx::ColorSpace::CreateSRGB();
gfx::ColorSpace color_space_b = gfx::ColorSpace::CreateXYZD50();
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1912,7 +1872,7 @@ TEST_P(GpuImageDecodeCacheTest, GetTaskForLargeImageNonSRGBColorSpace) {
gfx::ColorSpace color_space = gfx::ColorSpace::CreateXYZD50();
// Create an image that's too large to cache.
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 24000));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(1, 24000));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -1938,8 +1898,8 @@ TEST_P(GpuImageDecodeCacheTest, CacheDecodesExpectedFrames) {
FrameMetadata(true, base::TimeDelta::FromMilliseconds(5)),
};
sk_sp<FakePaintImageGenerator> generator =
- sk_make_sp<FakePaintImageGenerator>(SkImageInfo::MakeN32Premul(10, 10),
- frames);
+ sk_make_sp<FakePaintImageGenerator>(
+ SkImageInfo::Make(10, 10, color_type_, kPremul_SkAlphaType), frames);
PaintImage image = PaintImageBuilder::WithDefault()
.set_id(PaintImage::GetNextId())
.set_paint_image_generator(generator)
@@ -1992,7 +1952,7 @@ TEST_P(GpuImageDecodeCacheTest, OrphanedDataCancelledWhileReplaced) {
SkFilterQuality quality = kHigh_SkFilterQuality;
// Create a downscaled image.
- PaintImage first_image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage first_image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage first_draw_image(
first_image, SkIRect::MakeWH(first_image.width(), first_image.height()),
quality, CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -2041,46 +2001,6 @@ TEST_P(GpuImageDecodeCacheTest, OrphanedDataCancelledWhileReplaced) {
EXPECT_EQ(0u, cache->GetInUseCacheEntriesForTesting());
}
-TEST_P(GpuImageDecodeCacheTest, EvictDueToCachedItemsLimit) {
- auto cache = CreateCache();
- bool is_decomposable = true;
- SkFilterQuality quality = kHigh_SkFilterQuality;
-
- // Set the THROTTLED state, which limits our cache to 100 entries.
- cache->OnMemoryStateChange(base::MemoryState::THROTTLED);
-
- // Create and unlock 200 images. We should end up with 100 cached.
- for (int i = 0; i < 200; ++i) {
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(10, 10));
- DrawImage draw_image(
- image, SkIRect::MakeWH(image.width(), image.height()), quality,
- CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
- PaintImage::kDefaultFrameIndex, DefaultColorSpace());
-
- ImageDecodeCache::TaskResult result = cache->GetTaskForImageAndRef(
- draw_image, ImageDecodeCache::TracingInfo());
- EXPECT_TRUE(result.need_unref);
- if (result.task) {
- TestTileTaskRunner::ProcessTask(result.task->dependencies()[0].get());
- TestTileTaskRunner::ProcessTask(result.task.get());
- }
-
- // Must hold context lock before calling GetDecodedImageForDraw /
- // DrawWithImageFinished.
- viz::ContextProvider::ScopedContextLock context_lock(context_provider());
- DecodedDrawImage decoded_draw_image =
- EnsureImageBacked(cache->GetDecodedImageForDraw(draw_image));
- EXPECT_TRUE(decoded_draw_image.image());
- EXPECT_TRUE(decoded_draw_image.image()->isTextureBacked());
- EXPECT_FALSE(cache->DiscardableIsLockedForTesting(draw_image));
-
- cache->DrawWithImageFinished(draw_image, decoded_draw_image);
- cache->UnrefImage(draw_image);
- }
-
- EXPECT_EQ(cache->GetNumCacheEntriesForTesting(), 100u);
-}
-
TEST_P(GpuImageDecodeCacheTest, AlreadyBudgetedImagesAreNotAtRaster) {
auto cache = CreateCache();
bool is_decomposable = true;
@@ -2090,7 +2010,7 @@ TEST_P(GpuImageDecodeCacheTest, AlreadyBudgetedImagesAreNotAtRaster) {
cache->SetWorkingSetLimitsForTesting(
SkColorTypeBytesPerPixel(GetParam().first) * 10 * 10 * 10 /* max_bytes */,
1 /* max_items */);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(10, 10));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(10, 10));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2127,7 +2047,7 @@ TEST_P(GpuImageDecodeCacheTest, ImageBudgetingByCount) {
// count restriction.
cache->SetWorkingSetLimitsForTesting(96 * 1024 * 1024 /* max_bytes */,
1 /* max_items */);
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(10, 10));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(10, 10));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2142,7 +2062,7 @@ TEST_P(GpuImageDecodeCacheTest, ImageBudgetingByCount) {
// Try another image, it shouldn't be budgeted and should be at-raster.
DrawImage second_draw_image(
- CreateDiscardablePaintImage(gfx::Size(100, 100)),
+ CreatePaintImageInternal(gfx::Size(100, 100)),
SkIRect::MakeWH(image.width(), image.height()), quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
PaintImage::kDefaultFrameIndex, DefaultColorSpace());
@@ -2213,7 +2133,7 @@ TEST_P(GpuImageDecodeCacheTest,
gfx::ColorSpace color_space = gfx::ColorSpace::CreateXYZD50();
// Create an image that's too large to upload.
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(1, 24000));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(1, 24000));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2267,7 +2187,7 @@ TEST_P(GpuImageDecodeCacheTest,
SkFilterQuality quality = kHigh_SkFilterQuality;
gfx::ColorSpace color_space = gfx::ColorSpace::CreateDisplayP3D65();
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(11, 12));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(11, 12));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2315,7 +2235,7 @@ TEST_P(GpuImageDecodeCacheTest, NonLazyImageUploadNoScale) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateBitmapImage(gfx::Size(10, 10));
+ PaintImage image = CreateBitmapImageInternal(gfx::Size(10, 10));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2336,7 +2256,7 @@ TEST_P(GpuImageDecodeCacheTest, NonLazyImageUploadNoScaleTask) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateBitmapImage(gfx::Size(10, 10));
+ PaintImage image = CreateBitmapImageInternal(gfx::Size(10, 10));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2359,7 +2279,7 @@ TEST_P(GpuImageDecodeCacheTest, NonLazyImageLargeImageColorConverted) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateBitmapImage(gfx::Size(10, 24000));
+ PaintImage image = CreateBitmapImageInternal(gfx::Size(10, 24000));
DrawImage draw_image(
image, SkIRect::MakeWH(image.width(), image.height()), quality,
CreateMatrix(SkSize::Make(1.0f, 1.0f), is_decomposable),
@@ -2386,7 +2306,7 @@ TEST_P(GpuImageDecodeCacheTest, NonLazyImageUploadDownscaled) {
bool is_decomposable = true;
SkFilterQuality quality = kHigh_SkFilterQuality;
- PaintImage image = CreateBitmapImage(gfx::Size(10, 10));
+ PaintImage image = CreateBitmapImageInternal(gfx::Size(10, 10));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -2416,8 +2336,8 @@ TEST_P(GpuImageDecodeCacheTest, KeepOnlyLast2ContentIds) {
std::vector<DecodedDrawImage> decoded_draw_images;
for (int i = 0; i < 10; ++i) {
- PaintImage image = CreateDiscardablePaintImage(
- gfx::Size(10, 10), SkColorSpace::MakeSRGB(), true, paint_image_id);
+ PaintImage image = CreatePaintImageInternal(
+ gfx::Size(10, 10), SkColorSpace::MakeSRGB(), paint_image_id);
DrawImage draw_image(
image, SkIRect::MakeWH(image.width(), image.height()), quality,
CreateMatrix(SkSize::Make(0.5f, 0.5f), is_decomposable),
@@ -2445,7 +2365,8 @@ TEST_P(GpuImageDecodeCacheTest, KeepOnlyLast2ContentIds) {
// We have a single tracked entry, that gets cleared once we purge the cache.
EXPECT_EQ(cache->paint_image_entries_count_for_testing(), 1u);
- cache->OnPurgeMemory();
+ cache->OnMemoryPressure(
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
EXPECT_EQ(cache->paint_image_entries_count_for_testing(), 0u);
}
@@ -2530,7 +2451,7 @@ TEST_P(GpuImageDecodeCacheTest, BasicMips) {
auto cache = CreateCache();
bool is_decomposable = true;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
DrawImage draw_image(image, SkIRect::MakeWH(image.width(), image.height()),
filter_quality, CreateMatrix(scale, is_decomposable),
PaintImage::kDefaultFrameIndex, color_space);
@@ -2587,7 +2508,7 @@ TEST_P(GpuImageDecodeCacheTest, MipsAddedSubsequentDraw) {
bool is_decomposable = true;
auto filter_quality = kMedium_SkFilterQuality;
- PaintImage image = CreateDiscardablePaintImage(gfx::Size(100, 100));
+ PaintImage image = CreatePaintImageInternal(gfx::Size(100, 100));
// Create an image with no scaling. It will not have mips.
{
@@ -2759,8 +2680,10 @@ INSTANTIATE_TEST_CASE_P(
::testing::Values(
std::make_pair(kN32_SkColorType, false /* use_transfer_cache */),
std::make_pair(kARGB_4444_SkColorType, false /* use_transfer_cache */),
+ std::make_pair(kRGBA_F16_SkColorType, false /* use_transfer_cache */),
std::make_pair(kN32_SkColorType, true /* use_transfer_cache */),
- std::make_pair(kARGB_4444_SkColorType, true /* use_transfer_cache */)));
+ std::make_pair(kARGB_4444_SkColorType, true /* use_transfer_cache */),
+ std::make_pair(kRGBA_F16_SkColorType, true /* use_transfer_cache */)));
} // namespace
} // namespace cc
diff --git a/chromium/cc/tiles/image_controller.cc b/chromium/cc/tiles/image_controller.cc
index 99e977fb4a9..d6a91504c08 100644
--- a/chromium/cc/tiles/image_controller.cc
+++ b/chromium/cc/tiles/image_controller.cc
@@ -117,7 +117,9 @@ void ImageController::StopWorkerTasks() {
request.task->DidComplete();
}
}
- cache_->UnrefImage(request.draw_image);
+
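+    // Only unref when a ref was actually taken; per the updated tests below,
+    // non-lazy (bitmap) decode requests never take a cache ref.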
+ if (request.need_unref)
+ cache_->UnrefImage(request.draw_image);
// Orphan the request so that we can still run it when a new cache is set.
request.task = nullptr;
diff --git a/chromium/cc/tiles/image_controller_unittest.cc b/chromium/cc/tiles/image_controller_unittest.cc
index 7456767908c..dfa0bfc2750 100644
--- a/chromium/cc/tiles/image_controller_unittest.cc
+++ b/chromium/cc/tiles/image_controller_unittest.cc
@@ -232,6 +232,12 @@ DrawImage CreateDiscardableDrawImage(gfx::Size size) {
PaintImage::kDefaultFrameIndex, gfx::ColorSpace());
}
+DrawImage CreateBitmapDrawImage(gfx::Size size) {
+ return DrawImage(
+ CreateBitmapImage(size), SkIRect::MakeWH(size.width(), size.height()),
+ kNone_SkFilterQuality, SkMatrix::I(), PaintImage::kDefaultFrameIndex);
+}
+
class ImageControllerTest : public testing::Test {
public:
ImageControllerTest()
@@ -322,16 +328,7 @@ TEST_F(ImageControllerTest, QueueImageDecodeNonLazy) {
base::RunLoop run_loop;
DecodeClient decode_client;
- SkBitmap bitmap;
- bitmap.allocN32Pixels(1, 1);
- DrawImage image =
- DrawImage(PaintImageBuilder::WithDefault()
- .set_id(PaintImage::GetNextId())
- .set_image(SkImage::MakeFromBitmap(bitmap),
- PaintImage::GetNextContentId())
- .TakePaintImage(),
- SkIRect::MakeWH(1, 1), kNone_SkFilterQuality, SkMatrix::I(),
- PaintImage::kDefaultFrameIndex, gfx::ColorSpace());
+ DrawImage image = CreateBitmapDrawImage(gfx::Size(1, 1));
ImageController::ImageDecodeRequestId expected_id =
controller()->QueueImageDecode(
@@ -607,5 +604,92 @@ TEST_F(ImageControllerTest, DispatchesDecodeCallbacksAfterCacheChanged) {
ResetController();
}
+TEST_F(ImageControllerTest, QueueImageDecodeLazyCancelImmediately) {
+ DecodeClient decode_client1;
+ DecodeClient decode_client2;
+
+ // Create two images so that there is always one that is queued up and
+ // not run yet. This prevents raciness in this test.
+ DrawImage image1 = CreateDiscardableDrawImage(gfx::Size(1, 1));
+ DrawImage image2 = CreateDiscardableDrawImage(gfx::Size(1, 1));
+
+ ImageController::ImageDecodeRequestId expected_id1 =
+ controller()->QueueImageDecode(
+          image1,
+ base::Bind(&DecodeClient::Callback, base::Unretained(&decode_client1),
+ base::Bind([]() {})));
+
+ ImageController::ImageDecodeRequestId expected_id2 =
+ controller()->QueueImageDecode(
+          image2,
+ base::Bind(&DecodeClient::Callback, base::Unretained(&decode_client2),
+ base::Bind([]() {})));
+
+ // This needs a ref because it is lazy.
+ EXPECT_EQ(2, cache()->number_of_refs());
+
+ // Instead of running, immediately cancel everything.
+ controller()->SetImageDecodeCache(nullptr);
+
+ // This should not crash, and nothing should have run.
+ EXPECT_NE(expected_id1, decode_client1.id());
+ EXPECT_NE(expected_id2, decode_client2.id());
+ EXPECT_EQ(0u, decode_client1.id());
+ EXPECT_EQ(0u, decode_client2.id());
+ EXPECT_EQ(ImageController::ImageDecodeResult::FAILURE,
+ decode_client1.result());
+ EXPECT_EQ(ImageController::ImageDecodeResult::FAILURE,
+ decode_client2.result());
+
+ // Refs should still be cleaned up.
+ EXPECT_EQ(0, cache()->number_of_refs());
+
+ // Explicitly reset the controller so that orphaned task callbacks run
+ // while the decode clients still exist.
+ ResetController();
+}
+
+TEST_F(ImageControllerTest, QueueImageDecodeNonLazyCancelImmediately) {
+ DecodeClient decode_client1;
+ DecodeClient decode_client2;
+
+ // Create two images so that there is always one that is queued up and
+ // not run yet. This prevents raciness in this test.
+ DrawImage image1 = CreateBitmapDrawImage(gfx::Size(1, 1));
+ DrawImage image2 = CreateBitmapDrawImage(gfx::Size(1, 1));
+
+ ImageController::ImageDecodeRequestId expected_id1 =
+ controller()->QueueImageDecode(
+ image1,
+ base::Bind(&DecodeClient::Callback, base::Unretained(&decode_client1),
+ base::Bind([]() {})));
+ ImageController::ImageDecodeRequestId expected_id2 =
+ controller()->QueueImageDecode(
+ image2,
+ base::Bind(&DecodeClient::Callback, base::Unretained(&decode_client2),
+ base::Bind([]() {})));
+
+ // No ref needed here, because it is non-lazy.
+ EXPECT_EQ(0, cache()->number_of_refs());
+
+ // Instead of running, immediately cancel everything.
+ controller()->SetImageDecodeCache(nullptr);
+
+ // This should not crash, and nothing should have run.
+ EXPECT_NE(expected_id1, decode_client1.id());
+ EXPECT_NE(expected_id2, decode_client2.id());
+ EXPECT_EQ(0u, decode_client1.id());
+ EXPECT_EQ(0u, decode_client2.id());
+ EXPECT_EQ(ImageController::ImageDecodeResult::FAILURE,
+ decode_client1.result());
+ EXPECT_EQ(ImageController::ImageDecodeResult::FAILURE,
+ decode_client2.result());
+ EXPECT_EQ(0, cache()->number_of_refs());
+
+ // Explicitly reset the controller so that orphaned task callbacks run
+ // while the decode clients still exist.
+ ResetController();
+}
+
} // namespace
} // namespace cc
diff --git a/chromium/cc/tiles/image_decode_cache.h b/chromium/cc/tiles/image_decode_cache.h
index 7c75dd9d7c5..6d7d339ee28 100644
--- a/chromium/cc/tiles/image_decode_cache.h
+++ b/chromium/cc/tiles/image_decode_cache.h
@@ -11,6 +11,7 @@
#include "cc/paint/decoded_draw_image.h"
#include "cc/paint/draw_image.h"
#include "cc/raster/tile_task.h"
+#include "cc/tiles/image_decode_cache_utils.h"
#include "cc/tiles/tile_priority.h"
namespace cc {
diff --git a/chromium/cc/tiles/image_decode_cache_utils.cc b/chromium/cc/tiles/image_decode_cache_utils.cc
new file mode 100644
index 00000000000..a50f2464110
--- /dev/null
+++ b/chromium/cc/tiles/image_decode_cache_utils.cc
@@ -0,0 +1,52 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_TILES_IMAGE_DECODE_CACHE_UTILS_CC_
+#define CC_TILES_IMAGE_DECODE_CACHE_UTILS_CC_
+
+#include "cc/tiles/image_decode_cache_utils.h"
+
+#include "base/logging.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkSurface.h"
+
+namespace cc {
+
+bool ImageDecodeCacheUtils::ScaleToHalfFloatPixmapUsingN32Intermediate(
+ const SkPixmap& source_pixmap,
+ SkPixmap* scaled_pixmap,
+ SkFilterQuality filter_quality) {
+ // Target pixmap should be half float backed.
+ DCHECK(scaled_pixmap->colorType() == kRGBA_F16_SkColorType);
+  // Filter quality should be medium or high. This path is only needed when
+  // the device (Android KitKat and lower) cannot scale with mipmaps properly,
+  // and mipmaps are used only for medium and high filter qualities.
+ DCHECK(filter_quality >= kMedium_SkFilterQuality);
+
+ // Convert to kN32 color type if necessary
+ SkPixmap n32_pixmap = source_pixmap;
+ SkBitmap n32_bitmap;
+ if (source_pixmap.info().colorType() == kRGBA_F16_SkColorType) {
+ SkImageInfo n32_image_info =
+ source_pixmap.info().makeColorType(kN32_SkColorType);
+ if (!n32_bitmap.tryAllocPixels(n32_image_info))
+ return false;
+ n32_pixmap = n32_bitmap.pixmap();
+ source_pixmap.readPixels(n32_pixmap, 0, 0);
+ }
+ // Scale
+ SkBitmap n32_resized_bitmap;
+ SkImageInfo n32_resize_info =
+ n32_pixmap.info().makeWH(scaled_pixmap->width(), scaled_pixmap->height());
+ if (!n32_resized_bitmap.tryAllocPixels(n32_resize_info))
+ return false;
+ if (!n32_pixmap.scalePixels(n32_resized_bitmap.pixmap(), filter_quality))
+ return false;
+ // Convert back to f16 and return
+ return n32_resized_bitmap.readPixels(*scaled_pixmap, 0, 0);
+}
+
+} // namespace cc
+
+#endif // CC_TILES_IMAGE_DECODE_CACHE_UTILS_CC_
diff --git a/chromium/cc/tiles/image_decode_cache_utils.h b/chromium/cc/tiles/image_decode_cache_utils.h
new file mode 100644
index 00000000000..12b21de039e
--- /dev/null
+++ b/chromium/cc/tiles/image_decode_cache_utils.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CC_TILES_IMAGE_DECODE_CACHE_UTILS_H_
+#define CC_TILES_IMAGE_DECODE_CACHE_UTILS_H_
+
+#include "build/build_config.h"
+#include "third_party/skia/include/core/SkFilterQuality.h"
+#include "third_party/skia/include/core/SkPixmap.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/build_info.h"
+#endif
+
+namespace cc {
+
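+// Helpers for the image decode caches, primarily for handling RGBA_F16
+// images on devices with limited F16 scaling support.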
+class ImageDecodeCacheUtils {
+ public:
+ static bool CanResizeF16Image(SkFilterQuality filter_quality) {
+#if defined(OS_ANDROID)
+    // Return false on Android KitKat or lower when filter quality is medium
+    // or high (i.e. when mipmaps would be used); return true otherwise. This
+    // is because of skia:8410, which causes a crash when trying to scale an
+    // f16 image on these configurations. crbug.com/876349
+ return (base::android::BuildInfo::GetInstance()->sdk_int() >=
+ base::android::SDK_VERSION_LOLLIPOP) ||
+ (filter_quality < kMedium_SkFilterQuality);
+#else
+ return true;
+#endif
+ }
+
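+  // Scales |source_pixmap| into |scaled_pixmap| (which must be RGBA_F16
+  // backed) by converting to N32, scaling at |filter_quality|, and converting
+  // back. Used when the device cannot scale F16 pixmaps directly.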
+ static bool ScaleToHalfFloatPixmapUsingN32Intermediate(
+ const SkPixmap& source_pixmap,
+ SkPixmap* scaled_pixmap,
+ SkFilterQuality filter_quality);
+};
+
+} // namespace cc
+
+#endif // CC_TILES_IMAGE_DECODE_CACHE_UTILS_H_
diff --git a/chromium/cc/tiles/picture_layer_tiling.cc b/chromium/cc/tiles/picture_layer_tiling.cc
index 4901cdbfa38..13b5f9396f8 100644
--- a/chromium/cc/tiles/picture_layer_tiling.cc
+++ b/chromium/cc/tiles/picture_layer_tiling.cc
@@ -94,7 +94,7 @@ void PictureLayerTiling::CreateMissingTilesInLiveTilesRect() {
include_borders);
iter; ++iter) {
TileMapKey key(iter.index());
- TileMap::iterator find = tiles_.find(key);
+ auto find = tiles_.find(key);
if (find != tiles_.end())
continue;
@@ -552,7 +552,7 @@ gfx::RectF PictureLayerTiling::CoverageIterator::texture_rect() const {
}
std::unique_ptr<Tile> PictureLayerTiling::TakeTileAt(int i, int j) {
- TileMap::iterator found = tiles_.find(TileMapKey(i, j));
+ auto found = tiles_.find(TileMapKey(i, j));
if (found == tiles_.end())
return nullptr;
std::unique_ptr<Tile> result = std::move(found->second);
@@ -976,7 +976,7 @@ void PictureLayerTiling::AsValueInto(
size_t PictureLayerTiling::GPUMemoryUsageInBytes() const {
size_t amount = 0;
- for (TileMap::const_iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+ for (auto it = tiles_.begin(); it != tiles_.end(); ++it) {
const Tile* tile = it->second.get();
amount += tile->GPUMemoryUsageInBytes();
}
diff --git a/chromium/cc/tiles/picture_layer_tiling_set.cc b/chromium/cc/tiles/picture_layer_tiling_set.cc
index dfb5bcadba6..38d7ad0e9fa 100644
--- a/chromium/cc/tiles/picture_layer_tiling_set.cc
+++ b/chromium/cc/tiles/picture_layer_tiling_set.cc
@@ -514,10 +514,10 @@ void PictureLayerTilingSet::UpdatePriorityRects(
// Finally, update our visible rect history. Note that we use the original
// visible rect here, since we want as accurate of a history as possible for
// stable skewports.
+ if (visible_rect_history_.size() == 2)
+ visible_rect_history_.pop_back();
visible_rect_history_.push_front(FrameVisibleRect(
visible_rect_in_layer_space_, current_frame_time_in_seconds));
- if (visible_rect_history_.size() > 2)
- visible_rect_history_.pop_back();
}
bool PictureLayerTilingSet::UpdateTilePriorities(
diff --git a/chromium/cc/tiles/picture_layer_tiling_set.h b/chromium/cc/tiles/picture_layer_tiling_set.h
index f87faa3068b..78e426e4bab 100644
--- a/chromium/cc/tiles/picture_layer_tiling_set.h
+++ b/chromium/cc/tiles/picture_layer_tiling_set.h
@@ -7,7 +7,7 @@
#include <stddef.h>
-#include <list>
+#include <deque>
#include <set>
#include <vector>
@@ -250,8 +250,8 @@ class CC_EXPORT PictureLayerTilingSet {
PictureLayerTilingClient* client_;
const float max_preraster_distance_;
// State saved for computing velocities based on finite differences.
- // .front() of the list refers to the most recent FrameVisibleRect.
- std::list<FrameVisibleRect> visible_rect_history_;
+ // .front() of the deque refers to the most recent FrameVisibleRect.
+ std::deque<FrameVisibleRect> visible_rect_history_;
StateSinceLastTilePriorityUpdate state_since_last_tile_priority_update_;
scoped_refptr<RasterSource> raster_source_;
diff --git a/chromium/cc/tiles/picture_layer_tiling_unittest.cc b/chromium/cc/tiles/picture_layer_tiling_unittest.cc
index 7f46861ab2f..7931cbb31e4 100644
--- a/chromium/cc/tiles/picture_layer_tiling_unittest.cc
+++ b/chromium/cc/tiles/picture_layer_tiling_unittest.cc
@@ -129,9 +129,7 @@ class PictureLayerTilingIteratorTest : public testing::Test {
tiling_->SetLiveTilesRect(live_tiles_rect);
std::vector<Tile*> tiles = tiling_->AllTilesForTesting();
- for (std::vector<Tile*>::iterator iter = tiles.begin();
- iter != tiles.end();
- ++iter) {
+ for (auto iter = tiles.begin(); iter != tiles.end(); ++iter) {
EXPECT_TRUE(live_tiles_rect.Intersects((*iter)->content_rect()));
}
}
diff --git a/chromium/cc/tiles/software_image_decode_cache.cc b/chromium/cc/tiles/software_image_decode_cache.cc
index 8fbb0bae81d..d75e6e2001f 100644
--- a/chromium/cc/tiles/software_image_decode_cache.cc
+++ b/chromium/cc/tiles/software_image_decode_cache.cc
@@ -8,9 +8,9 @@
#include "base/format_macros.h"
#include "base/macros.h"
-#include "base/memory/memory_coordinator_client_registry.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "cc/base/devtools_instrumentation.h"
#include "cc/base/histograms.h"
@@ -30,8 +30,6 @@ namespace {
// Depending on the memory state of the system, we limit the amount of items
// differently.
const size_t kNormalMaxItemsInCacheForSoftware = 1000;
-const size_t kThrottledMaxItemsInCacheForSoftware = 100;
-const size_t kSuspendedMaxItemsInCacheForSoftware = 0;
class AutoRemoveKeyFromTaskMap {
public:
@@ -155,8 +153,6 @@ SoftwareImageDecodeCache::SoftwareImageDecodeCache(
this, "cc::SoftwareImageDecodeCache",
base::ThreadTaskRunnerHandle::Get());
}
- // Register this component with base::MemoryCoordinatorClientRegistry.
- base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
memory_pressure_listener_.reset(new base::MemoryPressureListener(
base::BindRepeating(&SoftwareImageDecodeCache::OnMemoryPressure,
base::Unretained(this))));
@@ -166,9 +162,6 @@ SoftwareImageDecodeCache::~SoftwareImageDecodeCache() {
// It is safe to unregister, even if we didn't register in the constructor.
base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
this);
- // Unregister this component with memory_coordinator::ClientRegistry.
- base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
-
// TODO(vmpstr): If we don't have a client name, it may cause problems in
// unittests, since most tests don't set the name but some do. The UMA system
// expects the name to be always the same. This assertion is violated in the
@@ -657,8 +650,8 @@ bool SoftwareImageDecodeCache::OnMemoryDump(
int image_id = static_cast<int>(image_pair.first.frame_key().hash());
CacheEntry* entry = image_pair.second.get();
DCHECK(entry);
- // We might not have memory for this cache entry, depending on where int
- // he CacheEntry lifecycle we are. If we don't have memory, then we don't
+ // We might not have memory for this cache entry, depending on where in
+ // the CacheEntry lifecycle we are. If we don't have memory, then we don't
// have to record it in the dump.
if (!entry->memory)
continue;
@@ -684,31 +677,6 @@ bool SoftwareImageDecodeCache::OnMemoryDump(
return true;
}
-void SoftwareImageDecodeCache::OnMemoryStateChange(base::MemoryState state) {
- {
- base::AutoLock hold(lock_);
- switch (state) {
- case base::MemoryState::NORMAL:
- max_items_in_cache_ = kNormalMaxItemsInCacheForSoftware;
- break;
- case base::MemoryState::THROTTLED:
- max_items_in_cache_ = kThrottledMaxItemsInCacheForSoftware;
- break;
- case base::MemoryState::SUSPENDED:
- max_items_in_cache_ = kSuspendedMaxItemsInCacheForSoftware;
- break;
- case base::MemoryState::UNKNOWN:
- NOTREACHED();
- return;
- }
- }
-}
-
-void SoftwareImageDecodeCache::OnPurgeMemory() {
- base::AutoLock lock(lock_);
- ReduceCacheUsageUntilWithinLimit(0);
-}
-
void SoftwareImageDecodeCache::OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level) {
base::AutoLock lock(lock_);
diff --git a/chromium/cc/tiles/software_image_decode_cache.h b/chromium/cc/tiles/software_image_decode_cache.h
index 84e6c69bc40..30040d1da66 100644
--- a/chromium/cc/tiles/software_image_decode_cache.h
+++ b/chromium/cc/tiles/software_image_decode_cache.h
@@ -11,7 +11,6 @@
#include <unordered_map>
#include "base/containers/mru_cache.h"
-#include "base/memory/memory_coordinator_client.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/ref_counted.h"
#include "base/numerics/safe_math.h"
@@ -25,8 +24,7 @@ namespace cc {
class CC_EXPORT SoftwareImageDecodeCache
: public ImageDecodeCache,
- public base::trace_event::MemoryDumpProvider,
- public base::MemoryCoordinatorClient {
+ public base::trace_event::MemoryDumpProvider {
public:
using Utils = SoftwareImageDecodeCacheUtils;
using CacheKey = Utils::CacheKey;
@@ -114,12 +112,6 @@ class CC_EXPORT SoftwareImageDecodeCache
// reduced within the given limit.
void ReduceCacheUsageUntilWithinLimit(size_t limit);
- // Overriden from base::MemoryCoordinatorClient.
- void OnMemoryStateChange(base::MemoryState state) override;
- void OnPurgeMemory() override;
-
- // TODO(gyuyoung): OnMemoryPressure is deprecated. So this should be removed
- // when the memory coordinator is enabled by default.
void OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level);
diff --git a/chromium/cc/tiles/software_image_decode_cache_unittest_combinations.cc b/chromium/cc/tiles/software_image_decode_cache_unittest_combinations.cc
index 9a308bde9f2..5052a2a5d21 100644
--- a/chromium/cc/tiles/software_image_decode_cache_unittest_combinations.cc
+++ b/chromium/cc/tiles/software_image_decode_cache_unittest_combinations.cc
@@ -93,6 +93,15 @@ class RGBA4444Cache : public virtual BaseTest {
}
};
+class RGBA_F16Cache : public virtual BaseTest {
+ protected:
+ std::unique_ptr<SoftwareImageDecodeCache> CreateCache() override {
+ return std::make_unique<SoftwareImageDecodeCache>(
+ kRGBA_F16_SkColorType, kLockedMemoryLimitBytes,
+ PaintImage::kDefaultGeneratorClientId);
+ }
+};
+
class AtRaster : public virtual BaseTest {
protected:
CacheEntryResult GenerateCacheEntry(const DrawImage& image) override {
@@ -126,6 +135,9 @@ class Predecode : public virtual BaseTest {
const DrawImage& draw_image,
const gfx::Size& expected_size) override {
auto decoded = cache().GetDecodedImageForDraw(draw_image);
+ EXPECT_TRUE(SkColorSpace::Equals(
+ decoded.image()->colorSpace(),
+ draw_image.target_color_space().ToSkColorSpace().get()));
SCOPED_TRACE(base::StringPrintf("Failure from line %d", line));
EXPECT_EQ(decoded.image()->width(), expected_size.width());
EXPECT_EQ(decoded.image()->height(), expected_size.height());
@@ -140,6 +152,17 @@ class NoDecodeToScaleSupport : public virtual BaseTest {
}
};
+class NoDecodeToScaleSupportF16 : public virtual BaseTest {
+ protected:
+ PaintImage CreatePaintImage(const gfx::Size& size) override {
+ PaintImage paint_image = CreateDiscardablePaintImage(
+ size, GetColorSpace().ToSkColorSpace(),
+ true /*allocate_encoded_memory*/, PaintImage::kInvalidId,
+ kRGBA_F16_SkColorType);
+ return paint_image;
+ }
+};
+
class DefaultColorSpace : public virtual BaseTest {
protected:
gfx::ColorSpace GetColorSpace() override {
@@ -155,6 +178,13 @@ class ExoticColorSpace : public virtual BaseTest {
}
};
+class WideGamutCanvasColorSpace : public virtual BaseTest {
+ protected:
+ gfx::ColorSpace GetColorSpace() override {
+ return gfx::ColorSpace(gfx::ColorSpace::PrimaryID::SMPTEST432_1, // P3
+ gfx::ColorSpace::TransferID::LINEAR);
+ }
+};
class SoftwareImageDecodeCacheTest_Typical : public N32Cache,
public Predecode,
public NoDecodeToScaleSupport,
@@ -360,69 +390,81 @@ TEST_F(SoftwareImageDecodeCacheTest_AtRaster,
EXPECT_EQ(3u, cache().GetNumCacheEntriesForTesting());
}
-class SoftwareImageDecodeCacheTest_RGBA4444 : public RGBA4444Cache,
- public Predecode,
- public NoDecodeToScaleSupport,
- public DefaultColorSpace {};
+class SoftwareImageDecodeCacheTest_ExoticColorSpace
+ : public N32Cache,
+ public Predecode,
+ public NoDecodeToScaleSupport,
+ public ExoticColorSpace {};
-TEST_F(SoftwareImageDecodeCacheTest_RGBA4444, AlwaysUseOriginalDecode) {
+TEST_F(SoftwareImageDecodeCacheTest_ExoticColorSpace,
+ UseClosestAvailableDecode) {
auto draw_image_50 = CreateDrawImageForScale(0.5f);
auto result = GenerateCacheEntry(draw_image_50);
+
EXPECT_TRUE(result.has_task);
EXPECT_TRUE(result.needs_unref);
+ // Clear the cache to eliminate the transient 1.f scale from the cache.
cache().ClearCache();
- VerifyEntryExists(__LINE__, draw_image_50, gfx::Size(512, 512));
+ VerifyEntryExists(__LINE__, draw_image_50, gfx::Size(256, 256));
EXPECT_EQ(1u, cache().GetNumCacheEntriesForTesting());
auto draw_image_125 = CreateDrawImageForScale(0.125f);
result = GenerateCacheEntry(draw_image_125);
- EXPECT_FALSE(result.has_task);
+ EXPECT_TRUE(result.has_task);
EXPECT_TRUE(result.needs_unref);
- VerifyEntryExists(__LINE__, draw_image_125, gfx::Size(512, 512));
+ VerifyEntryExists(__LINE__, draw_image_125, gfx::Size(64, 64));
// We didn't clear the cache the second time, and should only expect to find
- // one entry: 1.0 scale.
- EXPECT_EQ(1u, cache().GetNumCacheEntriesForTesting());
+ // these entries: 0.5 scale and 0.125 scale.
+ EXPECT_EQ(2u, cache().GetNumCacheEntriesForTesting());
cache().UnrefImage(draw_image_50);
cache().UnrefImage(draw_image_125);
}
-TEST_F(SoftwareImageDecodeCacheTest_RGBA4444,
- AlwaysUseOriginalDecodeEvenSubrected) {
- auto draw_image_50 = CreateDrawImageForScale(0.5f, SkIRect::MakeWH(10, 10));
+class SoftwareImageDecodeCacheTest_F16_ExoticColorSpace
+ : public RGBA_F16Cache,
+ public Predecode,
+ public NoDecodeToScaleSupportF16,
+ public ExoticColorSpace {};
+
+TEST_F(SoftwareImageDecodeCacheTest_F16_ExoticColorSpace,
+ UseClosestAvailableDecode_F16_ExoticColorSpace) {
+ auto draw_image_50 = CreateDrawImageForScale(0.5f);
auto result = GenerateCacheEntry(draw_image_50);
EXPECT_TRUE(result.has_task);
EXPECT_TRUE(result.needs_unref);
+ // Clear the cache to eliminate the transient 1.f scale from the cache.
cache().ClearCache();
- VerifyEntryExists(__LINE__, draw_image_50, gfx::Size(512, 512));
+ VerifyEntryExists(__LINE__, draw_image_50, gfx::Size(256, 256));
+ EXPECT_EQ(kRGBA_F16_SkColorType, draw_image_50.paint_image().GetColorType());
EXPECT_EQ(1u, cache().GetNumCacheEntriesForTesting());
- auto draw_image_125 =
- CreateDrawImageForScale(0.125f, SkIRect::MakeWH(20, 20));
+ auto draw_image_125 = CreateDrawImageForScale(0.125f);
result = GenerateCacheEntry(draw_image_125);
- EXPECT_FALSE(result.has_task);
+ EXPECT_TRUE(result.has_task);
EXPECT_TRUE(result.needs_unref);
- VerifyEntryExists(__LINE__, draw_image_125, gfx::Size(512, 512));
+ VerifyEntryExists(__LINE__, draw_image_125, gfx::Size(64, 64));
+ EXPECT_EQ(kRGBA_F16_SkColorType, draw_image_125.paint_image().GetColorType());
// We didn't clear the cache the second time, and should only expect to find
- // one entry: 1.0 scale.
- EXPECT_EQ(1u, cache().GetNumCacheEntriesForTesting());
+ // these entries: 0.5 scale and 0.125 scale.
+ EXPECT_EQ(2u, cache().GetNumCacheEntriesForTesting());
cache().UnrefImage(draw_image_50);
cache().UnrefImage(draw_image_125);
}
-class SoftwareImageDecodeCacheTest_ExoticColorSpace
- : public N32Cache,
+class SoftwareImageDecodeCacheTest_F16_WideGamutCanvasColorSpace
+ : public RGBA_F16Cache,
public Predecode,
- public NoDecodeToScaleSupport,
- public ExoticColorSpace {};
+ public NoDecodeToScaleSupportF16,
+ public WideGamutCanvasColorSpace {};
-TEST_F(SoftwareImageDecodeCacheTest_ExoticColorSpace,
- UseClosestAvailableDecode) {
+TEST_F(SoftwareImageDecodeCacheTest_F16_WideGamutCanvasColorSpace,
+ UseClosestAvailableDecode_F16_WideGamutCanvasColorSpace) {
auto draw_image_50 = CreateDrawImageForScale(0.5f);
auto result = GenerateCacheEntry(draw_image_50);
EXPECT_TRUE(result.has_task);
@@ -431,6 +473,7 @@ TEST_F(SoftwareImageDecodeCacheTest_ExoticColorSpace,
// Clear the cache to eliminate the transient 1.f scale from the cache.
cache().ClearCache();
VerifyEntryExists(__LINE__, draw_image_50, gfx::Size(256, 256));
+ EXPECT_EQ(kRGBA_F16_SkColorType, draw_image_50.paint_image().GetColorType());
EXPECT_EQ(1u, cache().GetNumCacheEntriesForTesting());
auto draw_image_125 = CreateDrawImageForScale(0.125f);
@@ -438,6 +481,7 @@ TEST_F(SoftwareImageDecodeCacheTest_ExoticColorSpace,
EXPECT_TRUE(result.has_task);
EXPECT_TRUE(result.needs_unref);
VerifyEntryExists(__LINE__, draw_image_125, gfx::Size(64, 64));
+ EXPECT_EQ(kRGBA_F16_SkColorType, draw_image_125.paint_image().GetColorType());
// We didn't clear the cache the second time, and should only expect to find
// these entries: 0.5 scale and 0.125 scale.
diff --git a/chromium/cc/tiles/software_image_decode_cache_utils.cc b/chromium/cc/tiles/software_image_decode_cache_utils.cc
index 6e4c4eb233f..58bf110a3a6 100644
--- a/chromium/cc/tiles/software_image_decode_cache_utils.cc
+++ b/chromium/cc/tiles/software_image_decode_cache_utils.cc
@@ -136,9 +136,16 @@ SoftwareImageDecodeCacheUtils::GenerateCacheEntryFromCandidate(
DCHECK(!key.is_nearest_neighbor());
SkPixmap target_pixmap(target_info, target_pixels->data(),
target_info.minRowBytes());
- // Always use medium quality for scaling.
- result = decoded_pixmap.scalePixels(target_pixmap, kMedium_SkFilterQuality);
+ SkFilterQuality filter_quality = kMedium_SkFilterQuality;
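+  // Devices that cannot scale F16 pixmaps directly (see skia:8410) go through
+  // an N32 intermediate instead.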
+ if (decoded_pixmap.colorType() == kRGBA_F16_SkColorType &&
+ !ImageDecodeCacheUtils::CanResizeF16Image(filter_quality)) {
+ result = ImageDecodeCacheUtils::ScaleToHalfFloatPixmapUsingN32Intermediate(
+ decoded_pixmap, &target_pixmap, filter_quality);
+ } else {
+ result = decoded_pixmap.scalePixels(target_pixmap, filter_quality);
+ }
DCHECK(result) << key.ToString();
+
return std::make_unique<CacheEntry>(
target_info.makeColorSpace(candidate_image.image()->refColorSpace()),
std::move(target_pixels),
@@ -182,14 +189,12 @@ SoftwareImageDecodeCacheUtils::CacheKey::FromDrawImage(const DrawImage& image,
// If any of the following conditions hold, then use at most low filter
// quality and adjust the target size to match the original image:
// - Quality is none: We need a pixelated image, so we can't upgrade it.
- // - Format is 4444: Skia doesn't support scaling these, so use low
- // filter quality.
// - Mip level is 0: The required mip is the original image, so just use low
// filter quality.
// - Matrix is not decomposable: There's perspective on this image and we
// can't determine the size, so use the original.
- if (is_nearest_neighbor || color_type == kARGB_4444_SkColorType ||
- mip_level == 0 || !image.matrix_is_decomposable()) {
+ if (is_nearest_neighbor || mip_level == 0 ||
+ !image.matrix_is_decomposable()) {
type = kOriginal;
// Update the size to be the original image size.
target_size =
diff --git a/chromium/cc/tiles/software_image_decode_cache_utils.h b/chromium/cc/tiles/software_image_decode_cache_utils.h
index 686f9eda125..835d68c8492 100644
--- a/chromium/cc/tiles/software_image_decode_cache_utils.h
+++ b/chromium/cc/tiles/software_image_decode_cache_utils.h
@@ -12,6 +12,7 @@
#include "cc/paint/draw_image.h"
#include "cc/paint/paint_image.h"
#include "cc/raster/tile_task.h"
+#include "cc/tiles/image_decode_cache_utils.h"
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/core/SkSize.h"
diff --git a/chromium/cc/tiles/tile_draw_info.h b/chromium/cc/tiles/tile_draw_info.h
index c401258c103..aab93183734 100644
--- a/chromium/cc/tiles/tile_draw_info.h
+++ b/chromium/cc/tiles/tile_draw_info.h
@@ -83,10 +83,6 @@ class CC_EXPORT TileDrawInfo {
const ResourcePool::InUsePoolResource& GetResource();
- inline bool has_compressed_resource() const {
- return resource_ ? IsResourceFormatCompressed(resource_.format()) : false;
- }
-
bool is_checker_imaged() const {
DCHECK(!resource_is_checker_imaged_ || resource_);
return resource_is_checker_imaged_;
diff --git a/chromium/cc/tiles/tile_manager.cc b/chromium/cc/tiles/tile_manager.cc
index 6761f8d66a0..8f076724eb5 100644
--- a/chromium/cc/tiles/tile_manager.cc
+++ b/chromium/cc/tiles/tile_manager.cc
@@ -253,8 +253,7 @@ void InsertNodesForRasterTask(TaskGraph* graph,
size_t dependencies = 0u;
// Insert image decode tasks.
- for (TileTask::Vector::const_iterator it = decode_tasks.begin();
- it != decode_tasks.end(); ++it) {
+ for (auto it = decode_tasks.begin(); it != decode_tasks.end(); ++it) {
TileTask* decode_task = it->get();
// Skip if already decoded.
@@ -264,11 +263,10 @@ void InsertNodesForRasterTask(TaskGraph* graph,
dependencies++;
// Add decode task if it doesn't already exist in graph.
- TaskGraph::Node::Vector::iterator decode_it =
- std::find_if(graph->nodes.begin(), graph->nodes.end(),
- [decode_task](const TaskGraph::Node& node) {
- return node.task == decode_task;
- });
+ auto decode_it = std::find_if(graph->nodes.begin(), graph->nodes.end(),
+ [decode_task](const TaskGraph::Node& node) {
+ return node.task == decode_task;
+ });
// In rare circumstances, a background category task may come in before a
// foreground category task. In these cases, upgrade any background category
@@ -328,6 +326,36 @@ class TaskSetFinishedTaskImpl : public TileTask {
DISALLOW_COPY_AND_ASSIGN(TaskSetFinishedTaskImpl);
};
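+// Final tile task scheduled after all raster work: on the worker thread it
+// asks the RasterBufferProvider whether raster-finished GPU queries are still
+// pending and reports the result back on the compositor task runner.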
+class DidFinishRunningAllTilesTask : public TileTask {
+ public:
+ using CompletionCb = base::OnceCallback<void(bool has_pending_queries)>;
+ DidFinishRunningAllTilesTask(base::SequencedTaskRunner* task_runner,
+ RasterBufferProvider* raster_buffer_provider,
+ CompletionCb completion_cb)
+ : TileTask(false /* supports_concurrent_execution */),
+ task_runner_(task_runner),
+ raster_buffer_provider_(raster_buffer_provider),
+ completion_cb_(std::move(completion_cb)) {}
+
+ void RunOnWorkerThread() override {
+    TRACE_EVENT0("cc", "DidFinishRunningAllTilesTask::RunOnWorkerThread");
+ bool has_pending_queries =
+ raster_buffer_provider_->CheckRasterFinishedQueries();
+ task_runner_->PostTask(FROM_HERE, base::BindOnce(std::move(completion_cb_),
+ has_pending_queries));
+ }
+
+ void OnTaskCompleted() override {}
+
+ protected:
+ ~DidFinishRunningAllTilesTask() override = default;
+
+ private:
+ base::SequencedTaskRunner* task_runner_;
+ RasterBufferProvider* raster_buffer_provider_;
+ CompletionCb completion_cb_;
+};
+
} // namespace
RasterTaskCompletionStats::RasterTaskCompletionStats()
@@ -404,6 +432,7 @@ void TileManager::FinishTasksAndCleanUp() {
signals_check_notifier_.Cancel();
task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();
ready_to_draw_callback_weak_ptr_factory_.InvalidateWeakPtrs();
+ check_pending_tile_queries_callback_.Cancel();
raster_buffer_provider_ = nullptr;
// Ask the tracker to drop any locked decodes since we will be destroying the
@@ -459,13 +488,14 @@ void TileManager::DidFinishRunningTileTasksRequiredForDraw() {
signals_check_notifier_.Schedule();
}
-void TileManager::DidFinishRunningAllTileTasks() {
+void TileManager::DidFinishRunningAllTileTasks(bool has_pending_queries) {
TRACE_EVENT0("cc", "TileManager::DidFinishRunningAllTileTasks");
TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
DCHECK(resource_pool_);
DCHECK(tile_task_manager_);
has_scheduled_tile_tasks_ = false;
+ has_pending_queries_ = has_pending_queries;
if (all_tiles_that_need_to_be_rasterized_are_scheduled_ &&
!resource_pool_->ResourceUsageTooHigh()) {
@@ -960,8 +990,13 @@ void TileManager::ScheduleTasks(PrioritizedWorkToSchedule work_to_schedule) {
scoped_refptr<TileTask> required_for_draw_done_task =
CreateTaskSetFinishedTask(
&TileManager::DidFinishRunningTileTasksRequiredForDraw);
+
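+  // The all-done task now also reports whether raster-finished GPU queries
+  // are still pending when it runs.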
+ auto all_done_cb =
+ base::BindOnce(&TileManager::DidFinishRunningAllTileTasks,
+ task_set_finished_weak_ptr_factory_.GetWeakPtr());
scoped_refptr<TileTask> all_done_task =
- CreateTaskSetFinishedTask(&TileManager::DidFinishRunningAllTileTasks);
+ base::MakeRefCounted<DidFinishRunningAllTilesTask>(
+ task_runner_, raster_buffer_provider_, std::move(all_done_cb));
// Build a new task queue containing all task currently needed. Tasks
// are added in order of priority, highest priority task first.
@@ -1352,6 +1387,34 @@ bool TileManager::IsReadyToDraw() const {
RasterTilePriorityQueue::Type::REQUIRED_FOR_DRAW);
}
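+// Schedules a one-shot, 100ms-delayed poll of the raster buffer provider's
+// pending raster-finished queries; CheckRasterFinishedQueries re-arms it
+// while queries remain pending.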
+void TileManager::ScheduleCheckRasterFinishedQueries() {
+ DCHECK(has_pending_queries_);
+
+ if (!check_pending_tile_queries_callback_.IsCancelled())
+ return;
+
+ check_pending_tile_queries_callback_.Reset(base::Bind(
+ &TileManager::CheckRasterFinishedQueries, base::Unretained(this)));
+ task_runner_->PostDelayedTask(FROM_HERE,
+ check_pending_tile_queries_callback_.callback(),
+ base::TimeDelta::FromMilliseconds(100));
+}
+
+void TileManager::CheckRasterFinishedQueries() {
+ check_pending_tile_queries_callback_.Cancel();
+
+ if (!has_pending_queries_)
+ return;
+
+ // Raster tasks are in progress. The queries will be polled once they finish.
+ if (has_scheduled_tile_tasks_ || !signals_.all_tile_tasks_completed)
+ return;
+
+ has_pending_queries_ = raster_buffer_provider_->CheckRasterFinishedQueries();
+ if (has_pending_queries_)
+ ScheduleCheckRasterFinishedQueries();
+}
+
void TileManager::FlushAndIssueSignals() {
TRACE_EVENT0("cc", "TileManager::FlushAndIssueSignals");
tile_task_manager_->CheckForCompletedTasks();
@@ -1391,6 +1454,10 @@ void TileManager::IssueSignals() {
if (!has_scheduled_tile_tasks_) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
"TileManager::IssueSignals - all tile tasks completed");
+
+ if (has_pending_queries_)
+ ScheduleCheckRasterFinishedQueries();
+
signals_.did_notify_all_tile_tasks_completed = true;
client_->NotifyAllTileTasksCompleted();
}
diff --git a/chromium/cc/tiles/tile_manager.h b/chromium/cc/tiles/tile_manager.h
index 469b21f458a..e475cdf52b0 100644
--- a/chromium/cc/tiles/tile_manager.h
+++ b/chromium/cc/tiles/tile_manager.h
@@ -369,7 +369,7 @@ class CC_EXPORT TileManager : CheckerImageTrackerClient {
void DidFinishRunningTileTasksRequiredForActivation();
void DidFinishRunningTileTasksRequiredForDraw();
- void DidFinishRunningAllTileTasks();
+ void DidFinishRunningAllTileTasks(bool has_pending_queries);
scoped_refptr<TileTask> CreateTaskSetFinishedTask(
void (TileManager::*callback)());
@@ -397,6 +397,8 @@ class CC_EXPORT TileManager : CheckerImageTrackerClient {
void FlushAndIssueSignals();
void CheckPendingGpuWorkAndIssueSignals();
void IssueSignals();
+ void ScheduleCheckRasterFinishedQueries();
+ void CheckRasterFinishedQueries();
TileManagerClient* client_;
base::SequencedTaskRunner* task_runner_;
@@ -452,6 +454,11 @@ class CC_EXPORT TileManager : CheckerImageTrackerClient {
GURL active_url_;
+ // The callback scheduled to poll whether the GPU side work for pending tiles
+ // has completed.
+ bool has_pending_queries_ = false;
+ base::CancelableClosure check_pending_tile_queries_callback_;
+
// We need two WeakPtrFactory objects as the invalidation pattern of each is
// different. The |task_set_finished_weak_ptr_factory_| is invalidated any
// time new tasks are scheduled, preventing a race when the callback has
diff --git a/chromium/cc/tiles/tile_manager_unittest.cc b/chromium/cc/tiles/tile_manager_unittest.cc
index 1925b063d86..6c7fe87703b 100644
--- a/chromium/cc/tiles/tile_manager_unittest.cc
+++ b/chromium/cc/tiles/tile_manager_unittest.cc
@@ -3344,5 +3344,34 @@ TEST_F(DecodedImageTrackerTileManagerTest, DecodedImageTrackerDropsLocksOnUse) {
.NumLockedImagesForTesting());
}
+class TileManagerCheckRasterQueriesTest : public TileManagerTest {
+ public:
+ void SetUp() override {
+ TileManagerTest::SetUp();
+ host_impl()->tile_manager()->SetRasterBufferProviderForTesting(
+ &raster_buffer_provider_);
+ }
+
+ protected:
+ class MockRasterBufferProvider : public FakeRasterBufferProviderImpl {
+ public:
+ MOCK_METHOD0(CheckRasterFinishedQueries, bool());
+ };
+
+ MockRasterBufferProvider raster_buffer_provider_;
+};
+
+TEST_F(TileManagerCheckRasterQueriesTest,
+ ChecksRasterQueriesInAllTilesDoneTask) {
+ base::RunLoop run_loop;
+ EXPECT_FALSE(host_impl()->tile_manager()->HasScheduledTileTasksForTesting());
+ EXPECT_CALL(MockHostImpl(), NotifyAllTileTasksCompleted())
+ .WillOnce(testing::Invoke([&run_loop]() { run_loop.Quit(); }));
+ EXPECT_CALL(raster_buffer_provider_, CheckRasterFinishedQueries()).Times(1);
+ host_impl()->tile_manager()->PrepareTiles(host_impl()->global_tile_state());
+ EXPECT_TRUE(host_impl()->tile_manager()->HasScheduledTileTasksForTesting());
+ run_loop.Run();
+}
+
} // namespace
} // namespace cc
diff --git a/chromium/cc/trees/damage_tracker.cc b/chromium/cc/trees/damage_tracker.cc
index 56f03bfa2d6..925069e0643 100644
--- a/chromium/cc/trees/damage_tracker.cc
+++ b/chromium/cc/trees/damage_tracker.cc
@@ -213,8 +213,8 @@ DamageTracker::LayerRectMapData& DamageTracker::RectDataForLayer(
bool* layer_is_new) {
LayerRectMapData data(layer_id);
- SortedRectMapForLayers::iterator it = std::lower_bound(
- rect_history_for_layers_.begin(), rect_history_for_layers_.end(), data);
+ auto it = std::lower_bound(rect_history_for_layers_.begin(),
+ rect_history_for_layers_.end(), data);
if (it == rect_history_for_layers_.end() || it->layer_id_ != layer_id) {
*layer_is_new = true;
@@ -229,9 +229,8 @@ DamageTracker::SurfaceRectMapData& DamageTracker::RectDataForSurface(
bool* surface_is_new) {
SurfaceRectMapData data(surface_id);
- SortedRectMapForSurfaces::iterator it =
- std::lower_bound(rect_history_for_surfaces_.begin(),
- rect_history_for_surfaces_.end(), data);
+ auto it = std::lower_bound(rect_history_for_surfaces_.begin(),
+ rect_history_for_surfaces_.end(), data);
if (it == rect_history_for_surfaces_.end() || it->surface_id_ != surface_id) {
*surface_is_new = true;
@@ -271,12 +270,10 @@ DamageTracker::DamageAccumulator DamageTracker::TrackDamageFromLeftoverRects() {
// So, these regions are now exposed on the target surface.
DamageAccumulator damage;
- SortedRectMapForLayers::iterator layer_cur_pos =
- rect_history_for_layers_.begin();
- SortedRectMapForLayers::iterator layer_copy_pos = layer_cur_pos;
- SortedRectMapForSurfaces::iterator surface_cur_pos =
- rect_history_for_surfaces_.begin();
- SortedRectMapForSurfaces::iterator surface_copy_pos = surface_cur_pos;
+ auto layer_cur_pos = rect_history_for_layers_.begin();
+ auto layer_copy_pos = layer_cur_pos;
+ auto surface_cur_pos = rect_history_for_surfaces_.begin();
+ auto surface_copy_pos = surface_cur_pos;
// Loop below basically implements a std::remove_if loop with an extra
// processing step (adding deleted rects to damage) for deleted items.
diff --git a/chromium/cc/trees/draw_property_utils.cc b/chromium/cc/trees/draw_property_utils.cc
index 666b849d014..7c9b86cb096 100644
--- a/chromium/cc/trees/draw_property_utils.cc
+++ b/chromium/cc/trees/draw_property_utils.cc
@@ -29,6 +29,11 @@ namespace draw_property_utils {
namespace {
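+// Rounds a fractional clip rect out to its enclosing integer rect, ignoring
+// floating-point error below kClipError so nearly-integral edges do not gain
+// an extra pixel of clip.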
+static gfx::Rect ToEnclosingClipRect(const gfx::RectF& clip_rect) {
+ constexpr float kClipError = 0.00001f;
+ return gfx::ToEnclosingRectIgnoringError(clip_rect, kClipError);
+}
+
static bool IsRootLayer(const Layer* layer) {
return !layer->parent();
}
@@ -129,10 +134,11 @@ static ConditionalClip ComputeCurrentClip(const ClipNode* clip_node,
const PropertyTrees* property_trees,
int target_transform_id,
int target_effect_id) {
- if (clip_node->transform_id != target_transform_id)
+ if (clip_node->transform_id != target_transform_id) {
return ComputeLocalRectInTargetSpace(clip_node->clip, property_trees,
clip_node->transform_id,
target_transform_id, target_effect_id);
+ }
const EffectTree& effect_tree = property_trees->effect_tree;
gfx::RectF current_clip = clip_node->clip;
@@ -183,7 +189,7 @@ static bool ApplyClipNodeToAccumulatedClip(const PropertyTrees* property_trees,
// Do the expansion.
gfx::RectF expanded_clip_in_expanding_space =
gfx::RectF(clip_node->clip_expander->MapRectReverse(
- gfx::ToEnclosingRect(accumulated_clip_rect_in_expanding_space),
+ ToEnclosingClipRect(accumulated_clip_rect_in_expanding_space),
property_trees));
// Put the expanded clip back into the original target space.
@@ -557,13 +563,13 @@ static void SetSurfaceClipRect(const ClipNode* parent_clip_node,
bool include_expanding_clips = false;
if (render_surface->EffectTreeIndex() == EffectTree::kContentsRootNodeId) {
render_surface->SetClipRect(
- gfx::ToEnclosingRect(clip_tree.Node(effect_node->clip_id)->clip));
+ ToEnclosingClipRect(clip_tree.Node(effect_node->clip_id)->clip));
} else {
ConditionalClip accumulated_clip_rect =
ComputeAccumulatedClip(property_trees, include_expanding_clips,
effect_node->clip_id, target_node->id);
render_surface->SetClipRect(
- gfx::ToEnclosingRect(accumulated_clip_rect.clip_rect));
+ ToEnclosingClipRect(accumulated_clip_rect.clip_rect));
}
}
@@ -641,7 +647,7 @@ static gfx::Rect LayerVisibleRect(PropertyTrees* property_trees,
gfx::RectF clip_in_layer_space = accumulated_clip_in_layer_space.clip_rect;
clip_in_layer_space.Offset(-layer->offset_to_transform_parent());
- gfx::Rect visible_rect = gfx::ToEnclosingRect(clip_in_layer_space);
+ gfx::Rect visible_rect = ToEnclosingClipRect(clip_in_layer_space);
visible_rect.Intersect(layer_content_rect);
return visible_rect;
}
@@ -916,7 +922,7 @@ void ComputeDrawPropertiesOfVisibleLayers(const LayerImplList* layer_list,
// is_clipped should be set before visible rect computation as it is used
// there.
layer->draw_properties().is_clipped = clip.is_clipped;
- layer->draw_properties().clip_rect = gfx::ToEnclosingRect(clip.clip_rect);
+ layer->draw_properties().clip_rect = ToEnclosingClipRect(clip.clip_rect);
layer->draw_properties().visible_layer_rect =
LayerVisibleRect(property_trees, layer);
}
@@ -949,8 +955,7 @@ void ComputeMaskDrawProperties(LayerImpl* mask_layer,
// is_clipped should be set before visible rect computation as it is used
// there.
mask_layer->draw_properties().is_clipped = clip.is_clipped;
- mask_layer->draw_properties().clip_rect =
- gfx::ToEnclosingRect(clip.clip_rect);
+ mask_layer->draw_properties().clip_rect = ToEnclosingClipRect(clip.clip_rect);
// Calculate actual visible layer rect for mask layers, since we could have
// tiled mask layers and the tile manager would need this info for rastering.
mask_layer->draw_properties().visible_layer_rect =
diff --git a/chromium/cc/trees/effect_node.cc b/chromium/cc/trees/effect_node.cc
index 3a0669ea25c..4bc3274497e 100644
--- a/chromium/cc/trees/effect_node.cc
+++ b/chromium/cc/trees/effect_node.cc
@@ -15,6 +15,7 @@ EffectNode::EffectNode()
stable_id(INVALID_STABLE_ID),
opacity(1.f),
screen_space_opacity(1.f),
+ backdrop_filter_quality(1.f),
blend_mode(SkBlendMode::kSrcOver),
has_render_surface(false),
cache_render_surface(false),
@@ -44,6 +45,7 @@ bool EffectNode::operator==(const EffectNode& other) const {
return id == other.id && parent_id == other.parent_id &&
stable_id == other.stable_id && opacity == other.opacity &&
screen_space_opacity == other.screen_space_opacity &&
+ backdrop_filter_quality == other.backdrop_filter_quality &&
has_render_surface == other.has_render_surface &&
cache_render_surface == other.cache_render_surface &&
has_copy_request == other.has_copy_request &&
@@ -80,6 +82,8 @@ void EffectNode::AsValueInto(base::trace_event::TracedValue* value) const {
value->SetInteger("parent_id", parent_id);
value->SetInteger("stable_id", stable_id);
value->SetDouble("opacity", opacity);
+ value->SetDouble("backdrop_filter_quality", backdrop_filter_quality);
+ value->SetString("blend_mode", SkBlendMode_Name(blend_mode));
value->SetBoolean("has_render_surface", has_render_surface);
value->SetBoolean("cache_render_surface", cache_render_surface);
value->SetBoolean("has_copy_request", has_copy_request);
diff --git a/chromium/cc/trees/effect_node.h b/chromium/cc/trees/effect_node.h
index 3609ad6ff7f..72d0c062e17 100644
--- a/chromium/cc/trees/effect_node.h
+++ b/chromium/cc/trees/effect_node.h
@@ -40,6 +40,7 @@ struct CC_EXPORT EffectNode {
FilterOperations filters;
FilterOperations background_filters;
+ float backdrop_filter_quality;
gfx::PointF filters_origin;
SkBlendMode blend_mode;
diff --git a/chromium/cc/trees/layer_tree_host.cc b/chromium/cc/trees/layer_tree_host.cc
index 5cb9cab1b4d..ecee592a25f 100644
--- a/chromium/cc/trees/layer_tree_host.cc
+++ b/chromium/cc/trees/layer_tree_host.cc
@@ -72,49 +72,55 @@ static base::AtomicSequenceNumber s_image_decode_sequence_number;
namespace cc {
LayerTreeHost::InitParams::InitParams() = default;
-
LayerTreeHost::InitParams::~InitParams() = default;
+LayerTreeHost::InitParams::InitParams(InitParams&&) = default;
+LayerTreeHost::InitParams& LayerTreeHost::InitParams::operator=(InitParams&&) =
+ default;
std::unique_ptr<LayerTreeHost> LayerTreeHost::CreateThreaded(
scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
- InitParams* params) {
- DCHECK(params->main_task_runner.get());
- DCHECK(impl_task_runner.get());
- DCHECK(params->settings);
- std::unique_ptr<LayerTreeHost> layer_tree_host(
- new LayerTreeHost(params, CompositorMode::THREADED));
- layer_tree_host->InitializeThreaded(params->main_task_runner,
- impl_task_runner);
+ InitParams params) {
+ DCHECK(params.settings);
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner =
+ params.main_task_runner;
+ DCHECK(main_task_runner);
+ DCHECK(impl_task_runner);
+ auto layer_tree_host = base::WrapUnique(
+ new LayerTreeHost(std::move(params), CompositorMode::THREADED));
+ layer_tree_host->InitializeThreaded(std::move(main_task_runner),
+ std::move(impl_task_runner));
return layer_tree_host;
}
-std::unique_ptr<LayerTreeHost>
-LayerTreeHost::CreateSingleThreaded(
+std::unique_ptr<LayerTreeHost> LayerTreeHost::CreateSingleThreaded(
LayerTreeHostSingleThreadClient* single_thread_client,
- InitParams* params) {
- DCHECK(params->settings);
- std::unique_ptr<LayerTreeHost> layer_tree_host(
- new LayerTreeHost(params, CompositorMode::SINGLE_THREADED));
+ InitParams params) {
+ DCHECK(params.settings);
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner =
+ params.main_task_runner;
+ auto layer_tree_host = base::WrapUnique(
+ new LayerTreeHost(std::move(params), CompositorMode::SINGLE_THREADED));
layer_tree_host->InitializeSingleThreaded(single_thread_client,
- params->main_task_runner);
+ std::move(main_task_runner));
return layer_tree_host;
}
-LayerTreeHost::LayerTreeHost(InitParams* params, CompositorMode mode)
+LayerTreeHost::LayerTreeHost(InitParams params, CompositorMode mode)
: micro_benchmark_controller_(this),
- image_worker_task_runner_(params->image_worker_task_runner),
- ukm_recorder_factory_(std::move(params->ukm_recorder_factory)),
+ image_worker_task_runner_(std::move(params.image_worker_task_runner)),
+ ukm_recorder_factory_(std::move(params.ukm_recorder_factory)),
compositor_mode_(mode),
ui_resource_manager_(std::make_unique<UIResourceManager>()),
- client_(params->client),
+ client_(params.client),
rendering_stats_instrumentation_(RenderingStatsInstrumentation::Create()),
- settings_(*params->settings),
+ settings_(*params.settings),
debug_state_(settings_.initial_debug_state),
id_(s_layer_tree_host_sequence_number.GetNext() + 1),
- task_graph_runner_(params->task_graph_runner),
+ task_graph_runner_(params.task_graph_runner),
content_source_id_(0),
event_listener_properties_(),
- mutator_host_(params->mutator_host) {
+ mutator_host_(params.mutator_host),
+ defer_commits_weak_ptr_factory_(this) {
DCHECK(task_graph_runner_);
DCHECK(!settings_.enable_checker_imaging || image_worker_task_runner_);
@@ -271,8 +277,8 @@ const LayerTreeDebugState& LayerTreeHost::GetDebugState() const {
return debug_state_;
}
-void LayerTreeHost::RequestMainFrameUpdate(VisualStateUpdate requested_update) {
- client_->UpdateLayerTreeHost(requested_update);
+void LayerTreeHost::RequestMainFrameUpdate() {
+ client_->UpdateLayerTreeHost();
}
// This function commits the LayerTreeHost to an impl tree. When modifying
@@ -366,6 +372,27 @@ void LayerTreeHost::FinishCommitOnImplThread(
micro_benchmark_controller_.ScheduleImplBenchmarks(host_impl);
property_trees_.ResetAllChangeTracking();
+
+ // Dump property trees and layers if run with:
+ // --vmodule=layer_tree_host=3
+ if (VLOG_IS_ON(3)) {
+ VLOG(3) << "After finishing commit on impl, the sync tree:";
+ // Because the property tree and layer list output can be verbose, the VLOG
+ // output is split by line to avoid line buffer limits on android.
+ VLOG(3) << "property trees:";
+ std::string property_trees;
+ base::JSONWriter::WriteWithOptions(
+ *sync_tree->property_trees()->AsTracedValue()->ToBaseValue(),
+ base::JSONWriter::OPTIONS_PRETTY_PRINT, &property_trees);
+ std::stringstream property_trees_stream(property_trees);
+ for (std::string line; std::getline(property_trees_stream, line);)
+ VLOG(3) << line;
+
+ VLOG(3) << "layers:";
+ std::stringstream layers_stream(host_impl->LayerListAsJson());
+ for (std::string line; std::getline(layers_stream, line);)
+ VLOG(3) << line;
+ }
}
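For reference, the dump added above is emitted only at vlog level 3 for this file; a typical invocation (assuming a Chromium build with verbose logging available) would run the browser with --enable-logging=stderr --vmodule=layer_tree_host=3.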
void LayerTreeHost::ImageDecodesFinished(
@@ -419,7 +446,7 @@ void LayerTreeHost::WillCommit() {
void LayerTreeHost::UpdateDeferCommitsInternal() {
- proxy_->SetDeferCommits(defer_commits_ ||
+ proxy_->SetDeferCommits(defer_commits_count_ > 0 ||
(settings_.enable_surface_synchronization &&
!local_surface_id_from_parent_.is_valid()));
}
@@ -507,11 +534,23 @@ void LayerTreeHost::DidLoseLayerTreeFrameSink() {
SetNeedsCommit();
}
-void LayerTreeHost::SetDeferCommits(bool defer_commits) {
- if (defer_commits_ == defer_commits)
- return;
- defer_commits_ = defer_commits;
- UpdateDeferCommitsInternal();
+ScopedDeferCommits::ScopedDeferCommits(LayerTreeHost* host)
+ : host_(host->defer_commits_weak_ptr_factory_.GetWeakPtr()) {
+ host->defer_commits_count_++;
+ host->UpdateDeferCommitsInternal();
+}
+
+ScopedDeferCommits::~ScopedDeferCommits() {
+ LayerTreeHost* host = host_.get();
+ if (host) {
+ DCHECK_GT(host->defer_commits_count_, 0u);
+ if (--host->defer_commits_count_ == 0)
+ host->UpdateDeferCommitsInternal();
+ }
+}
+
+std::unique_ptr<ScopedDeferCommits> LayerTreeHost::DeferCommits() {
+ return std::make_unique<ScopedDeferCommits>(this);
}
DISABLE_CFI_PERF
@@ -579,12 +618,6 @@ void LayerTreeHost::SetDebugState(
SetNeedsCommit();
}
-void LayerTreeHost::ResetGpuRasterizationTracking() {
- content_has_slow_paths_ = false;
- content_has_non_aa_paint_ = false;
- gpu_rasterization_histogram_recorded_ = false;
-}
-
void LayerTreeHost::SetHasGpuRasterizationTrigger(bool has_trigger) {
if (has_trigger == has_gpu_rasterization_trigger_)
return;
@@ -716,30 +749,6 @@ bool LayerTreeHost::DoUpdateLayers(Layer* root_layer) {
UpdateHudLayer(debug_state_.ShowHudInfo());
- Layer* root_scroll =
- PropertyTreeBuilder::FindFirstScrollableLayer(root_layer);
- Layer* page_scale_layer = viewport_layers_.page_scale.get();
- if (!page_scale_layer && root_scroll)
- page_scale_layer = root_scroll->parent();
-
- if (hud_layer_) {
- hud_layer_->PrepareForCalculateDrawProperties(device_viewport_size_,
- device_scale_factor_);
- // The HUD layer is managed outside the layer list sent to LayerTreeHost
- // and needs to have its property tree state set.
- if (IsUsingLayerLists() && root_layer_.get()) {
- hud_layer_->SetTransformTreeIndex(root_layer_->transform_tree_index());
- hud_layer_->SetEffectTreeIndex(root_layer_->effect_tree_index());
- hud_layer_->SetClipTreeIndex(root_layer_->clip_tree_index());
- hud_layer_->SetScrollTreeIndex(root_layer_->scroll_tree_index());
- hud_layer_->set_property_tree_sequence_number(
- root_layer_->property_tree_sequence_number());
- }
- }
-
- gfx::Transform identity_transform;
- LayerList update_layer_list;
-
// The non-layer-list mode is used when blink provides cc with a layer tree
// and cc needs to compute property trees from that.
// In layer lists mode, blink sends cc property trees directly so they do not
@@ -749,6 +758,12 @@ bool LayerTreeHost::DoUpdateLayers(Layer* root_layer) {
TRACE_EVENT0("cc", "LayerTreeHost::UpdateLayers::BuildPropertyTrees");
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug.cdp-perf"),
"LayerTreeHostCommon::ComputeVisibleRectsWithPropertyTrees");
+ Layer* root_scroll =
+ PropertyTreeBuilder::FindFirstScrollableLayer(root_layer);
+ Layer* page_scale_layer = viewport_layers_.page_scale.get();
+ if (!page_scale_layer && root_scroll)
+ page_scale_layer = root_scroll->parent();
+ gfx::Transform identity_transform;
PropertyTreeBuilder::BuildPropertyTrees(
root_layer, page_scale_layer, inner_viewport_scroll_layer(),
outer_viewport_scroll_layer(), overscroll_elasticity_element_id(),
@@ -757,63 +772,75 @@ bool LayerTreeHost::DoUpdateLayers(Layer* root_layer) {
TRACE_EVENT_INSTANT1("cc", "LayerTreeHost::UpdateLayers_BuiltPropertyTrees",
TRACE_EVENT_SCOPE_THREAD, "property_trees",
property_trees_.AsTracedValue());
- } else {
- TRACE_EVENT_INSTANT1("cc",
- "LayerTreeHost::UpdateLayers_ReceivedPropertyTrees",
- TRACE_EVENT_SCOPE_THREAD, "property_trees",
- property_trees_.AsTracedValue());
+ } else {
+ TRACE_EVENT_INSTANT1("cc",
+ "LayerTreeHost::UpdateLayers_ReceivedPropertyTrees",
+ TRACE_EVENT_SCOPE_THREAD, "property_trees",
+ property_trees_.AsTracedValue());
+ // The HUD layer is managed outside the layer list sent to LayerTreeHost
+ // and needs to have its property tree state set.
+ if (hud_layer_ && root_layer_.get()) {
+ hud_layer_->SetTransformTreeIndex(root_layer_->transform_tree_index());
+ hud_layer_->SetEffectTreeIndex(root_layer_->effect_tree_index());
+ hud_layer_->SetClipTreeIndex(root_layer_->clip_tree_index());
+ hud_layer_->SetScrollTreeIndex(root_layer_->scroll_tree_index());
+ hud_layer_->set_property_tree_sequence_number(
+ root_layer_->property_tree_sequence_number());
}
+ }
#if DCHECK_IS_ON()
- // Ensure property tree nodes were created for all layers. When using layer
- // lists, this can fail if blink doesn't setup layers or nodes correctly in
- // |PaintArtifactCompositor|. When not using layer lists, this can fail if
- // |PropertyTreeBuilder::BuildPropertyTrees| fails to create property tree
- // nodes.
- for (auto* layer : *this) {
- DCHECK(property_trees_.effect_tree.Node(layer->effect_tree_index()));
- DCHECK(
- property_trees_.transform_tree.Node(layer->transform_tree_index()));
- DCHECK(property_trees_.clip_tree.Node(layer->clip_tree_index()));
- DCHECK(property_trees_.scroll_tree.Node(layer->scroll_tree_index()));
- }
+ // Ensure property tree nodes were created for all layers. When using layer
+ // lists, this can fail if blink doesn't setup layers or nodes correctly in
+ // |PaintArtifactCompositor|. When not using layer lists, this can fail if
+ // |PropertyTreeBuilder::BuildPropertyTrees| fails to create property tree
+ // nodes.
+ for (auto* layer : *this) {
+ DCHECK(property_trees_.effect_tree.Node(layer->effect_tree_index()));
+ DCHECK(property_trees_.transform_tree.Node(layer->transform_tree_index()));
+ DCHECK(property_trees_.clip_tree.Node(layer->clip_tree_index()));
+ DCHECK(property_trees_.scroll_tree.Node(layer->scroll_tree_index()));
+ }
#endif
- draw_property_utils::UpdatePropertyTrees(this, &property_trees_);
- draw_property_utils::FindLayersThatNeedUpdates(this, &property_trees_,
- &update_layer_list);
-
- // Dump property trees useful for debugging --blink-gen-property-trees
- // flag. We care only about the renderer compositor.
- if (VLOG_IS_ON(3) && GetClientNameForMetrics() == std::string("Renderer")) {
- VLOG(3) << "CC Property Trees:";
- std::string out;
- base::JSONWriter::WriteWithOptions(
- *property_trees_.AsTracedValue()->ToBaseValue(),
- base::JSONWriter::OPTIONS_PRETTY_PRINT, &out);
- std::stringstream ss(out);
- while (!ss.eof()) {
- std::string line;
- std::getline(ss, line);
- VLOG(3) << line;
- }
-
- VLOG(3) << "CC Layer List:";
- for (auto* layer : *this) {
- VLOG(3) << "layer id " << layer->id();
- VLOG(3) << " element_id: " << layer->element_id();
- VLOG(3) << " bounds: " << layer->bounds().ToString();
- VLOG(3) << " opacity: " << layer->opacity();
- VLOG(3) << " position: " << layer->position().ToString();
- VLOG(3) << " draws_content: " << layer->DrawsContent();
- VLOG(3) << " scrollable: " << layer->scrollable();
- VLOG(3) << " contents_opaque: " << layer->contents_opaque();
- VLOG(3) << " transform_tree_index: " << layer->transform_tree_index();
- VLOG(3) << " clip_tree_index: " << layer->clip_tree_index();
- VLOG(3) << " effect_tree_index: " << layer->effect_tree_index();
- VLOG(3) << " scroll_tree_index: " << layer->scroll_tree_index();
- }
+ draw_property_utils::UpdatePropertyTrees(this, &property_trees_);
+
+ LayerList update_layer_list;
+ draw_property_utils::FindLayersThatNeedUpdates(this, &property_trees_,
+ &update_layer_list);
+
+ // Dump property trees and layers if run with:
+ // --vmodule=layer_tree_host=3
+ // This only prints output for the renderer.
+ if (VLOG_IS_ON(3) && GetClientNameForMetrics() == std::string("Renderer")) {
+ VLOG(3) << "After updating layers on the main thread:";
+ // Because the property tree and layer list output can be verbose, the VLOG
+ // output is split by line to avoid line buffer limits on android.
+ VLOG(3) << "property trees:";
+ std::string property_trees;
+ base::JSONWriter::WriteWithOptions(
+ *property_trees_.AsTracedValue()->ToBaseValue(),
+ base::JSONWriter::OPTIONS_PRETTY_PRINT, &property_trees);
+ std::stringstream property_trees_stream(property_trees);
+ for (std::string line; std::getline(property_trees_stream, line);)
+ VLOG(3) << line;
+
+ VLOG(3) << "layers:";
+ for (auto* layer : *this) {
+ VLOG(3) << " layer id " << layer->id();
+ VLOG(3) << " element_id: " << layer->element_id();
+ VLOG(3) << " bounds: " << layer->bounds().ToString();
+ VLOG(3) << " opacity: " << layer->opacity();
+ VLOG(3) << " position: " << layer->position().ToString();
+ VLOG(3) << " draws_content: " << layer->DrawsContent();
+ VLOG(3) << " scrollable: " << layer->scrollable();
+ VLOG(3) << " contents_opaque: " << layer->contents_opaque();
+ VLOG(3) << " transform_tree_index: " << layer->transform_tree_index();
+ VLOG(3) << " clip_tree_index: " << layer->clip_tree_index();
+ VLOG(3) << " effect_tree_index: " << layer->effect_tree_index();
+ VLOG(3) << " scroll_tree_index: " << layer->scroll_tree_index();
}
+ }
bool painted_content_has_slow_paths = false;
bool painted_content_has_non_aa_paint = false;
@@ -842,13 +869,13 @@ bool LayerTreeHost::DoUpdateLayers(Layer* root_layer) {
return did_paint_content;
}
-void LayerTreeHost::ApplyViewportDeltas(ScrollAndScaleSet* info) {
+void LayerTreeHost::ApplyViewportDeltas(const ScrollAndScaleSet& info) {
gfx::Vector2dF inner_viewport_scroll_delta;
- if (info->inner_viewport_scroll.element_id)
- inner_viewport_scroll_delta = info->inner_viewport_scroll.scroll_delta;
+ if (info.inner_viewport_scroll.element_id)
+ inner_viewport_scroll_delta = info.inner_viewport_scroll.scroll_delta;
- if (inner_viewport_scroll_delta.IsZero() && info->page_scale_delta == 1.f &&
- info->elastic_overscroll_delta.IsZero() && !info->top_controls_delta)
+ if (inner_viewport_scroll_delta.IsZero() && info.page_scale_delta == 1.f &&
+ info.elastic_overscroll_delta.IsZero() && !info.top_controls_delta)
return;
// Preemptively apply the scroll offset and scale delta here before sending
@@ -861,21 +888,21 @@ void LayerTreeHost::ApplyViewportDeltas(ScrollAndScaleSet* info) {
inner_viewport_scroll_delta));
}
- ApplyPageScaleDeltaFromImplSide(info->page_scale_delta);
+ ApplyPageScaleDeltaFromImplSide(info.page_scale_delta);
SetElasticOverscrollFromImplSide(elastic_overscroll_ +
- info->elastic_overscroll_delta);
+ info.elastic_overscroll_delta);
// TODO(ccameron): pass the elastic overscroll here so that input events
// may be translated appropriately.
- client_->ApplyViewportDeltas(inner_viewport_scroll_delta, gfx::Vector2dF(),
- info->elastic_overscroll_delta,
- info->page_scale_delta,
- info->top_controls_delta);
+ client_->ApplyViewportChanges(
+ {inner_viewport_scroll_delta, info.elastic_overscroll_delta,
+ info.page_scale_delta, info.top_controls_delta});
SetNeedsUpdateLayers();
}
-void LayerTreeHost::RecordWheelAndTouchScrollingCount(ScrollAndScaleSet* info) {
- bool has_scrolled_by_wheel = info->has_scrolled_by_wheel;
- bool has_scrolled_by_touch = info->has_scrolled_by_touch;
+void LayerTreeHost::RecordWheelAndTouchScrollingCount(
+ const ScrollAndScaleSet& info) {
+ bool has_scrolled_by_wheel = info.has_scrolled_by_wheel;
+ bool has_scrolled_by_touch = info.has_scrolled_by_touch;
if (has_scrolled_by_wheel || has_scrolled_by_touch) {
client_->RecordWheelAndTouchScrollingCount(has_scrolled_by_wheel,
@@ -884,6 +911,7 @@ void LayerTreeHost::RecordWheelAndTouchScrollingCount(ScrollAndScaleSet* info) {
}
void LayerTreeHost::ApplyScrollAndScale(ScrollAndScaleSet* info) {
+ DCHECK(info);
for (auto& swap_promise : info->swap_promises) {
TRACE_EVENT_WITH_FLOW1("input,benchmark", "LatencyInfo.Flow",
TRACE_ID_DONT_MANGLE(swap_promise->TraceId()),
@@ -912,9 +940,13 @@ void LayerTreeHost::ApplyScrollAndScale(ScrollAndScaleSet* info) {
// This needs to happen after scroll deltas have been sent to prevent top
// controls from clamping the layout viewport both on the compositor and
// on the main thread.
- ApplyViewportDeltas(info);
+ ApplyViewportDeltas(*info);
- RecordWheelAndTouchScrollingCount(info);
+ RecordWheelAndTouchScrollingCount(*info);
+}
+
+void LayerTreeHost::RecordEndOfFrameMetrics(base::TimeTicks frame_begin_time) {
+ client_->RecordEndOfFrameMetrics(frame_begin_time);
}
const base::WeakPtr<InputHandler>& LayerTreeHost::GetInputHandler()
@@ -970,7 +1002,7 @@ void LayerTreeHost::SetLayerTreeMutator(
// from the main thread, which will not be the case if we're running in
// single-threaded mode.
if (!task_runner_provider_->HasImplThread()) {
- LOG(ERROR) << "LayerTreeMutator not supported in single-thread mode";
+ DLOG(ERROR) << "LayerTreeMutator not supported in single-thread mode";
return;
}
proxy_->SetMutator(std::move(mutator));
@@ -1010,7 +1042,9 @@ void LayerTreeHost::SetRootLayer(scoped_refptr<Layer> root_layer) {
// Reset gpu rasterization tracking.
// This flag is sticky until a new tree comes along.
- ResetGpuRasterizationTracking();
+ content_has_slow_paths_ = false;
+ content_has_non_aa_paint_ = false;
+ gpu_rasterization_histogram_recorded_ = false;
SetNeedsFullTreeSync();
}
@@ -1067,20 +1101,36 @@ void LayerTreeHost::SetEventListenerProperties(
if (event_listener_properties_[index] == properties)
return;
+ // If the mouse wheel event listener is blocking, then every layer in the
+ // layer tree sets a wheel event handler region to be its entire bounds,
+ // otherwise it sets it to empty.
+ //
+ // Thus when it changes, we might want to request every layer to push
+ // properties and recompute its wheel event handler region, since the
+ // computation is done in PushPropertiesTo. However neither
+ // SetSubtreePropertyChanged() nor SetNeedsFullTreeSync() do this, so
+ // it is unclear why we call them.
+  // It is also unclear why we don't recompute the wheel event handler region
+  // for all layers when the blocking state goes away, and why we mark all
+  // layers below the root layer as damaged.
+ // TODO(bokan): Sort out what should be set and why. https://crbug.com/881011
+ //
// TODO(sunxd): Remove NeedsFullTreeSync when computing mouse wheel event
// handler region is done.
- // We only do full tree sync if the mouse wheel event listener property
- // changes from kNone/kPassive to kBlocking/kBlockingAndPassive.
- if (event_class == EventListenerClass::kMouseWheel &&
- !(event_listener_properties_[index] ==
- EventListenerProperties::kBlocking ||
- event_listener_properties_[index] ==
- EventListenerProperties::kBlockingAndPassive) &&
- (properties == EventListenerProperties::kBlocking ||
- properties == EventListenerProperties::kBlockingAndPassive)) {
- if (root_layer())
- root_layer()->SetSubtreePropertyChanged();
- SetNeedsFullTreeSync();
+ if (event_class == EventListenerClass::kMouseWheel) {
+ bool new_property_is_blocking =
+ properties == EventListenerProperties::kBlocking ||
+ properties == EventListenerProperties::kBlockingAndPassive;
+ EventListenerProperties old_properties = event_listener_properties_[index];
+ bool old_property_is_blocking =
+ old_properties == EventListenerProperties::kBlocking ||
+ old_properties == EventListenerProperties::kBlockingAndPassive;
+
+ if (!old_property_is_blocking && new_property_is_blocking) {
+ if (root_layer())
+ root_layer()->SetSubtreePropertyChanged();
+ SetNeedsFullTreeSync();
+ }
}
event_listener_properties_[index] = properties;
@@ -1282,7 +1332,7 @@ void LayerTreeHost::UnregisterLayer(Layer* layer) {
mutator_host_->UnregisterElement(layer->element_id(),
ElementListType::ACTIVE);
}
- RemoveLayerShouldPushProperties(layer);
+ layers_that_should_push_properties_.erase(layer);
layer_id_map_.erase(layer->id());
}
@@ -1335,13 +1385,8 @@ void LayerTreeHost::RemoveLayerShouldPushProperties(Layer* layer) {
layers_that_should_push_properties_.erase(layer);
}
-std::unordered_set<Layer*>& LayerTreeHost::LayersThatShouldPushProperties() {
- return layers_that_should_push_properties_;
-}
-
-bool LayerTreeHost::LayerNeedsPushPropertiesForTesting(Layer* layer) const {
- return layers_that_should_push_properties_.find(layer) !=
- layers_that_should_push_properties_.end();
+void LayerTreeHost::ClearLayersThatShouldPushProperties() {
+ layers_that_should_push_properties_.clear();
}
void LayerTreeHost::SetPageScaleFromImplSide(float page_scale) {
@@ -1365,9 +1410,13 @@ void LayerTreeHost::SetElasticOverscrollFromImplSide(
void LayerTreeHost::UpdateHudLayer(bool show_hud_info) {
if (show_hud_info) {
- if (!hud_layer_.get()) {
+ if (!hud_layer_.get())
hud_layer_ = HeadsUpDisplayLayer::Create();
- }
+
+ gfx::Size device_viewport_in_layout_pixels =
+ gfx::Size(device_viewport_size_.width() / device_scale_factor_,
+ device_viewport_size_.height() / device_scale_factor_);
+ hud_layer_->SetBounds(device_viewport_in_layout_pixels);
if (root_layer_.get() && !hud_layer_->parent())
root_layer_->AddChild(hud_layer_);
diff --git a/chromium/cc/trees/layer_tree_host.h b/chromium/cc/trees/layer_tree_host.h
index 7b1069bb28f..d2cd6ed1f5b 100644
--- a/chromium/cc/trees/layer_tree_host.h
+++ b/chromium/cc/trees/layer_tree_host.h
@@ -72,6 +72,17 @@ class UkmRecorderFactory;
struct RenderingStats;
struct ScrollAndScaleSet;
+// Returned from LayerTreeHost::DeferCommits. Automatically un-defers on
+// destruction.
+class CC_EXPORT ScopedDeferCommits {
+ public:
+ explicit ScopedDeferCommits(LayerTreeHost* host);
+ ~ScopedDeferCommits();
+
+ private:
+ base::WeakPtr<LayerTreeHost> host_;
+};
+
class CC_EXPORT LayerTreeHost : public MutatorHostClient {
public:
struct CC_EXPORT InitParams {
@@ -90,15 +101,27 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
InitParams();
~InitParams();
+
+ InitParams(InitParams&&);
+ InitParams& operator=(InitParams&&);
};
+  // Constructs a LayerTreeHost with a compositor thread, where scrolling and
+  // animation take place. This is used for the web compositor in the renderer
+  // process to move work off the main thread, which javascript can otherwise dominate.
static std::unique_ptr<LayerTreeHost> CreateThreaded(
scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
- InitParams* params);
-
+ InitParams params);
+
+  // Constructs a LayerTreeHost without a separate compositor thread, but which
+  // behaves and looks the same as a threaded compositor externally, apart from
+  // the additional client interface. This is used where the main thread creating
+  // this instance is not expected to become blocked, so moving work to another
+  // thread, and the overhead that adds, is not required.
static std::unique_ptr<LayerTreeHost> CreateSingleThreaded(
LayerTreeHostSingleThreadClient* single_thread_client,
- InitParams* params);
+ InitParams params);
virtual ~LayerTreeHost();
@@ -106,7 +129,9 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
int GetId() const;
// The current source frame number. This is incremented for each main frame
- // update(commit) pushed to the compositor thread.
+  // update (commit) pushed to the compositor thread. The initial frame number
+  // is 0, and it is incremented once a commit completes (which is before the
+  // compositor thread submits its frame for that commit).
int SourceFrameNumber() const;
// Returns the UIResourceManager used to create UIResources for
@@ -117,26 +142,38 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
// thread task runners.
TaskRunnerProvider* GetTaskRunnerProvider() const;
- // Returns the settings used by this host.
+ // Returns the settings used by this host. These settings are constants given
+ // at startup.
const LayerTreeSettings& GetSettings() const;
// Sets the LayerTreeMutator interface used to directly mutate the compositor
// state on the compositor thread. (Compositor-Worker)
void SetLayerTreeMutator(std::unique_ptr<LayerTreeMutator> mutator);
- // Call this function when you expect there to be a swap buffer.
+  // Attaches a SwapPromise to the Layer tree; it passes through the
+  // LayerTreeHost and LayerTreeHostImpl with the next commit and frame
+  // submission, which can be used to observe that progress. This also
+  // causes a main frame to be requested.
// See swap_promise.h for how to use SwapPromise.
void QueueSwapPromise(std::unique_ptr<SwapPromise> swap_promise);
- // Returns the SwapPromiseManager used to create SwapPromiseMonitors for this
- // host.
+ // Returns the SwapPromiseManager, used to insert SwapPromises dynamically
+ // when a main frame is requested.
SwapPromiseManager* GetSwapPromiseManager();
- // Sets whether the content is suitable to use Gpu Rasterization.
+  // Sets whether the content is suitable for GPU rasterization. This flag is
+  // used to enable GPU rasterization, and can be modified at any time to change
+  // the setting based on content.
void SetHasGpuRasterizationTrigger(bool has_trigger);
// Visibility and LayerTreeFrameSink -------------------------------
+ // Sets or gets if the LayerTreeHost is visible. When not visible it will:
+ // - Not request a new LayerTreeFrameSink from the client.
+ // - Stop submitting frames to the display compositor.
+ // - Stop producing main frames and committing them.
+ // The LayerTreeHost is not visible when first created, so this must be called
+ // to make it visible before it will attempt to start producing output.
void SetVisible(bool visible);
bool IsVisible() const;
@@ -182,12 +219,15 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
// requested.
bool CommitRequested() const;
- // Enables/disables the compositor from requesting main frame updates from the
- // client.
- void SetDeferCommits(bool defer_commits);
- // Returns the value last passed to SetDeferCommits(), though commits may be
- // deferred also when the local_surface_id_from_parent() is not valid.
- bool defer_commits() const { return defer_commits_; }
+ // Prevents the compositor from requesting main frame updates from the client
+ // until the ScopedDeferCommits object is destroyed, or StopDeferringCommits
+ // is called.
+ std::unique_ptr<ScopedDeferCommits> DeferCommits();
+
+  // Returns whether there are any outstanding ScopedDeferCommits, though
+  // commits may also be deferred when the local_surface_id_from_parent() is not
+  // valid.
+ bool defer_commits() const { return defer_commits_count_; }
// Synchronously performs a main frame update and layer updates. Used only in
// single threaded mode when the compositor's internal scheduling is disabled.
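A minimal usage sketch of the scoped defer-commits API above, assuming a valid LayerTreeHost* (illustrative only; the helper body is a placeholder, not part of this change):

// Illustrative only: |host| is assumed to be a valid cc::LayerTreeHost*.
void UpdateManyLayersWithoutInterleavedCommits(cc::LayerTreeHost* host) {
  // Commits stay deferred while at least one ScopedDeferCommits is alive.
  std::unique_ptr<cc::ScopedDeferCommits> scoped_defer = host->DeferCommits();
  // ... mutate layers here; no main frame updates are requested ...
}  // |scoped_defer| destroyed: the defer count drops to zero and commits resume.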
@@ -235,16 +275,12 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
// Returns true if the message was successfully delivered and handled.
bool SendMessageToMicroBenchmark(int id, std::unique_ptr<base::Value> value);
- // When the main thread informs the impl thread that it is ready to commit,
- // generally it would remain blocked till the main thread state is copied to
- // the pending tree. Calling this would ensure that the main thread remains
- // blocked till the pending tree is activated.
+ // When the main thread informs the compositor thread that it is ready to
+ // commit, generally it would remain blocked until the main thread state is
+ // copied to the pending tree. Calling this would ensure that the main thread
+ // remains blocked until the pending tree is activated.
void SetNextCommitWaitsForActivation();
- // The LayerTreeHost tracks whether the content is suitable for Gpu raster.
- // Calling this will reset it back to not suitable state.
- void ResetGpuRasterizationTracking();
-
// Registers a callback that is run when the next frame successfully makes it
// to the screen (it's entirely possible some frames may be dropped between
// the time this is called and the callback is run).
@@ -252,10 +288,18 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
base::OnceCallback<void(const gfx::PresentationFeedback&)>;
void RequestPresentationTimeForNextFrame(PresentationTimeCallback callback);
+ // Layer tree accessors and modifiers ------------------------
+
+ // Sets or gets the root of the Layer tree. Children of the root Layer are
+ // attached to it and will be added/removed along with the root Layer. The
+ // LayerTreeHost retains ownership of a reference to the root Layer.
void SetRootLayer(scoped_refptr<Layer> root_layer);
Layer* root_layer() { return root_layer_.get(); }
const Layer* root_layer() const { return root_layer_.get(); }
+ // Viewport Layers are used to identify key layers to the compositor thread,
+ // so that it can perform viewport-based scrolling independently, such as
+ // for pinch-zoom or overscroll elasticity.
struct CC_EXPORT ViewportLayers {
ViewportLayers();
~ViewportLayers();
@@ -266,6 +310,8 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
scoped_refptr<Layer> inner_viewport_scroll;
scoped_refptr<Layer> outer_viewport_scroll;
};
+ // Sets or gets the collection of viewport Layers, defined to allow pinch-zoom
+ // transformations on the compositor thread.
void RegisterViewportLayers(const ViewportLayers& viewport_layers);
ElementId overscroll_elasticity_element_id() const {
return viewport_layers_.overscroll_elasticity_element_id;
@@ -284,14 +330,28 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
return viewport_layers_.outer_viewport_scroll.get();
}
+ // Sets or gets the position of touch handles for a text selection. These are
+ // submitted to the display compositor along with the Layer tree's contents
+ // allowing it to present the selection handles. This is done because the
+ // handles are a UI widget above, and not clipped to, the viewport of this
+ // LayerTreeHost.
void RegisterSelection(const LayerSelection& selection);
const LayerSelection& selection() const { return selection_; }
+ // Sets or gets if the client has any scroll event handlers registered. This
+ // allows the threaded compositor to prioritize main frames even when
+ // servicing a touch scroll on the compositor thread, in order to give the
+ // event handler a chance to be part of each frame.
void SetHaveScrollEventHandlers(bool have_event_handlers);
bool have_scroll_event_handlers() const {
return have_scroll_event_handlers_;
}
+  // Sets or gets the event handlers that exist on the layer tree, in order to
+  // inform the compositor thread whether it can handle an input event or needs
+  // to pass it to the main thread to be handled. The class is the type of input
+  // event, and for each class the properties define whether the compositor
+  // thread can handle the event.
void SetEventListenerProperties(EventListenerClass event_class,
EventListenerProperties event_properties);
EventListenerProperties event_listener_properties(
@@ -394,15 +454,24 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
void RemoveSurfaceRange(const viz::SurfaceRange& surface_range);
base::flat_set<viz::SurfaceRange> SurfaceRanges() const;
+  // Marks or unmarks a layer as needing PushPropertiesTo in the next commit.
+ // These are internal methods, called from the Layer itself when changing a
+ // property or completing a PushPropertiesTo.
void AddLayerShouldPushProperties(Layer* layer);
void RemoveLayerShouldPushProperties(Layer* layer);
- std::unordered_set<Layer*>& LayersThatShouldPushProperties();
- bool LayerNeedsPushPropertiesForTesting(Layer* layer) const;
+ void ClearLayersThatShouldPushProperties();
+ // The current set of all Layers attached to the LayerTreeHost's tree that
+ // have been marked as needing PushPropertiesTo in the next commit.
+ const base::flat_set<Layer*>& LayersThatShouldPushProperties() {
+ return layers_that_should_push_properties_;
+ }
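A hedged sketch of how the accessor pair above is intended to be used at commit time (simplified; PendingLayerFor() is a hypothetical lookup, and the real logic lives in the tree synchronization path):

// Illustrative only: push properties for the marked layers, then reset the set.
void PushMarkedProperties(cc::LayerTreeHost* host) {
  for (cc::Layer* layer : host->LayersThatShouldPushProperties())
    layer->PushPropertiesTo(PendingLayerFor(layer));  // hypothetical lookup
  host->ClearLayersThatShouldPushProperties();
}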
void SetPageScaleFromImplSide(float page_scale);
void SetElasticOverscrollFromImplSide(gfx::Vector2dF elastic_overscroll);
gfx::Vector2dF elastic_overscroll() const { return elastic_overscroll_; }
+ // Ensures a HUD layer exists if it is needed, and updates the layer bounds.
+ // If a HUD layer exists but is no longer needed, it is destroyed.
void UpdateHudLayer(bool show_hud_info);
HeadsUpDisplayLayer* hud_layer() const { return hud_layer_.get(); }
@@ -445,9 +514,7 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
void BeginMainFrameNotExpectedSoon();
void BeginMainFrameNotExpectedUntil(base::TimeTicks time);
void AnimateLayers(base::TimeTicks monotonic_frame_begin_time);
- using VisualStateUpdate = LayerTreeHostClient::VisualStateUpdate;
- void RequestMainFrameUpdate(
- VisualStateUpdate requested_update = VisualStateUpdate::kAll);
+ void RequestMainFrameUpdate();
void FinishCommitOnImplThread(LayerTreeHostImpl* host_impl);
void WillCommit();
void CommitComplete();
@@ -469,6 +536,7 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
// Called when the compositor completed page scale animation.
void DidCompletePageScaleAnimation();
void ApplyScrollAndScale(ScrollAndScaleSet* info);
+ void RecordEndOfFrameMetrics(base::TimeTicks frame_begin_time);
LayerTreeHostClient* client() { return client_; }
@@ -540,7 +608,7 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
std::unique_ptr<RenderFrameMetadataObserver> observer);
protected:
- LayerTreeHost(InitParams* params, CompositorMode mode);
+ LayerTreeHost(InitParams params, CompositorMode mode);
void InitializeThreaded(
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
@@ -573,13 +641,14 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
private:
friend class LayerTreeHostSerializationTest;
+ friend class ScopedDeferCommits;
// This is the number of consecutive frames in which we want the content to be
// free of slow-paths before toggling the flag.
enum { kNumFramesToConsiderBeforeRemovingSlowPathFlag = 60 };
- void ApplyViewportDeltas(ScrollAndScaleSet* info);
- void RecordWheelAndTouchScrollingCount(ScrollAndScaleSet* info);
+ void ApplyViewportDeltas(const ScrollAndScaleSet& info);
+ void RecordWheelAndTouchScrollingCount(const ScrollAndScaleSet& info);
void ApplyPageScaleDeltaFromImplSide(float page_scale_delta);
void InitializeProxy(std::unique_ptr<Proxy> proxy);
@@ -661,7 +730,7 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
// Used to detect surface invariant violations.
bool has_pushed_local_surface_id_from_parent_ = false;
bool new_local_surface_id_request_ = false;
- bool defer_commits_ = false;
+ uint32_t defer_commits_count_ = 0;
SkColor background_color_ = SK_ColorWHITE;
@@ -692,7 +761,7 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
base::flat_map<viz::SurfaceRange, int> surface_ranges_;
// Set of layers that need to push properties.
- std::unordered_set<Layer*> layers_that_should_push_properties_;
+ base::flat_set<Layer*> layers_that_should_push_properties_;
// Layer id to Layer map.
std::unordered_map<int, Layer*> layer_id_map_;
@@ -717,6 +786,9 @@ class CC_EXPORT LayerTreeHost : public MutatorHostClient {
// added here.
std::vector<PresentationTimeCallback> pending_presentation_time_callbacks_;
+ // Used to vend weak pointers to LayerTreeHost to ScopedDeferCommits objects.
+ base::WeakPtrFactory<LayerTreeHost> defer_commits_weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(LayerTreeHost);
};
diff --git a/chromium/cc/trees/layer_tree_host_client.h b/chromium/cc/trees/layer_tree_host_client.h
index 1c2e7fff509..97150433e01 100644
--- a/chromium/cc/trees/layer_tree_host_client.h
+++ b/chromium/cc/trees/layer_tree_host_client.h
@@ -9,6 +9,7 @@
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
+#include "ui/gfx/geometry/vector2d_f.h"
namespace gfx {
struct PresentationFeedback;
@@ -21,6 +22,23 @@ struct BeginFrameArgs;
namespace cc {
+struct ApplyViewportChangesArgs {
+ // Scroll offset delta of the inner (visual) viewport.
+ gfx::Vector2dF inner_delta;
+
+  // Elastic overscroll effect offset delta, a.k.a. "rubber-banding" overscroll.
+  // This is used only on Mac.
+ gfx::Vector2dF elastic_overscroll_delta;
+
+ // "Pinch-zoom" page scale delta. This is a multiplicative delta. i.e.
+ // main_thread_scale * delta == impl_thread_scale.
+ float page_scale_delta;
+
+  // How much the browser controls have been shown or hidden. The ratio runs
+  // between 0 (hidden) and 1 (fully shown). This is additive.
+ float browser_controls_delta;
+};
+
// A LayerTreeHost is bound to a LayerTreeHostClient. The main rendering
// loop (in ProxyMain or SingleThreadProxy) calls methods on the
// LayerTreeHost, which then handles them and also calls into the equivalent
@@ -63,18 +81,13 @@ class LayerTreeHostClient {
// state. (The "compositing state" will result in a mutated layer tree on the
// LayerTreeHost via additional interface indirections which lead back to
// mutations on the LayerTreeHost.)
- //
- // If |requested_update| is kPrePaint, the client should apply layout and
- // animation updates and their side effects, but can skip painting stages.
- enum class VisualStateUpdate { kPrePaint, kAll };
- virtual void UpdateLayerTreeHost(VisualStateUpdate requested_update) = 0;
-
- virtual void ApplyViewportDeltas(
- const gfx::Vector2dF& inner_delta,
- const gfx::Vector2dF& outer_delta,
- const gfx::Vector2dF& elastic_overscroll_delta,
- float page_scale,
- float top_controls_delta) = 0;
+ virtual void UpdateLayerTreeHost() = 0;
+
+  // Notifies the client of viewport-related changes that occurred in the
+ // LayerTreeHost since the last commit. This typically includes things
+ // related to pinch-zoom, browser controls (aka URL bar), overscroll, etc.
+ virtual void ApplyViewportChanges(const ApplyViewportChangesArgs& args) = 0;
+
virtual void RecordWheelAndTouchScrollingCount(
bool has_scrolled_by_wheel,
bool has_scrolled_by_touch) = 0;
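A hedged sketch of a client consuming the new aggregated argument struct (illustrative only; MyClient and its members are assumptions, not taken from this change):

// Illustrative only: a hypothetical LayerTreeHostClient implementation.
void MyClient::ApplyViewportChanges(const cc::ApplyViewportChangesArgs& args) {
  visual_viewport_offset_ += args.inner_delta;             // scroll offset delta
  elastic_overscroll_ += args.elastic_overscroll_delta;    // Mac rubber-banding
  page_scale_ *= args.page_scale_delta;                    // multiplicative delta
  browser_controls_shown_ratio_ += args.browser_controls_delta;  // additive
}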
@@ -93,6 +106,9 @@ class LayerTreeHostClient {
virtual void DidPresentCompositorFrame(
uint32_t frame_token,
const gfx::PresentationFeedback& feedback) = 0;
+  // Records UMA and UKM metrics that require the time from the start of
+  // BeginMainFrame to the commit, or to the early-out.
+ virtual void RecordEndOfFrameMetrics(base::TimeTicks frame_begin_time) = 0;
protected:
virtual ~LayerTreeHostClient() {}
diff --git a/chromium/cc/trees/layer_tree_host_common_unittest.cc b/chromium/cc/trees/layer_tree_host_common_unittest.cc
index 5defcc78679..57bed051c21 100644
--- a/chromium/cc/trees/layer_tree_host_common_unittest.cc
+++ b/chromium/cc/trees/layer_tree_host_common_unittest.cc
@@ -2705,8 +2705,8 @@ TEST_F(LayerTreeHostCommonTest, VisibleRectWithClippingAndFilters) {
ExecuteCalculateDrawProperties(root);
- EXPECT_EQ(gfx::Rect(50, 40, 10, 20), filter_child->visible_layer_rect());
- EXPECT_EQ(gfx::Rect(0, -10, 10, 20),
+ EXPECT_EQ(gfx::Rect(49, 39, 12, 21), filter_child->visible_layer_rect());
+ EXPECT_EQ(gfx::Rect(-1, -11, 12, 21),
GetRenderSurface(filter)->content_rect());
}
@@ -2758,8 +2758,8 @@ TEST_F(LayerTreeHostCommonTest, VisibleRectWithScalingClippingAndFilters) {
ExecuteCalculateDrawProperties(root);
- EXPECT_EQ(gfx::Rect(50, 40, 10, 20), filter_child->visible_layer_rect());
- EXPECT_EQ(gfx::Rect(0, -30, 30, 60),
+ EXPECT_EQ(gfx::Rect(49, 39, 12, 21), filter_child->visible_layer_rect());
+ EXPECT_EQ(gfx::Rect(-1, -31, 32, 61),
GetRenderSurface(filter)->content_rect());
}
@@ -10014,7 +10014,6 @@ TEST_F(LayerTreeHostCommonTest, ScrollTreeBuilderTest) {
property_tree_root->scrollable = false;
property_tree_root->main_thread_scrolling_reasons =
MainThreadScrollingReason::kNotScrollingOnMain;
- property_tree_root->non_fast_scrollable_region = Region();
property_tree_root->transform_id = kRootPropertyTreeNodeId;
// The node owned by root1
@@ -10079,7 +10078,6 @@ TEST_F(LayerTreeHostCommonTest, ScrollTreeBuilderTest) {
// The node owned by parent5
ScrollNode scroll_parent5;
scroll_parent5.id = 8;
- scroll_parent5.non_fast_scrollable_region = gfx::Rect(0, 0, 50, 50);
scroll_parent5.bounds = gfx::Size(10, 10);
scroll_parent5.should_flatten = true;
scroll_parent5.user_scrollable_horizontal = true;
diff --git a/chromium/cc/trees/layer_tree_host_impl.cc b/chromium/cc/trees/layer_tree_host_impl.cc
index 5b9296f35dd..9b1e5d7484e 100644
--- a/chromium/cc/trees/layer_tree_host_impl.cc
+++ b/chromium/cc/trees/layer_tree_host_impl.cc
@@ -20,7 +20,6 @@
#include "base/containers/adapters.h"
#include "base/containers/flat_map.h"
#include "base/json/json_writer.h"
-#include "base/memory/memory_coordinator_client_registry.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/numerics/safe_conversions.h"
@@ -353,7 +352,6 @@ LayerTreeHostImpl::LayerTreeHostImpl(
this, settings.top_controls_show_threshold,
settings.top_controls_hide_threshold);
- base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
memory_pressure_listener_.reset(
new base::MemoryPressureListener(base::BindRepeating(
&LayerTreeHostImpl::OnMemoryPressure, base::Unretained(this))));
@@ -393,8 +391,6 @@ LayerTreeHostImpl::~LayerTreeHostImpl() {
pending_tree_ = nullptr;
active_tree_ = nullptr;
- base::MemoryCoordinatorClientRegistry::GetInstance()->Unregister(this);
-
// All resources should already be removed, so lose anything still exported.
resource_provider_.ShutdownAndReleaseAllResources();
@@ -1058,8 +1054,6 @@ DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
// texture suddenly appearing in the future.
DrawResult draw_result = DRAW_SUCCESS;
- int layers_drawn = 0;
-
const DrawMode draw_mode = GetDrawMode();
int num_missing_tiles = 0;
@@ -1071,6 +1065,12 @@ DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
active_tree()->property_trees()->effect_tree.HasCopyRequests();
bool have_missing_animated_tiles = false;
+ int num_layers = 0;
+ int num_mask_layers = 0;
+ int num_rounded_corner_mask_layers = 0;
+ int64_t visible_mask_layer_area = 0;
+ int64_t visible_rounded_corner_mask_layer_area = 0;
+
for (EffectTreeLayerListIterator it(active_tree());
it.state() != EffectTreeLayerListIterator::State::END; ++it) {
auto target_render_pass_id = it.target_render_surface()->id();
@@ -1105,10 +1105,9 @@ DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
frame->may_contain_video = true;
layer->AppendQuads(target_render_pass, &append_quads_data);
+ ++num_layers;
}
- ++layers_drawn;
-
rendering_stats_instrumentation_->AddVisibleContentArea(
append_quads_data.visible_layer_area);
rendering_stats_instrumentation_->AddApproximatedVisibleContentArea(
@@ -1146,6 +1145,12 @@ DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
}
frame->use_default_lower_bound_deadline |=
append_quads_data.use_default_lower_bound_deadline;
+ num_mask_layers += append_quads_data.num_mask_layers;
+ num_rounded_corner_mask_layers +=
+ append_quads_data.num_rounded_corner_mask_layers;
+ visible_mask_layer_area += append_quads_data.visible_mask_layer_area;
+ visible_rounded_corner_mask_layer_area +=
+ append_quads_data.visible_rounded_corner_mask_layer_area;
}
// If CommitToActiveTree() is true, then we wait to draw until
@@ -1215,16 +1220,43 @@ DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
UMA_HISTOGRAM_COUNTS_100(
"Compositing.RenderPass.AppendQuadData.NumIncompleteTiles",
num_incomplete_tiles);
- UMA_HISTOGRAM_COUNTS(
+ UMA_HISTOGRAM_COUNTS_1M(
"Compositing.RenderPass.AppendQuadData."
"CheckerboardedNoRecordingContentArea",
checkerboarded_no_recording_content_area);
- UMA_HISTOGRAM_COUNTS(
+ UMA_HISTOGRAM_COUNTS_1M(
"Compositing.RenderPass.AppendQuadData."
"CheckerboardedNeedRasterContentArea",
checkerboarded_needs_raster_content_area);
}
+  // Only record these UMAs on the first frame, and only in the renderer,
+  // for which we use having a compositor thread as a proxy.
+ if (!active_tree_->has_ever_been_drawn() && SupportsImplScrolling()) {
+ int mask_layer_percent = static_cast<int>(
+ num_mask_layers / static_cast<float>(num_layers) * 100);
+ int rc_mask_layer_percent =
+ static_cast<int>(num_rounded_corner_mask_layers /
+ static_cast<float>(num_mask_layers) * 100);
+ int rc_area_percent =
+ static_cast<int>(visible_rounded_corner_mask_layer_area /
+ static_cast<float>(total_visible_area) * 100);
+
+ UMA_HISTOGRAM_PERCENTAGE(
+ "Compositing.RenderPass.AppendQuadData.MaskLayerPercent",
+ mask_layer_percent);
+ if (num_mask_layers > 0) {
+ UMA_HISTOGRAM_PERCENTAGE(
+ "Compositing.RenderPass.AppendQuadData.RCMaskLayerPercent",
+ rc_mask_layer_percent);
+ }
+ UMA_HISTOGRAM_PERCENTAGE(
+ "Compositing.RenderPass.AppendQuadData.RCMaskAreaPercent",
+ rc_area_percent);
+ UMA_HISTOGRAM_COUNTS_10M("Compositing.RenderPass.AppendQuadData.RCMaskArea",
+ visible_rounded_corner_mask_layer_area);
+ }
+
TRACE_EVENT_END2("cc,benchmark", "LayerTreeHostImpl::CalculateRenderPasses",
"draw_result", draw_result, "missing tiles",
num_missing_tiles);
@@ -1302,7 +1334,7 @@ DrawResult LayerTreeHostImpl::PrepareToDraw(FrameData* frame) {
total_gpu_memory_for_tilings_in_bytes += layer->GPUMemoryUsageInBytes();
}
if (total_memory_in_bytes != 0) {
- UMA_HISTOGRAM_COUNTS(
+ UMA_HISTOGRAM_COUNTS_1M(
base::StringPrintf("Compositing.%s.PictureMemoryUsageKb",
client_name),
base::saturated_cast<int>(total_memory_in_bytes / 1024));
@@ -1922,6 +1954,11 @@ viz::CompositorFrameMetadata LayerTreeHostImpl::MakeCompositorFrameMetadata() {
metadata.min_page_scale_factor = active_tree_->min_page_scale_factor();
+ metadata.top_controls_height =
+ browser_controls_offset_manager_->TopControlsHeight();
+ metadata.top_controls_shown_ratio =
+ browser_controls_offset_manager_->TopControlsShownRatio();
+
#if defined(OS_ANDROID)
metadata.max_page_scale_factor = active_tree_->max_page_scale_factor();
metadata.root_layer_size = active_tree_->ScrollableSize();
@@ -1931,10 +1968,6 @@ viz::CompositorFrameMetadata LayerTreeHostImpl::MakeCompositorFrameMetadata() {
!outer_viewport_scroll_node->user_scrollable_vertical;
}
- metadata.top_controls_height =
- browser_controls_offset_manager_->TopControlsHeight();
- metadata.top_controls_shown_ratio =
- browser_controls_offset_manager_->TopControlsShownRatio();
metadata.bottom_controls_height =
browser_controls_offset_manager_->BottomControlsHeight();
metadata.bottom_controls_shown_ratio =
@@ -1973,17 +2006,18 @@ RenderFrameMetadata LayerTreeHostImpl::MakeRenderFrameMetadata(
metadata.is_mobile_optimized = IsMobileOptimized(active_tree_.get());
metadata.viewport_size_in_pixels = active_tree_->GetDeviceViewport().size();
-#if defined(OS_ANDROID)
+ metadata.page_scale_factor = active_tree_->current_page_scale_factor();
+
metadata.top_controls_height =
browser_controls_offset_manager_->TopControlsHeight();
metadata.top_controls_shown_ratio =
browser_controls_offset_manager_->TopControlsShownRatio();
+#if defined(OS_ANDROID)
metadata.bottom_controls_height =
browser_controls_offset_manager_->BottomControlsHeight();
metadata.bottom_controls_shown_ratio =
browser_controls_offset_manager_->BottomControlsShownRatio();
metadata.scrollable_viewport_size = active_tree_->ScrollableViewportSize();
- metadata.page_scale_factor = active_tree_->current_page_scale_factor();
metadata.min_page_scale_factor = active_tree_->min_page_scale_factor();
metadata.max_page_scale_factor = active_tree_->max_page_scale_factor();
metadata.root_layer_size = active_tree_->ScrollableSize();
@@ -2002,7 +2036,11 @@ RenderFrameMetadata LayerTreeHostImpl::MakeRenderFrameMetadata(
bool allocate_new_local_surface_id =
#if !defined(OS_ANDROID)
- false;
+ last_draw_render_frame_metadata_ &&
+ (last_draw_render_frame_metadata_->top_controls_height !=
+ metadata.top_controls_height ||
+ last_draw_render_frame_metadata_->top_controls_shown_ratio !=
+ metadata.top_controls_shown_ratio);
#else
last_draw_render_frame_metadata_ &&
(last_draw_render_frame_metadata_->top_controls_height !=
@@ -2473,34 +2511,48 @@ void LayerTreeHostImpl::UpdateViewportContainerSizes() {
// for changes in the size (e.g. browser controls) since the last resize from
// Blink.
auto* property_trees = active_tree_->property_trees();
- gfx::Vector2dF inner_bounds_delta(0.f, delta_from_top_controls);
- if (property_trees->inner_viewport_container_bounds_delta() ==
- inner_bounds_delta)
+ gfx::Vector2dF bounds_delta(0.f, delta_from_top_controls);
+ if (property_trees->inner_viewport_container_bounds_delta() == bounds_delta)
return;
- property_trees->SetInnerViewportContainerBoundsDelta(inner_bounds_delta);
+ property_trees->SetInnerViewportContainerBoundsDelta(bounds_delta);
ClipNode* inner_clip_node = property_trees->clip_tree.Node(
InnerViewportScrollLayer()->clip_tree_index());
inner_clip_node->clip.set_height(
- InnerViewportScrollNode()->container_bounds.height() +
- inner_bounds_delta.y());
+ InnerViewportScrollNode()->container_bounds.height() + bounds_delta.y());
// Adjust the outer viewport container as well, since adjusting only the
// inner may cause its bounds to exceed those of the outer, causing scroll
// clamping.
if (OuterViewportScrollNode()) {
- gfx::Vector2dF outer_bounds_delta = gfx::ScaleVector2d(
- inner_bounds_delta, 1.f / active_tree_->min_page_scale_factor());
+ gfx::Vector2dF scaled_bounds_delta = gfx::ScaleVector2d(
+ bounds_delta, 1.f / active_tree_->min_page_scale_factor());
- property_trees->SetOuterViewportContainerBoundsDelta(outer_bounds_delta);
- property_trees->SetInnerViewportScrollBoundsDelta(outer_bounds_delta);
+ property_trees->SetOuterViewportContainerBoundsDelta(scaled_bounds_delta);
+ property_trees->SetInnerViewportScrollBoundsDelta(scaled_bounds_delta);
ClipNode* outer_clip_node = property_trees->clip_tree.Node(
OuterViewportScrollLayer()->clip_tree_index());
- outer_clip_node->clip.set_height(
- OuterViewportScrollNode()->container_bounds.height() +
- outer_bounds_delta.y());
+
+ float container_height =
+ OuterViewportScrollNode()->container_bounds.height();
+
+ // TODO(bokan): The container bounds for the outer viewport are incorrectly
+ // computed pre-Blink-Gen-Property-Trees so we must apply the minimum page
+ // scale factor. https://crbug.com/901083
+ if (!settings().use_layer_lists)
+ container_height *= active_tree_->min_page_scale_factor();
+
+ outer_clip_node->clip.set_height(container_height + bounds_delta.y());
+
+ // Expand all clips between the outer viewport and the inner viewport.
+ auto* outer_ancestor = property_trees->clip_tree.parent(outer_clip_node);
+ while (outer_ancestor && outer_ancestor != inner_clip_node) {
+ outer_ancestor->clip.Union(outer_clip_node->clip);
+ outer_ancestor = property_trees->clip_tree.parent(outer_ancestor);
+ }
+
anchor.ResetViewportToAnchoredPosition();
}
@@ -2588,9 +2640,18 @@ base::Optional<viz::HitTestRegionList> LayerTreeHostImpl::BuildHitTestData() {
if (layer->is_surface_layer()) {
const auto* surface_layer = static_cast<const SurfaceLayerImpl*>(layer);
- if (!surface_layer->surface_hit_testable()) {
- overlapping_region.Union(MathUtil::MapEnclosingClippedRect(
- layer->ScreenSpaceTransform(), gfx::Rect(surface_layer->bounds())));
+      // If a surface layer was not created by a child frame compositor, or the
+      // frame owner has the pointer-events: none property, the surface layer is
+      // not hit testable and we should not generate hit-test data for it.
+      if (!surface_layer->ShouldGenerateSurfaceHitTestData()) {
+        // If a surface layer was created for video or offscreen canvas, it can
+        // still block overlapped surface layers from getting events, so we need
+        // to account for all layers that don't have pointer-events: none.
+ if (!surface_layer->has_pointer_events_none()) {
+ overlapping_region.Union(MathUtil::MapEnclosingClippedRect(
+ layer->ScreenSpaceTransform(),
+ gfx::Rect(surface_layer->bounds())));
+ }
continue;
}
@@ -2849,6 +2910,27 @@ void LayerTreeHostImpl::ActivateSyncTree() {
if (active_tree()->TakeNewLocalSurfaceIdRequest())
child_local_surface_id_allocator_.GenerateId();
}
+
+ // Dump property trees and layers if run with:
+ // --vmodule=layer_tree_host_impl=3
+ if (VLOG_IS_ON(3)) {
+ VLOG(3) << "After activating sync tree, the active tree:";
+ // Because the property tree and layer list output can be verbose, the VLOG
+ // output is split by line to avoid line buffer limits on android.
+ VLOG(3) << "property trees:";
+ std::string property_trees;
+ base::JSONWriter::WriteWithOptions(
+ *active_tree_->property_trees()->AsTracedValue()->ToBaseValue(),
+ base::JSONWriter::OPTIONS_PRETTY_PRINT, &property_trees);
+ std::stringstream property_trees_stream(property_trees);
+ for (std::string line; std::getline(property_trees_stream, line);)
+ VLOG(3) << line;
+
+ VLOG(3) << "layers:";
+ std::stringstream layers_stream(LayerListAsJson());
+ for (std::string line; std::getline(layers_stream, line);)
+ VLOG(3) << line;
+ }
}
void LayerTreeHostImpl::ActivateStateForImages() {
@@ -2856,7 +2938,15 @@ void LayerTreeHostImpl::ActivateStateForImages() {
tile_manager_.DidActivateSyncTree();
}
-void LayerTreeHostImpl::OnPurgeMemory() {
+void LayerTreeHostImpl::OnMemoryPressure(
+ base::MemoryPressureListener::MemoryPressureLevel level) {
+ // Only work for low-end devices for now.
+ if (!base::SysInfo::IsLowEndDevice())
+ return;
+
+ if (level != base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL)
+ return;
+
ReleaseTileResources();
active_tree_->OnPurgeMemory();
if (pending_tree_)
@@ -2870,26 +2960,10 @@ void LayerTreeHostImpl::OnPurgeMemory() {
image_decode_cache_->SetShouldAggressivelyFreeResources(false);
}
if (resource_pool_)
- resource_pool_->OnPurgeMemory();
+ resource_pool_->OnMemoryPressure(level);
tile_manager_.decoded_image_tracker().UnlockAllImages();
}
-void LayerTreeHostImpl::OnMemoryPressure(
- base::MemoryPressureListener::MemoryPressureLevel level) {
- // Only work for low-end devices for now.
- if (!base::SysInfo::IsLowEndDevice())
- return;
-
- switch (level) {
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- break;
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- OnPurgeMemory();
- break;
- }
-}
-
void LayerTreeHostImpl::SetVisible(bool visible) {
DCHECK(task_runner_provider_->IsImplThread());
@@ -3352,7 +3426,18 @@ InputHandler::ScrollStatus LayerTreeHostImpl::TryScroll(
return scroll_status;
}
- if (!scroll_node->non_fast_scrollable_region.IsEmpty()) {
+ LayerImpl* layer =
+ active_tree_->ScrollableLayerByElementId(scroll_node->element_id);
+
+ // We may not find an associated layer for the root or secondary root node -
+ // that's fine, they're not associated with any elements on the page. We also
+ // won't find a layer for the inner viewport (in SPv2) since it doesn't
+ // require hit testing.
+ DCHECK(layer || scroll_node->id == ScrollTree::kRootNodeId ||
+ scroll_node->id == ScrollTree::kSecondaryRootNodeId ||
+ scroll_node->scrolls_inner_viewport);
+
+ if (layer && !layer->non_fast_scrollable_region().IsEmpty()) {
bool clipped = false;
gfx::Transform inverse_screen_space_transform(
gfx::Transform::kSkipInitialization);
@@ -3364,7 +3449,7 @@ InputHandler::ScrollStatus LayerTreeHostImpl::TryScroll(
gfx::PointF hit_test_point_in_layer_space = MathUtil::ProjectPoint(
inverse_screen_space_transform, screen_space_point, &clipped);
- if (!clipped && scroll_node->non_fast_scrollable_region.Contains(
+ if (!clipped && layer->non_fast_scrollable_region().Contains(
gfx::ToRoundedPoint(hit_test_point_in_layer_space))) {
TRACE_EVENT0("cc",
"LayerImpl::tryScroll: Failed NonFastScrollableRegion");
@@ -3548,6 +3633,24 @@ InputHandler::ScrollStatus LayerTreeHostImpl::RootScrollBegin(
ClearCurrentlyScrollingNode();
+ gfx::Point viewport_point(scroll_state->position_x(),
+ scroll_state->position_y());
+
+ gfx::PointF device_viewport_point = gfx::ScalePoint(
+ gfx::PointF(viewport_point), active_tree_->device_scale_factor());
+ LayerImpl* first_scrolling_layer_or_scrollbar =
+ active_tree_->FindFirstScrollingLayerOrScrollbarThatIsHitByPoint(
+ device_viewport_point);
+
+ if (IsTouchDraggingScrollbar(first_scrolling_layer_or_scrollbar, type)) {
+ TRACE_EVENT_INSTANT0("cc", "Scrollbar Scrolling", TRACE_EVENT_SCOPE_THREAD);
+ ScrollStatus scroll_status;
+ scroll_status.thread = SCROLL_ON_MAIN_THREAD;
+ scroll_status.main_thread_scrolling_reasons =
+ MainThreadScrollingReason::kScrollbarScrolling;
+ return scroll_status;
+ }
+
return ScrollBeginImpl(scroll_state, OuterViewportScrollNode(), type);
}
@@ -3577,7 +3680,21 @@ InputHandler::ScrollStatus LayerTreeHostImpl::ScrollBegin(
active_tree_->FindLayerThatIsHitByPoint(device_viewport_point);
if (layer_impl) {
- if (!IsInitialScrollHitTestReliable(layer_impl, device_viewport_point)) {
+ LayerImpl* first_scrolling_layer_or_scrollbar =
+ active_tree_->FindFirstScrollingLayerOrScrollbarThatIsHitByPoint(
+ device_viewport_point);
+
+ // Touch dragging the scrollbar requires falling back to main-thread
+ // scrolling.
+ if (IsTouchDraggingScrollbar(first_scrolling_layer_or_scrollbar, type)) {
+ TRACE_EVENT_INSTANT0("cc", "Scrollbar Scrolling",
+ TRACE_EVENT_SCOPE_THREAD);
+ scroll_status.thread = SCROLL_ON_MAIN_THREAD;
+ scroll_status.main_thread_scrolling_reasons =
+ MainThreadScrollingReason::kScrollbarScrolling;
+ return scroll_status;
+ } else if (!IsInitialScrollHitTestReliable(
+ layer_impl, first_scrolling_layer_or_scrollbar)) {
TRACE_EVENT_INSTANT0("cc", "Failed Hit Test", TRACE_EVENT_SCOPE_THREAD);
scroll_status.thread = SCROLL_UNKNOWN;
scroll_status.main_thread_scrolling_reasons =
@@ -3603,17 +3720,23 @@ InputHandler::ScrollStatus LayerTreeHostImpl::ScrollBegin(
return ScrollBeginImpl(scroll_state, scrolling_node, type);
}
-// Some initial scroll tests are known to be unreliable and require falling
-// back to main thread scrolling.
+// Returns true when a touch hit tests to a scrollbar layer, which requires
+// falling back to main thread scrolling.
+bool LayerTreeHostImpl::IsTouchDraggingScrollbar(
+ LayerImpl* first_scrolling_layer_or_scrollbar,
+ InputHandler::ScrollInputType type) {
+ return first_scrolling_layer_or_scrollbar &&
+ first_scrolling_layer_or_scrollbar->is_scrollbar() &&
+ type == InputHandler::TOUCHSCREEN;
+}
+
+// Initial scroll hit testing can be unreliable in the presence of squashed
+// layers. In this case, we fall back to main thread scrolling.
bool LayerTreeHostImpl::IsInitialScrollHitTestReliable(
LayerImpl* layer_impl,
- const gfx::PointF& device_viewport_point) {
- LayerImpl* first_scrolling_layer_or_drawn_scrollbar =
- active_tree_->FindFirstScrollingLayerOrDrawnScrollbarThatIsHitByPoint(
- device_viewport_point);
- if (!first_scrolling_layer_or_drawn_scrollbar)
+ LayerImpl* first_scrolling_layer_or_scrollbar) {
+ if (!first_scrolling_layer_or_scrollbar)
return true;
-
ScrollNode* closest_scroll_node = nullptr;
auto& scroll_tree = active_tree_->property_trees()->scroll_tree;
ScrollNode* scroll_node = scroll_tree.Node(layer_impl->scroll_tree_index());
@@ -3627,19 +3750,19 @@ bool LayerTreeHostImpl::IsInitialScrollHitTestReliable(
if (!closest_scroll_node)
return false;
- // If |first_scrolling_layer_or_drawn_scrollbar| is scrollable, it will
+ // If |first_scrolling_layer_or_scrollbar| is scrollable, it will
  // create a scroll node. If this scroll node corresponds to the first scrollable
// ancestor along the scroll tree for |layer_impl|, the hit test has not
// escaped to other areas of the scroll tree and is reliable.
- if (first_scrolling_layer_or_drawn_scrollbar->scrollable()) {
+ if (first_scrolling_layer_or_scrollbar->scrollable()) {
return closest_scroll_node->id ==
- first_scrolling_layer_or_drawn_scrollbar->scroll_tree_index();
+ first_scrolling_layer_or_scrollbar->scroll_tree_index();
}
- // If |first_scrolling_layer_or_drawn_scrollbar| is not scrollable, it must
- // be a drawn scrollbar. These hit tests require falling back to main-thread
- // scrolling.
- DCHECK(first_scrolling_layer_or_drawn_scrollbar->IsDrawnScrollbar());
+ // If |first_scrolling_layer_or_scrollbar| is not scrollable, it must
+ // be a drawn scrollbar. It may hit the squashing layer at the same time.
+ // These hit tests require falling back to main-thread scrolling.
+ DCHECK(first_scrolling_layer_or_scrollbar->is_scrollbar());
return false;
}
@@ -5269,8 +5392,7 @@ bool LayerTreeHostImpl::EvictedUIResourcesExist() const {
}
void LayerTreeHostImpl::MarkUIResourceNotEvicted(UIResourceId uid) {
- std::set<UIResourceId>::iterator found_in_evicted =
- evicted_ui_resources_.find(uid);
+ auto found_in_evicted = evicted_ui_resources_.find(uid);
if (found_in_evicted == evicted_ui_resources_.end())
return;
evicted_ui_resources_.erase(found_in_evicted);
@@ -5292,13 +5414,13 @@ void LayerTreeHostImpl::RemoveSwapPromiseMonitor(SwapPromiseMonitor* monitor) {
}
void LayerTreeHostImpl::NotifySwapPromiseMonitorsOfSetNeedsRedraw() {
- std::set<SwapPromiseMonitor*>::iterator it = swap_promise_monitor_.begin();
+ auto it = swap_promise_monitor_.begin();
for (; it != swap_promise_monitor_.end(); it++)
(*it)->OnSetNeedsRedrawOnImpl();
}
void LayerTreeHostImpl::NotifySwapPromiseMonitorsOfForwardingToMainThread() {
- std::set<SwapPromiseMonitor*>::iterator it = swap_promise_monitor_.begin();
+ auto it = swap_promise_monitor_.begin();
for (; it != swap_promise_monitor_.end(); it++)
(*it)->OnForwardScrollUpdateToMainThreadOnImpl();
}
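
The hunks above route touch drags that land on a scrollbar to the main thread (the new IsTouchDraggingScrollbar() helper plus the kScrollbarScrolling early-outs in ScrollBegin/RootScrollBegin). Below is a minimal standalone C++ sketch of that decision only; it is not part of this patch, and HitLayer, ScrollThread and ChooseScrollThread are simplified stand-ins invented for illustration, not the real cc types.

#include <iostream>

enum class ScrollInputType { kWheel, kTouchscreen };
enum class ScrollThread { kImplThread, kMainThread };

struct HitLayer {
  bool is_scrollbar = false;  // stand-in for LayerImpl::is_scrollbar()
};

// Mirrors the IsTouchDraggingScrollbar() check: a touch drag whose hit test
// lands on a scrollbar layer is handed to the main thread, while wheel input
// over the same layer may stay on the impl thread.
ScrollThread ChooseScrollThread(const HitLayer* hit, ScrollInputType type) {
  if (hit && hit->is_scrollbar && type == ScrollInputType::kTouchscreen)
    return ScrollThread::kMainThread;
  return ScrollThread::kImplThread;
}

int main() {
  HitLayer scrollbar;
  scrollbar.is_scrollbar = true;
  std::cout << "touch on scrollbar -> main thread: "
            << (ChooseScrollThread(&scrollbar, ScrollInputType::kTouchscreen) ==
                ScrollThread::kMainThread)
            << "\n";
  std::cout << "wheel on scrollbar -> impl thread: "
            << (ChooseScrollThread(&scrollbar, ScrollInputType::kWheel) ==
                ScrollThread::kImplThread)
            << "\n";
  return 0;
}

Wheel scrolls over a scrollbar can still end up on the main thread, but only via the separate IsInitialScrollHitTestReliable() path shown in the hunks above (reported as kFailedHitTest rather than kScrollbarScrolling).
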
diff --git a/chromium/cc/trees/layer_tree_host_impl.h b/chromium/cc/trees/layer_tree_host_impl.h
index a8188a89ed4..a215f1cb908 100644
--- a/chromium/cc/trees/layer_tree_host_impl.h
+++ b/chromium/cc/trees/layer_tree_host_impl.h
@@ -18,7 +18,6 @@
#include "base/containers/circular_deque.h"
#include "base/containers/flat_map.h"
#include "base/macros.h"
-#include "base/memory/memory_coordinator_client.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/sequenced_task_runner.h"
#include "base/time/time.h"
@@ -171,8 +170,7 @@ class CC_EXPORT LayerTreeHostImpl
public ScrollbarAnimationControllerClient,
public VideoFrameControllerClient,
public MutatorHostClient,
- public base::SupportsWeakPtr<LayerTreeHostImpl>,
- public base::MemoryCoordinatorClient {
+ public base::SupportsWeakPtr<LayerTreeHostImpl> {
public:
// This structure is used to build all the state required for producing a
// single CompositorFrame. The |render_passes| list becomes the set of
@@ -788,7 +786,12 @@ class CC_EXPORT LayerTreeHostImpl
ScrollState* scroll_state,
ScrollNode* scrolling_node,
InputHandler::ScrollInputType type);
- bool IsInitialScrollHitTestReliable(LayerImpl* layer, const gfx::PointF&);
+ bool IsTouchDraggingScrollbar(
+ LayerImpl* first_scrolling_layer_or_drawn_scrollbar,
+ InputHandler::ScrollInputType type);
+ bool IsInitialScrollHitTestReliable(
+ LayerImpl* layer,
+ LayerImpl* first_scrolling_layer_or_drawn_scrollbar);
void DistributeScrollDelta(ScrollState* scroll_state);
bool AnimatePageScale(base::TimeTicks monotonic_time);
@@ -877,11 +880,6 @@ class CC_EXPORT LayerTreeHostImpl
// active tree.
void ActivateStateForImages();
- // Overriden from base::MemoryCoordinatorClient.
- void OnPurgeMemory() override;
-
- // TODO(gyuyoung): OnMemoryPressure is deprecated. So this should be removed
- // when the memory coordinator is enabled by default.
void OnMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level);
diff --git a/chromium/cc/trees/layer_tree_host_impl_unittest.cc b/chromium/cc/trees/layer_tree_host_impl_unittest.cc
index 6857c8ddf26..47732887d78 100644
--- a/chromium/cc/trees/layer_tree_host_impl_unittest.cc
+++ b/chromium/cc/trees/layer_tree_host_impl_unittest.cc
@@ -1022,6 +1022,20 @@ TEST_F(CommitToPendingTreeLayerTreeHostImplTest,
"Compositing.Renderer.GPUMemoryForTilingsInKb", 1);
}
+TEST_F(LayerTreeHostImplTest, ScrollBeforeRootLayerAttached) {
+ InputHandler::ScrollStatus status = host_impl_->ScrollBegin(
+ BeginState(gfx::Point()).get(), InputHandler::WHEEL);
+ EXPECT_EQ(InputHandler::SCROLL_IGNORED, status.thread);
+ EXPECT_EQ(MainThreadScrollingReason::kNoScrollingLayer,
+ status.main_thread_scrolling_reasons);
+
+ status = host_impl_->RootScrollBegin(BeginState(gfx::Point()).get(),
+ InputHandler::WHEEL);
+ EXPECT_EQ(InputHandler::SCROLL_IGNORED, status.thread);
+ EXPECT_EQ(MainThreadScrollingReason::kNoScrollingLayer,
+ status.main_thread_scrolling_reasons);
+}
+
TEST_F(LayerTreeHostImplTest, ScrollRootCallsCommitAndRedraw) {
SetupScrollAndContentsLayers(gfx::Size(100, 100));
host_impl_->active_tree()->BuildPropertyTreesForTesting();
@@ -1296,8 +1310,8 @@ TEST_F(LayerTreeHostImplTest, NonFastScrollableRegionBasic) {
SetupScrollAndContentsLayers(gfx::Size(200, 200));
host_impl_->active_tree()->SetDeviceViewportSize(gfx::Size(100, 100));
- LayerImpl* root = host_impl_->active_tree()->root_layer_for_testing();
- root->SetNonFastScrollableRegion(gfx::Rect(0, 0, 50, 50));
+ LayerImpl* outer_scroll = host_impl_->OuterViewportScrollLayer();
+ outer_scroll->SetNonFastScrollableRegion(gfx::Rect(0, 0, 50, 50));
host_impl_->active_tree()->BuildPropertyTreesForTesting();
DrawFrame();
@@ -1352,16 +1366,16 @@ TEST_F(LayerTreeHostImplTest, NonFastScrollableRegionWithOffset) {
SetupScrollAndContentsLayers(gfx::Size(200, 200));
host_impl_->active_tree()->SetDeviceViewportSize(gfx::Size(100, 100));
- LayerImpl* root = *host_impl_->active_tree()->begin();
- root->SetNonFastScrollableRegion(gfx::Rect(0, 0, 50, 50));
- root->SetPosition(gfx::PointF(-25.f, 0.f));
- root->SetDrawsContent(true);
+ LayerImpl* outer_scroll = host_impl_->OuterViewportScrollLayer();
+ outer_scroll->SetNonFastScrollableRegion(gfx::Rect(0, 0, 50, 50));
+ outer_scroll->SetPosition(gfx::PointF(-25.f, 0.f));
+ outer_scroll->SetDrawsContent(true);
host_impl_->active_tree()->BuildPropertyTreesForTesting();
DrawFrame();
// This point would fall into the non-fast scrollable region except that we've
- // moved the layer down by 25 pixels.
+ // moved the layer left by 25 pixels.
InputHandler::ScrollStatus status = host_impl_->ScrollBegin(
BeginState(gfx::Point(40, 10)).get(), InputHandler::WHEEL);
EXPECT_EQ(InputHandler::SCROLL_ON_IMPL_THREAD, status.thread);
@@ -1933,6 +1947,7 @@ TEST_F(CommitToPendingTreeLayerTreeHostImplTest,
host_impl_->pending_tree()->SetRootLayerForTesting(std::move(root_owned));
root->SetBounds(gfx::Size(50, 50));
root->test_properties()->force_render_surface = true;
+ root->SetNeedsPushProperties();
root->test_properties()->AddChild(
LayerImpl::Create(host_impl_->pending_tree(), 2));
@@ -1940,6 +1955,7 @@ TEST_F(CommitToPendingTreeLayerTreeHostImplTest,
child->SetBounds(gfx::Size(10, 10));
child->draw_properties().visible_layer_rect = gfx::Rect(10, 10);
child->SetDrawsContent(true);
+ child->SetNeedsPushProperties();
host_impl_->pending_tree()->SetElementIdsForTesting();
@@ -3772,7 +3788,9 @@ class LayerTreeHostImplTestScrollbarOpacity : public LayerTreeHostImplTest {
scrollbar->SetScrollElementId(scroll->element_id());
scrollbar->SetBounds(gfx::Size(10, 100));
scrollbar->SetPosition(gfx::PointF(90, 0));
+ scrollbar->SetNeedsPushProperties();
container->test_properties()->AddChild(std::move(scrollbar));
+
host_impl_->pending_tree()->PushPageScaleFromMainThread(1.f, 1.f, 1.f);
host_impl_->pending_tree()->BuildPropertyTreesForTesting();
host_impl_->ActivateSyncTree();
@@ -3803,6 +3821,8 @@ class LayerTreeHostImplTestScrollbarOpacity : public LayerTreeHostImplTest {
container = host_impl_->pending_tree()->InnerViewportContainerLayer();
container->test_properties()->force_render_surface = true;
container->SetBounds(gfx::Size(10, 10));
+ container->SetNeedsPushProperties();
+
host_impl_->pending_tree()->BuildPropertyTreesForTesting();
LayerImpl* pending_scrollbar_layer =
@@ -3948,9 +3968,10 @@ TEST_F(LayerTreeHostImplTestMultiScrollable,
EXPECT_EQ(scrollbar_2_->Opacity(), 0.f);
// Scroll on root should flash all scrollbars.
- host_impl_->RootScrollBegin(BeginState(gfx::Point(10, 10)).get(),
+ host_impl_->RootScrollBegin(BeginState(gfx::Point(20, 20)).get(),
InputHandler::WHEEL);
- host_impl_->ScrollBy(UpdateState(gfx::Point(), gfx::Vector2d(0, 10)).get());
+ host_impl_->ScrollBy(
+ UpdateState(gfx::Point(20, 20), gfx::Vector2d(0, 10)).get());
host_impl_->ScrollEnd(EndState().get());
EXPECT_TRUE(scrollbar_1_->Opacity());
@@ -3960,8 +3981,8 @@ TEST_F(LayerTreeHostImplTestMultiScrollable,
ResetScrollbars();
// Scroll on child should flash all scrollbars.
- host_impl_->ScrollAnimatedBegin(BeginState(gfx::Point(51, 51)).get());
- host_impl_->ScrollAnimated(gfx::Point(51, 51), gfx::Vector2d(0, 100));
+ host_impl_->ScrollAnimatedBegin(BeginState(gfx::Point(70, 70)).get());
+ host_impl_->ScrollAnimated(gfx::Point(70, 70), gfx::Vector2d(0, 100));
host_impl_->ScrollEnd(EndState().get());
EXPECT_TRUE(scrollbar_1_->Opacity());
@@ -3996,6 +4017,49 @@ TEST_F(LayerTreeHostImplTestMultiScrollable, ScrollbarFlashWhenMouseEnter) {
EXPECT_FALSE(animation_task_.Equals(base::Closure()));
}
+TEST_F(LayerTreeHostImplTestMultiScrollable, ScrollHitTestOnScrollbar) {
+ LayerTreeSettings settings = DefaultSettings();
+ settings.scrollbar_fade_delay = base::TimeDelta::FromMilliseconds(500);
+ settings.scrollbar_fade_duration = base::TimeDelta::FromMilliseconds(300);
+ settings.scrollbar_animator = LayerTreeSettings::NO_ANIMATOR;
+
+ SetUpLayers(settings);
+
+  // Wheel scroll on the root scrollbar should be handled on the impl thread.
+ {
+ InputHandler::ScrollStatus status = host_impl_->RootScrollBegin(
+ BeginState(gfx::Point(1, 1)).get(), InputHandler::WHEEL);
+ EXPECT_EQ(InputHandler::SCROLL_ON_IMPL_THREAD, status.thread);
+ }
+
+  // Touch scroll on the root scrollbar should be handled on the main thread.
+ {
+ InputHandler::ScrollStatus status = host_impl_->RootScrollBegin(
+ BeginState(gfx::Point(1, 1)).get(), InputHandler::TOUCHSCREEN);
+ EXPECT_EQ(InputHandler::SCROLL_ON_MAIN_THREAD, status.thread);
+ EXPECT_EQ(MainThreadScrollingReason::kScrollbarScrolling,
+ status.main_thread_scrolling_reasons);
+ }
+
+  // Wheel scroll on a scrollbar should fall back to the main thread.
+ {
+ InputHandler::ScrollStatus status = host_impl_->ScrollBegin(
+ BeginState(gfx::Point(51, 51)).get(), InputHandler::WHEEL);
+ EXPECT_EQ(InputHandler::SCROLL_UNKNOWN, status.thread);
+ EXPECT_EQ(MainThreadScrollingReason::kFailedHitTest,
+ status.main_thread_scrolling_reasons);
+ }
+
+  // Touch scroll on a scrollbar should be handled on the main thread.
+ {
+ InputHandler::ScrollStatus status = host_impl_->RootScrollBegin(
+ BeginState(gfx::Point(51, 51)).get(), InputHandler::TOUCHSCREEN);
+ EXPECT_EQ(InputHandler::SCROLL_ON_MAIN_THREAD, status.thread);
+ EXPECT_EQ(MainThreadScrollingReason::kScrollbarScrolling,
+ status.main_thread_scrolling_reasons);
+ }
+}
+
TEST_F(LayerTreeHostImplTest, ScrollbarVisibilityChangeCausesRedrawAndCommit) {
LayerTreeSettings settings = DefaultSettings();
settings.scrollbar_animator = LayerTreeSettings::AURA_OVERLAY;
@@ -4016,7 +4080,9 @@ TEST_F(LayerTreeHostImplTest, ScrollbarVisibilityChangeCausesRedrawAndCommit) {
scrollbar->SetScrollElementId(scroll->element_id());
scrollbar->SetBounds(gfx::Size(10, 100));
scrollbar->SetPosition(gfx::PointF(90, 0));
+ scrollbar->SetNeedsPushProperties();
container->test_properties()->AddChild(std::move(scrollbar));
+
host_impl_->pending_tree()->PushPageScaleFromMainThread(1.f, 1.f, 1.f);
host_impl_->pending_tree()->BuildPropertyTreesForTesting();
host_impl_->ActivateSyncTree();
@@ -9512,6 +9578,7 @@ TEST_F(LayerTreeHostImplTest, FarAwayQuadsDontNeedAA) {
LayerImpl::Create(host_impl_->pending_tree(), 1);
LayerImpl* root = scoped_root.get();
root->test_properties()->force_render_surface = true;
+ root->SetNeedsPushProperties();
host_impl_->pending_tree()->SetRootLayerForTesting(std::move(scoped_root));
@@ -9519,6 +9586,7 @@ TEST_F(LayerTreeHostImplTest, FarAwayQuadsDontNeedAA) {
LayerImpl::Create(host_impl_->pending_tree(), 2);
LayerImpl* scrolling_layer = scoped_scrolling_layer.get();
root->test_properties()->AddChild(std::move(scoped_scrolling_layer));
+ scrolling_layer->SetNeedsPushProperties();
gfx::Size content_layer_bounds(100001, 100);
scoped_refptr<FakeRasterSource> raster_source(
@@ -9531,6 +9599,7 @@ TEST_F(LayerTreeHostImplTest, FarAwayQuadsDontNeedAA) {
scrolling_layer->test_properties()->AddChild(std::move(scoped_content_layer));
content_layer->SetBounds(content_layer_bounds);
content_layer->SetDrawsContent(true);
+ content_layer->SetNeedsPushProperties();
root->SetBounds(root_size);
@@ -11454,6 +11523,9 @@ TEST_F(CommitToPendingTreeLayerTreeHostImplTest,
LayerImpl::Create(host_impl_->pending_tree(), 1));
host_impl_->pending_tree()->BuildPropertyTreesForTesting();
host_impl_->pending_tree()->UpdateDrawProperties();
+ host_impl_->pending_tree()
+ ->root_layer_for_testing()
+ ->SetNeedsPushProperties();
host_impl_->ActivateSyncTree();
host_impl_->active_tree()->BuildPropertyTreesForTesting();
diff --git a/chromium/cc/trees/layer_tree_host_perftest.cc b/chromium/cc/trees/layer_tree_host_perftest.cc
index fe6ad7870da..1c95b6fcf4e 100644
--- a/chromium/cc/trees/layer_tree_host_perftest.cc
+++ b/chromium/cc/trees/layer_tree_host_perftest.cc
@@ -255,8 +255,7 @@ class ScrollingLayerTreePerfTest : public LayerTreeHostPerfTestJsonReader {
ASSERT_TRUE(scrollable_.get());
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
if (TestEnded())
return;
static const gfx::Vector2d delta = gfx::Vector2d(0, 10);
diff --git a/chromium/cc/trees/layer_tree_host_unittest.cc b/chromium/cc/trees/layer_tree_host_unittest.cc
index 170f44fc3cd..d71fdf95d32 100644
--- a/chromium/cc/trees/layer_tree_host_unittest.cc
+++ b/chromium/cc/trees/layer_tree_host_unittest.cc
@@ -125,8 +125,7 @@ class LayerTreeHostTestSetNeedsCommitInsideLayout : public LayerTreeHostTest {
protected:
void BeginTest() override { PostSetNeedsCommitToMainThread(); }
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
// This shouldn't cause a second commit to happen.
layer_tree_host()->SetNeedsCommit();
}
@@ -172,8 +171,7 @@ class LayerTreeHostTestFrameOrdering : public LayerTreeHostTest {
void BeginTest() override { PostSetNeedsCommitToMainThread(); }
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
EXPECT_TRUE(CheckStep(MAIN_LAYOUT, &main_));
}
@@ -263,8 +261,7 @@ class LayerTreeHostTestSetNeedsUpdateInsideLayout : public LayerTreeHostTest {
protected:
void BeginTest() override { PostSetNeedsCommitToMainThread(); }
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
// This shouldn't cause a second commit to happen.
layer_tree_host()->SetNeedsUpdateLayers();
}
@@ -1143,6 +1140,169 @@ class LayerTreeHostTestSurfaceDamage : public LayerTreeHostTest {
SINGLE_AND_MULTI_THREAD_TEST_F(LayerTreeHostTestSurfaceDamage);
+class LayerTreeHostTestLayerListSurfaceDamage : public LayerTreeHostTest {
+ protected:
+ void InitializeSettings(LayerTreeSettings* settings) override {
+ settings->use_layer_lists = true;
+ }
+
+ void SetupTree() override {
+ root_ = Layer::Create();
+ child_a_ = Layer::Create();
+ child_b_ = Layer::Create();
+ child_c_ = Layer::Create();
+
+ layer_tree_host()->SetRootLayer(root_);
+
+ root_->AddChild(child_a_);
+ root_->AddChild(child_b_);
+ root_->AddChild(child_c_);
+
+ root_->SetBounds(gfx::Size(50, 50));
+
+ child_a_->SetBounds(gfx::Size(10, 20));
+ child_a_->SetForceRenderSurfaceForTesting(true);
+ child_a_->SetIsDrawable(true);
+
+ child_b_->SetBounds(gfx::Size(20, 10));
+ child_b_->SetForceRenderSurfaceForTesting(true);
+ child_b_->SetIsDrawable(true);
+
+ child_c_->SetBounds(gfx::Size(15, 15));
+ child_c_->SetForceRenderSurfaceForTesting(true);
+ child_c_->SetIsDrawable(true);
+
+ layer_tree_host()->BuildPropertyTreesForTesting();
+
+ LayerTreeHostTest::SetupTree();
+ }
+
+ void BeginTest() override { PostSetNeedsCommitToMainThread(); }
+
+ void DidCommit() override {
+ switch (layer_tree_host()->SourceFrameNumber()) {
+ case 1:
+ // Push an unchanged list. This should cause no damage.
+ {
+ LayerList same_list = root_->children();
+ root_->SetChildLayerList(same_list);
+ }
+ break;
+ case 2:
+ // Reverse the last two layers so the order becomes: [a, c, b]. This
+ // should only damage the 'b' layer.
+ {
+ LayerList last_two_reversed;
+ last_two_reversed.push_back(child_a_);
+ last_two_reversed.push_back(child_c_);
+ last_two_reversed.push_back(child_b_);
+ root_->SetChildLayerList(last_two_reversed);
+ }
+ break;
+ case 3:
+ // Reverse the first two layers so the order becomes: [c, a, b]. This
+ // should damage the last two layers, 'a' and 'b'.
+ {
+ LayerList last_pair_reversed;
+ last_pair_reversed.push_back(child_c_);
+ last_pair_reversed.push_back(child_a_);
+ last_pair_reversed.push_back(child_b_);
+ root_->SetChildLayerList(last_pair_reversed);
+ }
+ break;
+ case 4:
+ // Remove the first layer, 'c', so the order becomes: ['a', 'b']. This
+ // should not damage 'a' or 'b'.
+ {
+ LayerList first_removed = root_->children();
+ first_removed.erase(first_removed.begin());
+ root_->SetChildLayerList(first_removed);
+ }
+ break;
+ case 5:
+ // Add a new layer, 'c', so the order becomes: ['a', 'b', 'c']. This
+ // should only damage 'c'.
+ {
+ LayerList existing_plus_new_child = root_->children();
+ existing_plus_new_child.push_back(child_c_);
+ root_->SetChildLayerList(existing_plus_new_child);
+ }
+ break;
+ }
+ }
+
+ DrawResult PrepareToDrawOnThread(LayerTreeHostImpl* impl,
+ LayerTreeHostImpl::FrameData* frame_data,
+ DrawResult draw_result) override {
+ LayerImpl* child_a_impl = impl->active_tree()->LayerById(child_a_->id());
+ LayerImpl* child_b_impl = impl->active_tree()->LayerById(child_b_->id());
+ LayerImpl* child_c_impl = impl->active_tree()->LayerById(child_c_->id());
+ switch (impl->active_tree()->source_frame_number()) {
+ case 0:
+ // Full damage on first frame.
+ EXPECT_EQ(GetRenderSurface(child_a_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 10, 20));
+ EXPECT_EQ(GetRenderSurface(child_b_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 20, 10));
+ EXPECT_EQ(GetRenderSurface(child_c_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 15, 15));
+ PostSetNeedsCommitToMainThread();
+ break;
+ case 1:
+ // No damage after pushing the same list.
+ EXPECT_TRUE(GetRenderSurface(child_a_impl)->GetDamageRect().IsEmpty());
+ EXPECT_TRUE(GetRenderSurface(child_b_impl)->GetDamageRect().IsEmpty());
+ EXPECT_TRUE(GetRenderSurface(child_c_impl)->GetDamageRect().IsEmpty());
+ PostSetNeedsCommitToMainThread();
+ break;
+ case 2:
+ // Only 'b' damaged after reversing the last two layers.
+ EXPECT_TRUE(GetRenderSurface(child_a_impl)->GetDamageRect().IsEmpty());
+ EXPECT_EQ(GetRenderSurface(child_b_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 20, 10));
+ EXPECT_TRUE(GetRenderSurface(child_c_impl)->GetDamageRect().IsEmpty());
+ PostSetNeedsCommitToMainThread();
+ break;
+ case 3:
+ // 'a' and 'b' damaged after reversing the first two layers.
+ EXPECT_EQ(GetRenderSurface(child_a_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 10, 20));
+ EXPECT_EQ(GetRenderSurface(child_b_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 20, 10));
+ EXPECT_TRUE(GetRenderSurface(child_c_impl)->GetDamageRect().IsEmpty());
+ PostSetNeedsCommitToMainThread();
+ break;
+ case 4:
+ // When the first layer, 'c', is removed, 'a' and 'b' should not be
+ // damaged.
+ EXPECT_TRUE(GetRenderSurface(child_a_impl)->GetDamageRect().IsEmpty());
+ EXPECT_TRUE(GetRenderSurface(child_b_impl)->GetDamageRect().IsEmpty());
+ PostSetNeedsCommitToMainThread();
+ break;
+ case 5:
+ // When 'c' is added, 'a' and 'b' should not be damaged.
+ EXPECT_TRUE(GetRenderSurface(child_a_impl)->GetDamageRect().IsEmpty());
+ EXPECT_TRUE(GetRenderSurface(child_b_impl)->GetDamageRect().IsEmpty());
+ EXPECT_EQ(GetRenderSurface(child_c_impl)->GetDamageRect(),
+ gfx::Rect(0, 0, 15, 15));
+ EndTest();
+ break;
+ }
+
+ return draw_result;
+ }
+
+ void AfterTest() override {}
+
+ private:
+ scoped_refptr<Layer> root_;
+ scoped_refptr<Layer> child_a_;
+ scoped_refptr<Layer> child_b_;
+ scoped_refptr<Layer> child_c_;
+};
+
+SINGLE_AND_MULTI_THREAD_TEST_F(LayerTreeHostTestLayerListSurfaceDamage);
+
// When settings->enable_early_damage_check is true, verify that invalidate is
// not called when changes to a layer don't cause visible damage.
class LayerTreeHostTestNoDamageCausesNoInvalidate : public LayerTreeHostTest {
@@ -3074,14 +3234,12 @@ class LayerTreeHostTestStartPageScaleAnimation : public LayerTreeHostTest {
void BeginTest() override { PostSetNeedsCommitToMainThread(); }
- void ApplyViewportDeltas(const gfx::Vector2dF& scroll_delta,
- const gfx::Vector2dF&,
- const gfx::Vector2dF& elastic_overscroll_delta,
- float scale,
- float) override {
+ void ApplyViewportChanges(const ApplyViewportChangesArgs& args) override {
gfx::ScrollOffset offset = scroll_layer_->CurrentScrollOffset();
- scroll_layer_->SetScrollOffset(ScrollOffsetWithDelta(offset, scroll_delta));
- layer_tree_host()->SetPageScaleFactorAndLimits(scale, 0.5f, 2.f);
+ scroll_layer_->SetScrollOffset(
+ ScrollOffsetWithDelta(offset, args.inner_delta));
+ layer_tree_host()->SetPageScaleFactorAndLimits(args.page_scale_delta, 0.5f,
+ 2.f);
}
void DidActivateTreeOnThread(LayerTreeHostImpl* impl) override {
@@ -3163,14 +3321,10 @@ class ViewportDeltasAppliedDuringPinch : public LayerTreeHostTest {
}
}
- void ApplyViewportDeltas(const gfx::Vector2dF& inner,
- const gfx::Vector2dF& outer,
- const gfx::Vector2dF& elastic_overscroll_delta,
- float scale_delta,
- float top_controls_delta) override {
+ void ApplyViewportChanges(const ApplyViewportChangesArgs& args) override {
EXPECT_TRUE(sent_gesture_);
- EXPECT_EQ(gfx::Vector2dF(50, 50), inner);
- EXPECT_EQ(2, scale_delta);
+ EXPECT_EQ(gfx::Vector2dF(50, 50), args.inner_delta);
+ EXPECT_EQ(2, args.page_scale_delta);
auto* scroll_layer = layer_tree_host()->inner_viewport_scroll_layer();
EXPECT_EQ(gfx::ScrollOffset(50, 50), scroll_layer->CurrentScrollOffset());
@@ -3372,7 +3526,7 @@ class LayerTreeHostTestDeferCommits : public LayerTreeHostTest {
void BeginTest() override {
// Start with commits deferred.
- PostSetDeferCommitsToMainThread(true);
+ PostGetDeferCommitsToMainThread(&scoped_defer_commits_);
PostSetNeedsCommitToMainThread();
}
@@ -3406,6 +3560,7 @@ class LayerTreeHostTestDeferCommits : public LayerTreeHostTest {
}
void WillBeginMainFrame() override {
+ EXPECT_FALSE(scoped_defer_commits_);
EXPECT_TRUE(IsCommitAllowed());
num_send_begin_main_frame_++;
EndTest();
@@ -3418,12 +3573,13 @@ class LayerTreeHostTestDeferCommits : public LayerTreeHostTest {
virtual void AllowCommits() {
allow_commits_ = true;
- layer_tree_host()->SetDeferCommits(false);
+ scoped_defer_commits_.reset();
}
virtual bool IsCommitAllowed() const { return allow_commits_; }
private:
+ std::unique_ptr<ScopedDeferCommits> scoped_defer_commits_;
bool allow_commits_ = false;
int num_will_begin_impl_frame_ = 0;
int num_send_begin_main_frame_ = 0;
@@ -3474,11 +3630,9 @@ class LayerTreeHostTestDeferCommitsInsideBeginMainFrame
void WillBeginMainFrame() override {
++begin_main_frame_count_;
- if (allow_commits_)
- return;
// This should prevent the commit from happening.
- layer_tree_host()->SetDeferCommits(true);
+ scoped_defer_commits_ = layer_tree_host()->DeferCommits();
// Wait to see if the commit happens. It's possible the deferred
// commit happens when it shouldn't but takes long enough that
// this passes. But it won't fail when it shouldn't.
@@ -3497,7 +3651,7 @@ class LayerTreeHostTestDeferCommitsInsideBeginMainFrame
}
private:
- bool allow_commits_ = false;
+ std::unique_ptr<ScopedDeferCommits> scoped_defer_commits_;
int commit_count_ = 0;
int begin_main_frame_count_ = 0;
};
@@ -3520,7 +3674,7 @@ class LayerTreeHostTestDeferCommitsInsideBeginMainFrameWithCommitAfter
return;
// This should prevent the commit from happening.
- layer_tree_host()->SetDeferCommits(true);
+ scoped_defer_commits_ = layer_tree_host()->DeferCommits();
// Wait to see if the commit happens. It's possible the deferred
// commit happens when it shouldn't but takes long enough that
// this passes. But it won't fail when it shouldn't.
@@ -3538,7 +3692,7 @@ class LayerTreeHostTestDeferCommitsInsideBeginMainFrameWithCommitAfter
// Once we've waited and seen that commit did not happen, we
// allow commits and should see this one go through.
allow_commits_ = true;
- layer_tree_host()->SetDeferCommits(false);
+ scoped_defer_commits_.reset();
}
void DidCommit() override {
@@ -3559,6 +3713,7 @@ class LayerTreeHostTestDeferCommitsInsideBeginMainFrameWithCommitAfter
}
private:
+ std::unique_ptr<ScopedDeferCommits> scoped_defer_commits_;
bool allow_commits_ = false;
int commit_count_ = 0;
int begin_main_frame_count_ = 0;
@@ -3569,7 +3724,7 @@ SINGLE_AND_MULTI_THREAD_TEST_F(
LayerTreeHostTestDeferCommitsInsideBeginMainFrameWithCommitAfter);
// This verifies that animate_only BeginFrames only run animation/layout
-// updates, i.e. abort commits after the animate stage and only request layer
+// updates, i.e. abort commits after the paint stage and only request layer
// tree updates for layout.
//
// The test sends four Begin(Main)Frames in sequence: three animate_only
@@ -3654,21 +3809,7 @@ class LayerTreeHostTestAnimateOnlyBeginFrames
}
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
- ++update_layer_tree_host_count_;
-
- if (begin_frame_count_ < 4) {
- // First three BeginFrames are animate_only, so only kPrePaint updates are
- // requested.
- EXPECT_EQ(LayerTreeHostClient::VisualStateUpdate::kPrePaint,
- requested_update);
- } else {
- EXPECT_EQ(4, begin_frame_count_);
- // Last BeginFrame is normal, so all updates are requested.
- EXPECT_EQ(LayerTreeHostClient::VisualStateUpdate::kAll, requested_update);
- }
- }
+ void UpdateLayerTreeHost() override { ++update_layer_tree_host_count_; }
void DidCommit() override {
++commit_count_;
@@ -4219,7 +4360,6 @@ class LayerTreeHostTestLayersPushProperties : public LayerTreeHostTest {
child2_ = PushPropertiesCountingLayer::Create();
grandchild_ = PushPropertiesCountingLayer::Create();
leaf_always_pushing_layer_ = PushPropertiesCountingLayer::Create();
- leaf_always_pushing_layer_->set_persist_needs_push_properties(true);
root_->AddChild(child_);
root_->AddChild(child2_);
@@ -4255,36 +4395,39 @@ class LayerTreeHostTestLayersPushProperties : public LayerTreeHostTest {
// The scrollbar layer always needs to be pushed.
if (root_->layer_tree_host()) {
- EXPECT_FALSE(root_->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ root_->layer_tree_host()->LayersThatShouldPushProperties(),
root_.get()));
}
if (child2_->layer_tree_host()) {
- EXPECT_FALSE(
- child2_->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child2_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ child2_->layer_tree_host()->LayersThatShouldPushProperties(),
+ child2_.get()));
}
if (leaf_always_pushing_layer_->layer_tree_host()) {
- EXPECT_TRUE(leaf_always_pushing_layer_->layer_tree_host()
- ->LayerNeedsPushPropertiesForTesting(
- leaf_always_pushing_layer_.get()));
+ leaf_always_pushing_layer_->SetNeedsPushProperties();
+ EXPECT_TRUE(
+ base::ContainsKey(leaf_always_pushing_layer_->layer_tree_host()
+ ->LayersThatShouldPushProperties(),
+ leaf_always_pushing_layer_.get()));
}
// child_ and grandchild_ don't persist their need to push properties.
if (child_->layer_tree_host()) {
- EXPECT_FALSE(
- child_->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ child_->layer_tree_host()->LayersThatShouldPushProperties(),
+ child_.get()));
}
if (grandchild_->layer_tree_host()) {
- EXPECT_FALSE(
- grandchild_->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- grandchild_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ grandchild_->layer_tree_host()->LayersThatShouldPushProperties(),
+ grandchild_.get()));
}
if (other_root_->layer_tree_host()) {
- EXPECT_FALSE(
- other_root_->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- other_root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ other_root_->layer_tree_host()->LayersThatShouldPushProperties(),
+ other_root_.get()));
}
switch (num_commits_) {
@@ -4635,7 +4778,8 @@ class LayerTreeHostTestPropertyChangesDuringUpdateArePushed
scrollbar_layer_->SetBounds(gfx::Size(30, 30));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
scrollbar_layer_.get()));
layer_tree_host()->SetNeedsCommit();
@@ -4680,10 +4824,10 @@ class LayerTreeHostTestSetDrawableCausesCommit : public LayerTreeHostTest {
// avoid causing a second commit to be scheduled. If a property change
// is made during this, however, it needs to be pushed in the upcoming
// commit.
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
EXPECT_EQ(0, root_->NumDescendantsThatDrawContent());
root_->reset_push_properties_count();
child_->reset_push_properties_count();
@@ -4691,21 +4835,19 @@ class LayerTreeHostTestSetDrawableCausesCommit : public LayerTreeHostTest {
EXPECT_EQ(1, root_->NumDescendantsThatDrawContent());
EXPECT_EQ(0u, root_->push_properties_count());
EXPECT_EQ(0u, child_->push_properties_count());
- EXPECT_TRUE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(
-
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
break;
}
case 2:
EXPECT_EQ(1u, root_->push_properties_count());
EXPECT_EQ(1u, child_->push_properties_count());
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
EndTest();
break;
}
@@ -4772,15 +4914,18 @@ class LayerTreeHostTestPushPropertiesAddingToTreeRequiresPush
case 0:
// All layers will need push properties as we set their layer tree host
layer_tree_host()->SetRootLayer(root_);
- EXPECT_TRUE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
break;
case 1:
@@ -4802,71 +4947,82 @@ class LayerTreeHostTestPushPropertiesRemovingChildStopsRecursion
layer_tree_host()->SetRootLayer(root_);
break;
case 1:
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
grandchild1_->RemoveFromParent();
grandchild1_->SetPosition(gfx::PointF(1.f, 1.f));
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
child_->AddChild(grandchild1_);
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
grandchild2_->SetPosition(gfx::PointF(1.f, 1.f));
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
        // grandchild2_ will still need to push properties.
grandchild1_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
        // grandchild3_ does not need to push properties, so recursing should
// no longer be needed.
grandchild2_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
EndTest();
break;
}
@@ -4879,41 +5035,47 @@ class LayerTreeHostTestPushPropertiesRemovingChildStopsRecursionWithPersistence
: public LayerTreeHostTestCasePushPropertiesThreeGrandChildren {
protected:
void DidCommitAndDrawFrame() override {
+    // The grandchildren are set to need push properties, to verify the impact
+    // on their ancestors.
+ grandchild1_->SetNeedsPushProperties();
+ grandchild2_->SetNeedsPushProperties();
+
int last_source_frame_number = layer_tree_host()->SourceFrameNumber() - 1;
switch (last_source_frame_number) {
case 0:
layer_tree_host()->SetRootLayer(root_);
- grandchild1_->set_persist_needs_push_properties(true);
- grandchild2_->set_persist_needs_push_properties(true);
break;
case 1:
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
        // grandchild2_ will still need to push properties.
grandchild1_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
        // grandchild3_ does not need to push properties, so recursing should
// no longer be needed.
grandchild2_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
EndTest();
break;
}
@@ -4933,15 +5095,18 @@ class LayerTreeHostTestPushPropertiesSetPropertiesWhileOutsideTree
layer_tree_host()->SetRootLayer(root_);
break;
case 1:
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
// Change grandchildren while their parent is not in the tree.
@@ -4950,37 +5115,40 @@ class LayerTreeHostTestPushPropertiesSetPropertiesWhileOutsideTree
grandchild2_->SetPosition(gfx::PointF(1.f, 1.f));
root_->AddChild(child_);
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
grandchild1_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
grandchild2_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
grandchild3_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
EndTest();
break;
@@ -5001,50 +5169,56 @@ class LayerTreeHostTestPushPropertiesSetPropertyInParentThenChild
layer_tree_host()->SetRootLayer(root_);
break;
case 1:
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
child_->SetPosition(gfx::PointF(1.f, 1.f));
grandchild1_->SetPosition(gfx::PointF(1.f, 1.f));
grandchild2_->SetPosition(gfx::PointF(1.f, 1.f));
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
grandchild1_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
grandchild2_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
child_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
EndTest();
break;
@@ -5065,50 +5239,56 @@ class LayerTreeHostTestPushPropertiesSetPropertyInChildThenParent
layer_tree_host()->SetRootLayer(root_);
break;
case 1:
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
grandchild1_->SetPosition(gfx::PointF(1.f, 1.f));
grandchild2_->SetPosition(gfx::PointF(1.f, 1.f));
child_->SetPosition(gfx::PointF(1.f, 1.f));
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild1_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild2_.get()));
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
grandchild3_.get()));
grandchild1_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
grandchild2_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
- EXPECT_TRUE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- child_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
+ EXPECT_TRUE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), child_.get()));
child_->RemoveFromParent();
- EXPECT_FALSE(
- layer_tree_host()->LayerNeedsPushPropertiesForTesting(root_.get()));
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(), root_.get()));
EndTest();
break;
@@ -5269,7 +5449,8 @@ class LayerTreeHostTestPushHiddenLayer : public LayerTreeHostTest {
switch (layer_tree_host()->SourceFrameNumber()) {
case 1:
// The layer type used does not need to push properties every frame.
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
child_layer_.get()));
// Change the bounds of the child layer, but make it skipped
@@ -5279,7 +5460,8 @@ class LayerTreeHostTestPushHiddenLayer : public LayerTreeHostTest {
break;
case 2:
// The bounds of the child layer were pushed to the impl side.
- EXPECT_FALSE(layer_tree_host()->LayerNeedsPushPropertiesForTesting(
+ EXPECT_FALSE(base::ContainsKey(
+ layer_tree_host()->LayersThatShouldPushProperties(),
child_layer_.get()));
EndTest();
@@ -6509,8 +6691,9 @@ class LayerTreeHostTestWillBeginImplFrameHasDidFinishImplFrame
int did_finish_impl_frame_count_;
};
-SINGLE_AND_MULTI_THREAD_TEST_F(
- LayerTreeHostTestWillBeginImplFrameHasDidFinishImplFrame);
+// TODO(crbug.com/842038): Disabled as flaky.
+// SINGLE_AND_MULTI_THREAD_TEST_F(
+// LayerTreeHostTestWillBeginImplFrameHasDidFinishImplFrame);
::testing::AssertionResult AssertFrameTimeContained(
const char* haystack_expr,
@@ -6833,13 +7016,9 @@ class LayerTreeHostAcceptsDeltasFromImplWithoutRootLayer
EndTest();
}
- void ApplyViewportDeltas(const gfx::Vector2dF& inner,
- const gfx::Vector2dF& outer,
- const gfx::Vector2dF& elastic_overscroll_delta,
- float scale_delta,
- float top_controls_delta) override {
- EXPECT_EQ(info_.page_scale_delta, scale_delta);
- EXPECT_EQ(info_.top_controls_delta, top_controls_delta);
+ void ApplyViewportChanges(const ApplyViewportChangesArgs& args) override {
+ EXPECT_EQ(info_.page_scale_delta, args.page_scale_delta);
+ EXPECT_EQ(info_.top_controls_delta, args.browser_controls_delta);
deltas_sent_to_client_ = true;
}
@@ -8728,5 +8907,40 @@ class LayerTreeHostTestNewLocalSurfaceIdForcesDraw : public LayerTreeHostTest {
SINGLE_AND_MULTI_THREAD_TEST_F(LayerTreeHostTestNewLocalSurfaceIdForcesDraw);
+// Verifies that DidReceiveCompositorFrameAck does not get sent with PostTask
+// when not needed.
+class DidReceiveCompositorFrameAckNotSentWhenNotNeeded
+ : public LayerTreeHostTest {
+ public:
+ DidReceiveCompositorFrameAckNotSentWhenNotNeeded() {}
+
+ void InitializeSettings(LayerTreeSettings* settings) override {
+ settings->send_compositor_frame_ack = false;
+ }
+
+ void BeginTest() override { PostSetNeedsCommitToMainThread(); }
+
+ void DidReceiveCompositorFrameAck() override { ADD_FAILURE(); }
+
+ // DrawLayersOnThread gets called after the conditional call to
+ // DidReceiveCompositorFrameAck, so we wait for it to end the test.
+ void DrawLayersOnThread(LayerTreeHostImpl* impl) override {
+ if (!received_first_frame_) {
+ received_first_frame_ = true;
+ PostSetNeedsCommitToMainThread();
+ } else {
+ EndTest();
+ }
+ }
+
+ void AfterTest() override {}
+
+ private:
+ bool received_first_frame_ = false;
+};
+
+SINGLE_AND_MULTI_THREAD_TEST_F(
+ DidReceiveCompositorFrameAckNotSentWhenNotNeeded);
+
} // namespace
} // namespace cc
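
Several hunks in this file replace the five-parameter ApplyViewportDeltas() override with a single ApplyViewportChanges(const ApplyViewportChangesArgs&) override. The following is a rough standalone sketch of that struct-of-arguments pattern, not part of this patch; the field names (inner_delta, page_scale_delta, browser_controls_delta) are taken from the diff, while Vector2dF and Client here are simplified stand-ins rather than the real cc/gfx types.

#include <iostream>

struct Vector2dF {
  float x = 0.f;
  float y = 0.f;
};

// Simplified stand-in for cc::ApplyViewportChangesArgs.
struct ApplyViewportChangesArgs {
  Vector2dF inner_delta;               // was the first ApplyViewportDeltas param
  Vector2dF elastic_overscroll_delta;
  float page_scale_delta = 1.f;        // was the bare "scale"/"scale_delta" param
  float browser_controls_delta = 0.f;  // was "top_controls_delta"
};

// Clients override one struct-taking method instead of matching a
// five-parameter signature, so new fields can be added without editing
// every override.
class Client {
 public:
  virtual ~Client() = default;
  virtual void ApplyViewportChanges(const ApplyViewportChangesArgs& args) {
    std::cout << "inner delta (" << args.inner_delta.x << ", "
              << args.inner_delta.y << "), scale delta "
              << args.page_scale_delta << "\n";
  }
};

int main() {
  ApplyViewportChangesArgs args;
  args.inner_delta.x = 50.f;
  args.inner_delta.y = 50.f;
  args.page_scale_delta = 2.f;
  Client client;
  client.ApplyViewportChanges(args);
  return 0;
}
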
diff --git a/chromium/cc/trees/layer_tree_host_unittest_animation.cc b/chromium/cc/trees/layer_tree_host_unittest_animation.cc
index 0b935c7adf9..d50a4cce305 100644
--- a/chromium/cc/trees/layer_tree_host_unittest_animation.cc
+++ b/chromium/cc/trees/layer_tree_host_unittest_animation.cc
@@ -638,8 +638,7 @@ class LayerTreeHostAnimationTestForceRedraw
layer_tree_host()->SetNeedsAnimate();
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
layer_tree_host()->SetNeedsCommitWithForcedRedraw();
}
@@ -2242,8 +2241,7 @@ class LayerTreeHostAnimationTestSetPotentiallyAnimatingOnLacDestruction
void DidCommit() override { PostSetNeedsCommitToMainThread(); }
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
if (layer_tree_host()->SourceFrameNumber() == 2) {
// Destroy animation.
timeline_->DetachAnimation(animation_.get());
@@ -2320,8 +2318,7 @@ class LayerTreeHostAnimationTestRebuildPropertyTreesOnAnimationSetNeedsCommit
PostSetNeedsCommitToMainThread();
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
if (layer_tree_host()->SourceFrameNumber() == 1) {
EXPECT_FALSE(layer_tree_host()->property_trees()->needs_rebuild);
AddAnimatedTransformToAnimation(animation_child_.get(), 1.0, 5, 5);
diff --git a/chromium/cc/trees/layer_tree_host_unittest_context.cc b/chromium/cc/trees/layer_tree_host_unittest_context.cc
index a57153ed21b..4f0c551c681 100644
--- a/chromium/cc/trees/layer_tree_host_unittest_context.cc
+++ b/chromium/cc/trees/layer_tree_host_unittest_context.cc
@@ -1737,7 +1737,7 @@ class LayerTreeHostContextTestLoseAfterSendingBeginMainFrame
deferred_ = true;
// Defer commits before the BeginFrame completes, causing it to be delayed.
- layer_tree_host()->SetDeferCommits(true);
+ scoped_defer_commits_ = layer_tree_host()->DeferCommits();
// Meanwhile, lose the context while we are in defer commits.
ImplThreadTaskRunner()->PostTask(
FROM_HERE,
@@ -1760,13 +1760,14 @@ class LayerTreeHostContextTestLoseAfterSendingBeginMainFrame
LoseContext();
// After losing the context, stop deferring commits.
- PostSetDeferCommitsToMainThread(false);
+ PostReturnDeferCommitsToMainThread(std::move(scoped_defer_commits_));
}
void DidCommitAndDrawFrame() override { EndTest(); }
void AfterTest() override {}
+ std::unique_ptr<ScopedDeferCommits> scoped_defer_commits_;
bool deferred_ = false;
bool lost_ = true;
};
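
This file, like layer_tree_host_unittest.cc above, swaps the boolean SetDeferCommits(true)/SetDeferCommits(false) calls for a DeferCommits() call that returns a ScopedDeferCommits handle, so commits stay deferred exactly as long as the returned object is alive. Below is a minimal standalone RAII sketch of that idea, not part of this patch; Host and SetDeferCommitsInternal are invented stand-ins, and only the ScopedDeferCommits/DeferCommits names come from the diff.

#include <iostream>
#include <memory>

class Host;

// Defers commits on construction and re-enables them on destruction.
class ScopedDeferCommits {
 public:
  explicit ScopedDeferCommits(Host* host);
  ~ScopedDeferCommits();

 private:
  Host* host_;
};

class Host {
 public:
  // Commits stay deferred for as long as the returned object is alive.
  std::unique_ptr<ScopedDeferCommits> DeferCommits() {
    return std::make_unique<ScopedDeferCommits>(this);
  }
  void SetDeferCommitsInternal(bool defer) {
    std::cout << (defer ? "commits deferred\n" : "commits allowed\n");
  }
};

ScopedDeferCommits::ScopedDeferCommits(Host* host) : host_(host) {
  host_->SetDeferCommitsInternal(true);
}

ScopedDeferCommits::~ScopedDeferCommits() {
  host_->SetDeferCommitsInternal(false);
}

int main() {
  Host host;
  auto defer = host.DeferCommits();  // prints "commits deferred"
  defer.reset();                     // prints "commits allowed"
  return 0;
}

Tying deferral to an object's lifetime makes it harder to forget the matching SetDeferCommits(false), which is what these tests previously had to call by hand.
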
diff --git a/chromium/cc/trees/layer_tree_host_unittest_scroll.cc b/chromium/cc/trees/layer_tree_host_unittest_scroll.cc
index d5fa8c48b57..cef1be4927f 100644
--- a/chromium/cc/trees/layer_tree_host_unittest_scroll.cc
+++ b/chromium/cc/trees/layer_tree_host_unittest_scroll.cc
@@ -7,6 +7,7 @@
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "cc/animation/animation_host.h"
#include "cc/base/completion_event.h"
@@ -104,8 +105,7 @@ class LayerTreeHostScrollTestScrollSimple : public LayerTreeHostScrollTest {
PostSetNeedsCommitToMainThread();
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
Layer* scroll_layer = layer_tree_host()->outer_viewport_scroll_layer();
if (!layer_tree_host()->SourceFrameNumber()) {
EXPECT_VECTOR_EQ(initial_scroll_, scroll_layer->CurrentScrollOffset());
@@ -642,8 +642,7 @@ class LayerTreeHostScrollTestCaseWithChild : public LayerTreeHostScrollTest {
num_scrolls_++;
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
EXPECT_VECTOR_EQ(gfx::Vector2d(),
expected_no_scroll_layer_->CurrentScrollOffset());
@@ -846,8 +845,7 @@ class LayerTreeHostScrollTestSimple : public LayerTreeHostScrollTest {
PostSetNeedsCommitToMainThread();
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
Layer* scroll_layer = layer_tree_host()->outer_viewport_scroll_layer();
if (!layer_tree_host()->SourceFrameNumber()) {
EXPECT_VECTOR_EQ(initial_scroll_, scroll_layer->CurrentScrollOffset());
@@ -975,17 +973,17 @@ class LayerTreeHostScrollTestImplOnlyScroll : public LayerTreeHostScrollTest {
Layer* scroll_layer = layer_tree_host()->outer_viewport_scroll_layer();
switch (layer_tree_host()->SourceFrameNumber()) {
case 0:
- EXPECT_TRUE(
- scroll_layer->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- scroll_layer));
+ EXPECT_TRUE(base::ContainsKey(
+ scroll_layer->layer_tree_host()->LayersThatShouldPushProperties(),
+ scroll_layer));
break;
case 1:
// Even if this layer doesn't need push properties, it should
// still pick up scrolls that happen on the active layer during
// commit.
- EXPECT_FALSE(
- scroll_layer->layer_tree_host()->LayerNeedsPushPropertiesForTesting(
- scroll_layer));
+ EXPECT_FALSE(base::ContainsKey(
+ scroll_layer->layer_tree_host()->LayersThatShouldPushProperties(),
+ scroll_layer));
break;
}
}
@@ -1123,8 +1121,7 @@ class LayerTreeHostScrollTestScrollZeroMaxScrollOffset
PostSetNeedsCommitToMainThread();
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
Layer* root = layer_tree_host()->root_layer();
Layer* scroll_layer = layer_tree_host()->outer_viewport_scroll_layer();
switch (layer_tree_host()->SourceFrameNumber()) {
@@ -1495,8 +1492,7 @@ class LayerTreeHostScrollTestScrollMFBA : public LayerTreeHostScrollTest {
num_commits_++;
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
Layer* scroll_layer = layer_tree_host()->outer_viewport_scroll_layer();
switch (layer_tree_host()->SourceFrameNumber()) {
case 0:
@@ -1856,18 +1852,14 @@ class LayerTreeHostScrollTestElasticOverscroll
DCHECK(scroll_elasticity_helper_);
}
- void ApplyViewportDeltas(const gfx::Vector2dF& inner_delta,
- const gfx::Vector2dF& outer_delta,
- const gfx::Vector2dF& elastic_overscroll_delta,
- float scale,
- float top_controls_delta) override {
+ void ApplyViewportChanges(const ApplyViewportChangesArgs& args) override {
DCHECK_NE(0, num_begin_main_frames_main_thread_)
<< "The first BeginMainFrame has no deltas to report";
DCHECK_LT(num_begin_main_frames_main_thread_, 5);
gfx::Vector2dF expected_elastic_overscroll =
elastic_overscroll_test_cases_[num_begin_main_frames_main_thread_];
- current_elastic_overscroll_ += elastic_overscroll_delta;
+ current_elastic_overscroll_ += args.elastic_overscroll_delta;
EXPECT_EQ(expected_elastic_overscroll, current_elastic_overscroll_);
EXPECT_EQ(expected_elastic_overscroll,
layer_tree_host()->elastic_overscroll());
@@ -2000,8 +1992,7 @@ class LayerTreeHostScrollTestPropertyTreeUpdate
PostSetNeedsCommitToMainThread();
}
- void UpdateLayerTreeHost(
- LayerTreeHostClient::VisualStateUpdate requested_update) override {
+ void UpdateLayerTreeHost() override {
Layer* scroll_layer = layer_tree_host()->inner_viewport_scroll_layer();
if (layer_tree_host()->SourceFrameNumber() == 0) {
EXPECT_VECTOR_EQ(initial_scroll_, scroll_layer->CurrentScrollOffset());
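
The scroll tests above no longer rely on the removed LayerNeedsPushPropertiesForTesting() helper; with the push-properties container exposed directly, membership is checked with base::ContainsKey (hence the new base/stl_util.h include). A minimal sketch of the check, assuming a LayerTreeHost* named host and a Layer* named layer:

  #include "base/stl_util.h"

  // True when |layer| has been marked as needing to push its properties to
  // the impl side on the next commit.
  bool needs_push =
      base::ContainsKey(host->LayersThatShouldPushProperties(), layer);
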
diff --git a/chromium/cc/trees/layer_tree_impl.cc b/chromium/cc/trees/layer_tree_impl.cc
index 956f6cc06c4..f3e04c1d449 100644
--- a/chromium/cc/trees/layer_tree_impl.cc
+++ b/chromium/cc/trees/layer_tree_impl.cc
@@ -617,7 +617,11 @@ ElementListType LayerTreeImpl::GetElementTypeForAnimation() const {
return IsActiveTree() ? ElementListType::ACTIVE : ElementListType::PENDING;
}
-void LayerTreeImpl::AddToElementLayerList(ElementId element_id) {
+void LayerTreeImpl::AddToElementLayerList(ElementId element_id,
+ LayerImpl* layer) {
+ DCHECK(layer);
+ DCHECK(layer->element_id() == element_id);
+
if (!element_id)
return;
@@ -636,6 +640,9 @@ void LayerTreeImpl::AddToElementLayerList(ElementId element_id) {
host_impl_->mutator_host()->RegisterElement(element_id,
GetElementTypeForAnimation());
+
+ if (layer->scrollable())
+ AddScrollableLayer(layer);
}
void LayerTreeImpl::RemoveFromElementLayerList(ElementId element_id) {
@@ -650,6 +657,19 @@ void LayerTreeImpl::RemoveFromElementLayerList(ElementId element_id) {
GetElementTypeForAnimation());
elements_in_layer_list_.erase(element_id);
+ element_id_to_scrollable_layer_.erase(element_id);
+}
+
+void LayerTreeImpl::AddScrollableLayer(LayerImpl* layer) {
+ DCHECK(layer);
+ DCHECK(layer->scrollable());
+
+ if (!layer->element_id())
+ return;
+
+ DCHECK(!element_id_to_scrollable_layer_.count(layer->element_id()));
+ element_id_to_scrollable_layer_.insert(
+ std::make_pair(layer->element_id(), layer));
}
void LayerTreeImpl::SetTransformMutated(ElementId element_id,
@@ -1216,7 +1236,7 @@ bool LayerTreeImpl::UpdateDrawProperties(
&render_surface_list_, &property_trees_, PageScaleTransformNode());
LayerTreeHostCommon::CalculateDrawProperties(&inputs);
if (const char* client_name = GetClientNameForMetrics()) {
- UMA_HISTOGRAM_COUNTS(
+ UMA_HISTOGRAM_COUNTS_1M(
base::StringPrintf(
"Compositing.%s.LayerTreeImpl.CalculateDrawPropertiesUs",
client_name),
@@ -1382,10 +1402,16 @@ gfx::SizeF LayerTreeImpl::ScrollableSize() const {
}
LayerImpl* LayerTreeImpl::LayerById(int id) const {
- LayerImplMap::const_iterator iter = layer_id_map_.find(id);
+ auto iter = layer_id_map_.find(id);
return iter != layer_id_map_.end() ? iter->second : nullptr;
}
+LayerImpl* LayerTreeImpl::ScrollableLayerByElementId(
+ ElementId element_id) const {
+ auto iter = element_id_to_scrollable_layer_.find(element_id);
+ return iter != element_id_to_scrollable_layer_.end() ? iter->second : nullptr;
+}
+
void LayerTreeImpl::SetSurfaceRanges(
const base::flat_set<viz::SurfaceRange> surface_ranges) {
DCHECK(surface_layer_ranges_.empty());
@@ -1404,21 +1430,14 @@ void LayerTreeImpl::ClearSurfaceRanges() {
void LayerTreeImpl::AddLayerShouldPushProperties(LayerImpl* layer) {
DCHECK(!IsActiveTree()) << "The active tree does not push layer properties";
+  // TODO(crbug.com/303943): PictureLayerImpls always push properties, so they
+  // should not go into this set or we'd push them twice.
+ DCHECK(!base::ContainsValue(picture_layers_, layer));
layers_that_should_push_properties_.insert(layer);
}
-void LayerTreeImpl::RemoveLayerShouldPushProperties(LayerImpl* layer) {
- layers_that_should_push_properties_.erase(layer);
-}
-
-std::unordered_set<LayerImpl*>&
-LayerTreeImpl::LayersThatShouldPushProperties() {
- return layers_that_should_push_properties_;
-}
-
-bool LayerTreeImpl::LayerNeedsPushPropertiesForTesting(LayerImpl* layer) {
- return layers_that_should_push_properties_.find(layer) !=
- layers_that_should_push_properties_.end();
+void LayerTreeImpl::ClearLayersThatShouldPushProperties() {
+ layers_that_should_push_properties_.clear();
}
void LayerTreeImpl::RegisterLayer(LayerImpl* layer) {
@@ -1815,8 +1834,7 @@ void LayerTreeImpl::RegisterPictureLayerImpl(PictureLayerImpl* layer) {
}
void LayerTreeImpl::UnregisterPictureLayerImpl(PictureLayerImpl* layer) {
- std::vector<PictureLayerImpl*>::iterator it =
- std::find(picture_layers_.begin(), picture_layers_.end(), layer);
+ auto it = std::find(picture_layers_.begin(), picture_layers_.end(), layer);
DCHECK(it != picture_layers_.end());
picture_layers_.erase(it);
}
@@ -2090,19 +2108,21 @@ static void FindClosestMatchingLayer(const gfx::PointF& screen_space_point,
}
}
-struct FindScrollingLayerOrDrawnScrollbarFunctor {
+struct FindScrollingLayerOrScrollbarFunctor {
bool operator()(LayerImpl* layer) const {
- return layer->scrollable() || layer->IsDrawnScrollbar();
+ return layer->scrollable() || layer->is_scrollbar();
}
};
-LayerImpl*
-LayerTreeImpl::FindFirstScrollingLayerOrDrawnScrollbarThatIsHitByPoint(
+LayerImpl* LayerTreeImpl::FindFirstScrollingLayerOrScrollbarThatIsHitByPoint(
const gfx::PointF& screen_space_point) {
+ if (layer_list_.empty())
+ return nullptr;
+
FindClosestMatchingLayerState state;
- LayerImpl* root_layer = layer_list_.empty() ? nullptr : layer_list_[0];
+ LayerImpl* root_layer = layer_list_[0];
FindClosestMatchingLayer(screen_space_point, root_layer,
- FindScrollingLayerOrDrawnScrollbarFunctor(), &state);
+ FindScrollingLayerOrScrollbarFunctor(), &state);
return state.closest_match;
}
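
LayerTreeImpl now keeps an element_id_to_scrollable_layer_ map, populated from AddToElementLayerList()/AddScrollableLayer() and consulted by the new ScrollableLayerByElementId() accessor, so a scrollable layer can be found by ElementId without walking the layer list. A minimal sketch of a lookup, assuming a LayerTreeImpl* named tree and an ElementId named scroll_element_id (both illustrative):

  // Returns nullptr when no scrollable layer is registered for the id.
  LayerImpl* scrolling_layer =
      tree->ScrollableLayerByElementId(scroll_element_id);
  if (scrolling_layer) {
    DCHECK(scrolling_layer->scrollable());
    // ... hit-test or scroll this layer ...
  }
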
diff --git a/chromium/cc/trees/layer_tree_impl.h b/chromium/cc/trees/layer_tree_impl.h
index 4f22ac2d884..f5dd26f151f 100644
--- a/chromium/cc/trees/layer_tree_impl.h
+++ b/chromium/cc/trees/layer_tree_impl.h
@@ -11,6 +11,7 @@
#include <unordered_map>
#include <vector>
+#include "base/containers/flat_set.h"
#include "base/macros.h"
#include "base/values.h"
#include "cc/base/synced_property.h"
@@ -411,19 +412,23 @@ class CC_EXPORT LayerTreeImpl {
gfx::Rect RootScrollLayerDeviceViewportBounds() const;
LayerImpl* LayerById(int id) const;
+ LayerImpl* ScrollableLayerByElementId(ElementId element_id) const;
bool IsElementInLayerList(ElementId element_id) const;
- void AddToElementLayerList(ElementId element_id);
+ void AddToElementLayerList(ElementId element_id, LayerImpl* layer);
void RemoveFromElementLayerList(ElementId element_id);
+ void AddScrollableLayer(LayerImpl* layer);
+
void SetSurfaceRanges(const base::flat_set<viz::SurfaceRange> surface_ranges);
const base::flat_set<viz::SurfaceRange>& SurfaceRanges() const;
void ClearSurfaceRanges();
void AddLayerShouldPushProperties(LayerImpl* layer);
- void RemoveLayerShouldPushProperties(LayerImpl* layer);
- std::unordered_set<LayerImpl*>& LayersThatShouldPushProperties();
- bool LayerNeedsPushPropertiesForTesting(LayerImpl* layer);
+ void ClearLayersThatShouldPushProperties();
+ const base::flat_set<LayerImpl*>& LayersThatShouldPushProperties() {
+ return layers_that_should_push_properties_;
+ }
// These should be called by LayerImpl's ctor/dtor.
void RegisterLayer(LayerImpl* layer);
@@ -497,7 +502,7 @@ class CC_EXPORT LayerTreeImpl {
void UnregisterScrollbar(ScrollbarLayerImplBase* scrollbar_layer);
ScrollbarSet ScrollbarsFor(ElementId scroll_element_id) const;
- LayerImpl* FindFirstScrollingLayerOrDrawnScrollbarThatIsHitByPoint(
+ LayerImpl* FindFirstScrollingLayerOrScrollbarThatIsHitByPoint(
const gfx::PointF& screen_space_point);
LayerImpl* FindLayerThatIsHitByPoint(const gfx::PointF& screen_space_point);
@@ -559,6 +564,7 @@ class CC_EXPORT LayerTreeImpl {
// SetScrollbarGeometriesNeedUpdate).
void UpdateScrollbarGeometries();
+ // See LayerTreeHost.
bool have_scroll_event_handlers() const {
return have_scroll_event_handlers_;
}
@@ -566,6 +572,7 @@ class CC_EXPORT LayerTreeImpl {
have_scroll_event_handlers_ = have_event_handlers;
}
+ // See LayerTreeHost.
EventListenerProperties event_listener_properties(
EventListenerClass event_class) const {
return event_listener_properties_[static_cast<size_t>(event_class)];
@@ -653,7 +660,7 @@ class CC_EXPORT LayerTreeImpl {
LayerImplMap layer_id_map_;
LayerImplList layer_list_;
// Set of layers that need to push properties.
- std::unordered_set<LayerImpl*> layers_that_should_push_properties_;
+ base::flat_set<LayerImpl*> layers_that_should_push_properties_;
// Set of ElementIds which are present in the |layer_list_|.
std::unordered_set<ElementId, ElementIdHash> elements_in_layer_list_;
@@ -665,6 +672,9 @@ class CC_EXPORT LayerTreeImpl {
std::unordered_map<ElementId, FilterOperations, ElementIdHash>
element_id_to_filter_animations_;
+ std::unordered_map<ElementId, LayerImpl*, ElementIdHash>
+ element_id_to_scrollable_layer_;
+
struct ScrollbarLayerIds {
int horizontal = Layer::INVALID_ID;
int vertical = Layer::INVALID_ID;
diff --git a/chromium/cc/trees/layer_tree_mutator.cc b/chromium/cc/trees/layer_tree_mutator.cc
index 109ad6d273c..845ed7dc60f 100644
--- a/chromium/cc/trees/layer_tree_mutator.cc
+++ b/chromium/cc/trees/layer_tree_mutator.cc
@@ -32,7 +32,10 @@ bool AnimationWorkletInput::ValidateScope(int scope_id) const {
[scope_id](auto& it) {
return it.worklet_animation_id.scope_id == scope_id;
}) &&
- std::all_of(removed_animations.cbegin(), removed_animations.cend(),
+ std::all_of(
+ removed_animations.cbegin(), removed_animations.cend(),
+ [scope_id](auto& it) { return it.scope_id == scope_id; }) &&
+ std::all_of(peeked_animations.cbegin(), peeked_animations.cend(),
[scope_id](auto& it) { return it.scope_id == scope_id; });
}
#endif
@@ -74,6 +77,12 @@ void MutatorInputState::Remove(WorkletAnimationId worklet_animation_id) {
worklet_input.removed_animations.push_back(worklet_animation_id);
}
+void MutatorInputState::Peek(WorkletAnimationId worklet_animation_id) {
+ AnimationWorkletInput& worklet_input =
+ EnsureWorkletEntry(worklet_animation_id.scope_id);
+ worklet_input.peeked_animations.push_back(worklet_animation_id);
+}
+
std::unique_ptr<AnimationWorkletInput> MutatorInputState::TakeWorkletState(
int scope_id) {
auto it = inputs_.find(scope_id);
diff --git a/chromium/cc/trees/layer_tree_mutator.h b/chromium/cc/trees/layer_tree_mutator.h
index f2d7ce830cc..dd702c49458 100644
--- a/chromium/cc/trees/layer_tree_mutator.h
+++ b/chromium/cc/trees/layer_tree_mutator.h
@@ -60,6 +60,7 @@ struct CC_EXPORT AnimationWorkletInput {
std::vector<AddAndUpdateState> added_and_updated_animations;
std::vector<UpdateState> updated_animations;
std::vector<WorkletAnimationId> removed_animations;
+ std::vector<WorkletAnimationId> peeked_animations;
AnimationWorkletInput();
~AnimationWorkletInput();
@@ -80,6 +81,12 @@ class CC_EXPORT MutatorInputState {
void Add(AnimationWorkletInput::AddAndUpdateState&& state);
void Update(AnimationWorkletInput::UpdateState&& state);
void Remove(WorkletAnimationId worklet_animation_id);
+  // |Update| asks the animation to *animate* given a current time and to
+  // return the output value, while |Peek| only asks for the last output value
+  // (if one is available) without requiring it to animate or providing a
+  // current time. In particular, composited animations are updated from the
+  // compositor and peeked from the main thread.
+ void Peek(WorkletAnimationId worklet_animation_id);
// Returns input for animation worklet with the given |scope_id| and nullptr
// if there is no input.
@@ -141,7 +148,7 @@ class CC_EXPORT LayerTreeMutator {
virtual void Mutate(std::unique_ptr<MutatorInputState> input_state) = 0;
// TODO(majidvp): Remove when timeline inputs are known.
- virtual bool HasAnimators() = 0;
+ virtual bool HasMutators() = 0;
};
} // namespace cc
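
MutatorInputState gains a Peek() entry point next to Add/Update/Remove: Update drives the worklet animation with a current time, while Peek only requests the last produced output value. A minimal sketch of how a caller might use it, assuming a WorkletAnimationId named worklet_animation_id, a LayerTreeMutator* named mutator, and default construction of the input state (all illustrative):

  auto input = std::make_unique<MutatorInputState>();
  // Only read back the last output value; the worklet is not asked to animate
  // and no current time is supplied.
  input->Peek(worklet_animation_id);
  mutator->Mutate(std::move(input));
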
diff --git a/chromium/cc/trees/layer_tree_settings.h b/chromium/cc/trees/layer_tree_settings.h
index e124035096b..389e25b5eec 100644
--- a/chromium/cc/trees/layer_tree_settings.h
+++ b/chromium/cc/trees/layer_tree_settings.h
@@ -160,6 +160,11 @@ class CC_EXPORT LayerTreeSettings {
// When false, sync tokens are expected to be present, and are verified,
// before transfering gpu resources to the display compositor.
bool delegated_sync_points_required = true;
+
+  // When true, LayerTreeHostImplClient posts a task to call
+  // DidReceiveCompositorFrameAck; this is used by the Compositor but not by
+  // the LayerTreeView.
+ bool send_compositor_frame_ack = true;
};
} // namespace cc
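
The new send_compositor_frame_ack setting lets an embedder opt out of the main-thread DidReceiveCompositorFrameAck callback, which is what the DidReceiveCompositorFrameAckNotSentWhenNotNeeded test above exercises. A minimal sketch of turning it off when building the settings:

  LayerTreeSettings settings;
  // Embedders that do not consume the ack (the LayerTreeView case) can
  // disable it; ProxyImpl and SingleThreadProxy then skip the main-thread
  // PostTask for DidReceiveCompositorFrameAck.
  settings.send_compositor_frame_ack = false;
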
diff --git a/chromium/cc/trees/property_tree.cc b/chromium/cc/trees/property_tree.cc
index 99b15dd5a5d..c4fa5bc4a19 100644
--- a/chromium/cc/trees/property_tree.cc
+++ b/chromium/cc/trees/property_tree.cc
@@ -1436,7 +1436,7 @@ gfx::Vector2dF ScrollTree::ClampScrollToMaxScrollOffset(
const gfx::ScrollOffset ScrollTree::current_scroll_offset(ElementId id) const {
if (property_trees()->is_main_thread) {
- ScrollOffsetMap::const_iterator it = scroll_offset_map_.find(id);
+ auto it = scroll_offset_map_.find(id);
return it != scroll_offset_map_.end() ? it->second : gfx::ScrollOffset();
}
return GetSyncedScrollOffset(id)
diff --git a/chromium/cc/trees/property_tree.h b/chromium/cc/trees/property_tree.h
index 9a9d9fdae67..85abe4979ad 100644
--- a/chromium/cc/trees/property_tree.h
+++ b/chromium/cc/trees/property_tree.h
@@ -62,9 +62,11 @@ class CC_EXPORT PropertyTree {
virtual ~PropertyTree();
PropertyTree<T>& operator=(const PropertyTree<T>&);
- // Property tree node starts from index 0.
+  // Property tree nodes start from index 0. See the equivalent constants in
+  // property_tree_manager.cc for comments.
static const int kInvalidNodeId = -1;
static const int kRootNodeId = 0;
+ static const int kSecondaryRootNodeId = 1;
bool operator==(const PropertyTree<T>& other) const;
diff --git a/chromium/cc/trees/property_tree_builder.cc b/chromium/cc/trees/property_tree_builder.cc
index 269abca6286..fd6c3edb5f6 100644
--- a/chromium/cc/trees/property_tree_builder.cc
+++ b/chromium/cc/trees/property_tree_builder.cc
@@ -748,6 +748,14 @@ static inline const FilterOperations& BackgroundFilters(LayerImpl* layer) {
return layer->test_properties()->background_filters;
}
+static inline float BackdropFilterQuality(Layer* layer) {
+ return layer->backdrop_filter_quality();
+}
+
+static inline float BackdropFilterQuality(LayerImpl* layer) {
+ return layer->test_properties()->backdrop_filter_quality;
+}
+
static inline bool HideLayerAndSubtree(Layer* layer) {
return layer->hide_layer_and_subtree();
}
@@ -984,6 +992,7 @@ bool PropertyTreeBuilderContext<LayerType>::AddEffectNodeIfNeeded(
node->has_copy_request = HasCopyRequest(layer);
node->filters = Filters(layer);
node->background_filters = BackgroundFilters(layer);
+ node->backdrop_filter_quality = BackdropFilterQuality(layer);
node->filters_origin = FiltersOrigin(layer);
node->trilinear_filtering = TrilinearFiltering(layer);
node->has_potential_opacity_animation = has_potential_opacity_animation;
@@ -1134,7 +1143,6 @@ void PropertyTreeBuilderContext<LayerType>::AddScrollNodeIfNeeded(
ScrollNode node;
node.scrollable = scrollable;
node.main_thread_scrolling_reasons = main_thread_scrolling_reasons;
- node.non_fast_scrollable_region = layer->non_fast_scrollable_region();
node.scrolls_inner_viewport = layer == inner_viewport_scroll_layer_;
node.scrolls_outer_viewport = layer == outer_viewport_scroll_layer_;
@@ -1405,7 +1413,7 @@ static void CheckClipPointersForLayer(Layer* layer) {
return;
if (layer->clip_children()) {
- for (std::set<Layer*>::iterator it = layer->clip_children()->begin();
+ for (auto it = layer->clip_children()->begin();
it != layer->clip_children()->end(); ++it) {
DCHECK_EQ((*it)->clip_parent(), layer);
}
diff --git a/chromium/cc/trees/proxy_impl.cc b/chromium/cc/trees/proxy_impl.cc
index 966124c8670..6aff668ee58 100644
--- a/chromium/cc/trees/proxy_impl.cc
+++ b/chromium/cc/trees/proxy_impl.cc
@@ -69,6 +69,7 @@ ProxyImpl::ProxyImpl(base::WeakPtr<ProxyMain> proxy_main_weak_ptr,
host_impl_ = layer_tree_host->CreateLayerTreeHostImpl(this);
const LayerTreeSettings& settings = layer_tree_host->GetSettings();
+ send_compositor_frame_ack_ = settings.send_compositor_frame_ack;
SchedulerSettings scheduler_settings(settings.ToSchedulerSettings());
@@ -291,9 +292,11 @@ void ProxyImpl::DidReceiveCompositorFrameAckOnImplThread() {
"ProxyImpl::DidReceiveCompositorFrameAckOnImplThread");
DCHECK(IsImplThread());
scheduler_->DidReceiveCompositorFrameAck();
- MainThreadTaskRunner()->PostTask(
- FROM_HERE, base::BindOnce(&ProxyMain::DidReceiveCompositorFrameAck,
- proxy_main_frame_sink_bound_weak_ptr_));
+ if (send_compositor_frame_ack_) {
+ MainThreadTaskRunner()->PostTask(
+ FROM_HERE, base::BindOnce(&ProxyMain::DidReceiveCompositorFrameAck,
+ proxy_main_frame_sink_bound_weak_ptr_));
+ }
}
void ProxyImpl::OnCanDrawStateChanged(bool can_draw) {
diff --git a/chromium/cc/trees/proxy_impl.h b/chromium/cc/trees/proxy_impl.h
index a6b9fa279b5..4bfbf73cd40 100644
--- a/chromium/cc/trees/proxy_impl.h
+++ b/chromium/cc/trees/proxy_impl.h
@@ -153,6 +153,8 @@ class CC_EXPORT ProxyImpl : public LayerTreeHostImplClient,
bool inside_draw_;
bool input_throttled_until_commit_;
+ bool send_compositor_frame_ack_;
+
TaskRunnerProvider* task_runner_provider_;
DelayedUniqueNotifier smoothness_priority_expiration_notifier_;
diff --git a/chromium/cc/trees/proxy_main.cc b/chromium/cc/trees/proxy_main.cc
index 334c323e297..c89bd779b18 100644
--- a/chromium/cc/trees/proxy_main.cc
+++ b/chromium/cc/trees/proxy_main.cc
@@ -146,10 +146,9 @@ void ProxyMain::BeginMainFrame(
max_requested_pipeline_stage_ = NO_PIPELINE_STAGE;
// When we don't need to produce a CompositorFrame, there's also no need to
- // paint or commit our updates. We still need to run layout though, as it can
+ // commit our updates. We still need to run layout and paint though, as it can
// have side effects on page loading behavior.
- bool skip_paint_and_commit =
- begin_main_frame_state->begin_frame_args.animate_only;
+ bool skip_commit = begin_main_frame_state->begin_frame_args.animate_only;
// If commits are deferred, skip the entire pipeline.
bool skip_full_pipeline = defer_commits_;
@@ -158,7 +157,7 @@ void ProxyMain::BeginMainFrame(
// now, and there was no intermediate request for a commit since the last
// BeginMainFrame, we can skip the full pipeline.
skip_full_pipeline |=
- skip_paint_and_commit && final_pipeline_stage_ == NO_PIPELINE_STAGE;
+ skip_commit && final_pipeline_stage_ == NO_PIPELINE_STAGE;
if (skip_full_pipeline) {
TRACE_EVENT_INSTANT0("cc", "EarlyOut_DeferCommit",
@@ -220,17 +219,16 @@ void ProxyMain::BeginMainFrame(
// See LayerTreeHostClient::MainFrameUpdate for more documentation on
// what this does.
- layer_tree_host_->RequestMainFrameUpdate(
- skip_paint_and_commit ? LayerTreeHost::VisualStateUpdate::kPrePaint
- : LayerTreeHost::VisualStateUpdate::kAll);
+ layer_tree_host_->RequestMainFrameUpdate();
// At this point the main frame may have deferred commits to avoid committing
// right now.
- skip_paint_and_commit |= defer_commits_;
+ skip_commit |= defer_commits_;
- if (skip_paint_and_commit) {
+ if (skip_commit) {
TRACE_EVENT_INSTANT0("cc", "EarlyOut_DeferCommit_InsideBeginMainFrame",
TRACE_EVENT_SCOPE_THREAD);
+ layer_tree_host_->RecordEndOfFrameMetrics(begin_main_frame_start_time);
std::vector<std::unique_ptr<SwapPromise>> empty_swap_promises;
ImplThreadTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&ProxyImpl::BeginMainFrameAbortedOnImpl,
@@ -277,6 +275,7 @@ void ProxyMain::BeginMainFrame(
current_pipeline_stage_ = COMMIT_PIPELINE_STAGE;
if (final_pipeline_stage_ < COMMIT_PIPELINE_STAGE) {
TRACE_EVENT_INSTANT0("cc", "EarlyOut_NoUpdates", TRACE_EVENT_SCOPE_THREAD);
+ layer_tree_host_->RecordEndOfFrameMetrics(begin_main_frame_start_time);
std::vector<std::unique_ptr<SwapPromise>> swap_promises =
layer_tree_host_->GetSwapPromiseManager()->TakeSwapPromises();
ImplThreadTaskRunner()->PostTask(
@@ -311,6 +310,7 @@ void ProxyMain::BeginMainFrame(
// coordinated by the Scheduler.
{
TRACE_EVENT0("cc", "ProxyMain::BeginMainFrame::commit");
+ layer_tree_host_->RecordEndOfFrameMetrics(begin_main_frame_start_time);
DebugScopedSetMainThreadBlocked main_thread_blocked(task_runner_provider_);
diff --git a/chromium/cc/trees/render_frame_metadata.cc b/chromium/cc/trees/render_frame_metadata.cc
index 82340bb7fff..304b451b2c3 100644
--- a/chromium/cc/trees/render_frame_metadata.cc
+++ b/chromium/cc/trees/render_frame_metadata.cc
@@ -32,9 +32,9 @@ bool RenderFrameMetadata::operator==(const RenderFrameMetadata& other) const {
device_scale_factor == other.device_scale_factor &&
viewport_size_in_pixels == other.viewport_size_in_pixels &&
page_scale_factor == other.page_scale_factor &&
-#if defined(OS_ANDROID)
top_controls_height == other.top_controls_height &&
top_controls_shown_ratio == other.top_controls_shown_ratio &&
+#if defined(OS_ANDROID)
bottom_controls_height == other.bottom_controls_height &&
bottom_controls_shown_ratio == other.bottom_controls_shown_ratio &&
min_page_scale_factor == other.min_page_scale_factor &&
diff --git a/chromium/cc/trees/render_frame_metadata.h b/chromium/cc/trees/render_frame_metadata.h
index 5f156029894..8b6c45da6c4 100644
--- a/chromium/cc/trees/render_frame_metadata.h
+++ b/chromium/cc/trees/render_frame_metadata.h
@@ -66,12 +66,12 @@ class CC_EXPORT RenderFrameMetadata {
float page_scale_factor = 1.f;
-#if defined(OS_ANDROID)
- // Used to position the Android location top bar and page content, whose
- // precise position is computed by the renderer compositor.
+ // Used to position the location top bar and page content, whose precise
+ // position is computed by the renderer compositor.
float top_controls_height = 0.f;
float top_controls_shown_ratio = 0.f;
+#if defined(OS_ANDROID)
// Used to position Android bottom bar, whose position is computed by the
// renderer compositor.
float bottom_controls_height = 0.f;
diff --git a/chromium/cc/trees/scroll_node.cc b/chromium/cc/trees/scroll_node.cc
index 9b1aaf16d92..9a19d4b4b57 100644
--- a/chromium/cc/trees/scroll_node.cc
+++ b/chromium/cc/trees/scroll_node.cc
@@ -35,7 +35,6 @@ bool ScrollNode::operator==(const ScrollNode& other) const {
return id == other.id && parent_id == other.parent_id &&
scrollable == other.scrollable &&
main_thread_scrolling_reasons == other.main_thread_scrolling_reasons &&
- non_fast_scrollable_region == other.non_fast_scrollable_region &&
container_bounds == other.container_bounds && bounds == other.bounds &&
max_scroll_offset_affected_by_page_scale ==
other.max_scroll_offset_affected_by_page_scale &&
diff --git a/chromium/cc/trees/scroll_node.h b/chromium/cc/trees/scroll_node.h
index 16e5c03688d..b96b696e1b3 100644
--- a/chromium/cc/trees/scroll_node.h
+++ b/chromium/cc/trees/scroll_node.h
@@ -32,8 +32,6 @@ struct CC_EXPORT ScrollNode {
uint32_t main_thread_scrolling_reasons;
- Region non_fast_scrollable_region;
-
// Size of the container area that the contents scrolls in, not including
// non-overlay scrollbars. Overlay scrollbars do not affect these bounds.
gfx::Size container_bounds;
diff --git a/chromium/cc/trees/single_thread_proxy.cc b/chromium/cc/trees/single_thread_proxy.cc
index 1d199d2f6d4..8e79bbbb4fb 100644
--- a/chromium/cc/trees/single_thread_proxy.cc
+++ b/chromium/cc/trees/single_thread_proxy.cc
@@ -442,12 +442,14 @@ void SingleThreadProxy::DidReceiveCompositorFrameAckOnImplThread() {
"SingleThreadProxy::DidReceiveCompositorFrameAckOnImplThread");
if (scheduler_on_impl_thread_)
scheduler_on_impl_thread_->DidReceiveCompositorFrameAck();
- // We do a PostTask here because freeing resources in some cases (such as in
- // TextureLayer) is PostTasked and we want to make sure ack is received after
- // resources are returned.
- task_runner_provider_->MainThreadTaskRunner()->PostTask(
- FROM_HERE, base::Bind(&SingleThreadProxy::DidReceiveCompositorFrameAck,
- frame_sink_bound_weak_ptr_));
+ if (layer_tree_host_->GetSettings().send_compositor_frame_ack) {
+ // We do a PostTask here because freeing resources in some cases (such as in
+ // TextureLayer) is PostTasked and we want to make sure ack is received
+ // after resources are returned.
+ task_runner_provider_->MainThreadTaskRunner()->PostTask(
+ FROM_HERE, base::Bind(&SingleThreadProxy::DidReceiveCompositorFrameAck,
+ frame_sink_bound_weak_ptr_));
+ }
}
void SingleThreadProxy::OnDrawForLayerTreeFrameSink(
@@ -773,10 +775,7 @@ void SingleThreadProxy::DoBeginMainFrame(
layer_tree_host_->WillBeginMainFrame();
layer_tree_host_->BeginMainFrame(begin_frame_args);
layer_tree_host_->AnimateLayers(begin_frame_args.frame_time);
- layer_tree_host_->RequestMainFrameUpdate(
- begin_frame_args.animate_only
- ? LayerTreeHost::VisualStateUpdate::kPrePaint
- : LayerTreeHost::VisualStateUpdate::kAll);
+ layer_tree_host_->RequestMainFrameUpdate();
}
void SingleThreadProxy::DoPainting() {
diff --git a/chromium/cc/trees/target_property.h b/chromium/cc/trees/target_property.h
index 53d8c13af10..93fa839bd82 100644
--- a/chromium/cc/trees/target_property.h
+++ b/chromium/cc/trees/target_property.h
@@ -21,9 +21,10 @@ enum Type {
SCROLL_OFFSET,
BACKGROUND_COLOR,
BOUNDS,
+ CSS_CUSTOM_PROPERTY,
// These sentinels must be last
FIRST_TARGET_PROPERTY = TRANSFORM,
- LAST_TARGET_PROPERTY = BOUNDS
+ LAST_TARGET_PROPERTY = CSS_CUSTOM_PROPERTY
};
} // namespace TargetProperty
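
Because FIRST_TARGET_PROPERTY and LAST_TARGET_PROPERTY are sentinels, adding CSS_CUSTOM_PROPERTY also requires bumping LAST_TARGET_PROPERTY so that code iterating the enum still covers every value. A minimal sketch of such an iteration:

  for (int i = TargetProperty::FIRST_TARGET_PROPERTY;
       i <= TargetProperty::LAST_TARGET_PROPERTY; ++i) {
    // Visits every target property exactly once, including the new
    // CSS_CUSTOM_PROPERTY entry.
  }
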
diff --git a/chromium/cc/trees/transform_node.cc b/chromium/cc/trees/transform_node.cc
index 5e7b6bf4890..c43fd23fbe1 100644
--- a/chromium/cc/trees/transform_node.cc
+++ b/chromium/cc/trees/transform_node.cc
@@ -109,6 +109,8 @@ void TransformNode::AsValueInto(base::trace_event::TracedValue* value) const {
MathUtil::AddToTracedValue("post_local", post_local, value);
value->SetInteger("source_node_id", source_node_id);
value->SetInteger("sorting_context_id", sorting_context_id);
+ value->SetInteger("flattens_inherited_transform",
+ flattens_inherited_transform);
MathUtil::AddToTracedValue("scroll_offset", scroll_offset, value);
MathUtil::AddToTracedValue("snap_amount", snap_amount, value);
}
diff --git a/chromium/cc/trees/tree_synchronizer.cc b/chromium/cc/trees/tree_synchronizer.cc
index 7dadbbe1476..c4b44c933c8 100644
--- a/chromium/cc/trees/tree_synchronizer.cc
+++ b/chromium/cc/trees/tree_synchronizer.cc
@@ -8,7 +8,9 @@
#include <set>
+#include "base/containers/flat_set.h"
#include "base/logging.h"
+#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "cc/layers/layer.h"
#include "cc/layers/layer_collections.h"
@@ -17,6 +19,72 @@
#include "cc/trees/layer_tree_impl.h"
namespace cc {
+namespace {
+#if DCHECK_IS_ON()
+template <typename LayerType>
+static void AssertValidPropertyTreeIndices(LayerType* layer) {
+ DCHECK(layer);
+ DCHECK_NE(layer->transform_tree_index(), TransformTree::kInvalidNodeId);
+ DCHECK_NE(layer->effect_tree_index(), EffectTree::kInvalidNodeId);
+ DCHECK_NE(layer->clip_tree_index(), ClipTree::kInvalidNodeId);
+ DCHECK_NE(layer->scroll_tree_index(), ScrollTree::kInvalidNodeId);
+}
+
+static bool LayerHasValidPropertyTreeIndices(LayerImpl* layer) {
+ DCHECK(layer);
+ return layer->transform_tree_index() != TransformTree::kInvalidNodeId &&
+ layer->effect_tree_index() != EffectTree::kInvalidNodeId &&
+ layer->clip_tree_index() != ClipTree::kInvalidNodeId &&
+ layer->scroll_tree_index() != ScrollTree::kInvalidNodeId;
+}
+
+static bool LayerWillPushProperties(LayerTreeHost* host, Layer* layer) {
+ return base::ContainsKey(host->LayersThatShouldPushProperties(), layer);
+}
+
+static bool LayerWillPushProperties(LayerTreeImpl* tree, LayerImpl* layer) {
+ return base::ContainsKey(tree->LayersThatShouldPushProperties(), layer) ||
+ // TODO(crbug.com/303943): Stop always pushing PictureLayerImpl
+ // properties.
+ base::ContainsValue(tree->picture_layers(), layer);
+}
+#endif
+
+template <typename LayerType>
+std::unique_ptr<LayerImpl> ReuseOrCreateLayerImpl(OwnedLayerImplMap* old_layers,
+ LayerType* layer,
+ LayerTreeImpl* tree_impl) {
+ if (!layer)
+ return nullptr;
+ std::unique_ptr<LayerImpl> layer_impl = std::move((*old_layers)[layer->id()]);
+ if (!layer_impl)
+ layer_impl = layer->CreateLayerImpl(tree_impl);
+ return layer_impl;
+}
+
+template <typename LayerTreeType>
+void PushLayerList(OwnedLayerImplMap* old_layers,
+ LayerTreeType* host,
+ LayerTreeImpl* tree_impl) {
+ tree_impl->ClearLayerList();
+ for (auto* layer : *host) {
+ std::unique_ptr<LayerImpl> layer_impl(
+ ReuseOrCreateLayerImpl(old_layers, layer, tree_impl));
+
+#if DCHECK_IS_ON()
+ // Every layer should have valid property tree indices
+ AssertValidPropertyTreeIndices(layer);
+ // Every layer_impl should either have valid property tree indices already
+ // or the corresponding layer should push them onto layer_impl.
+ DCHECK(LayerHasValidPropertyTreeIndices(layer_impl.get()) ||
+ LayerWillPushProperties(host, layer));
+#endif
+
+ tree_impl->AddToLayerList(layer_impl.get());
+ tree_impl->AddLayer(std::move(layer_impl));
+ }
+ tree_impl->OnCanDrawStateChangedForTree();
+}
template <typename LayerTreeType>
void SynchronizeTreesInternal(LayerTreeType* source_tree,
@@ -42,6 +110,8 @@ void SynchronizeTreesInternal(LayerTreeType* source_tree,
}
}
+} // namespace
+
void TreeSynchronizer::SynchronizeTrees(Layer* layer_root,
LayerTreeImpl* tree_impl) {
if (!layer_root) {
@@ -62,86 +132,38 @@ void TreeSynchronizer::SynchronizeTrees(LayerTreeImpl* pending_tree,
}
}
-template <typename LayerType>
-std::unique_ptr<LayerImpl> ReuseOrCreateLayerImpl(OwnedLayerImplMap* old_layers,
- LayerType* layer,
- LayerTreeImpl* tree_impl) {
- if (!layer)
- return nullptr;
- std::unique_ptr<LayerImpl> layer_impl = std::move((*old_layers)[layer->id()]);
- if (!layer_impl)
- layer_impl = layer->CreateLayerImpl(tree_impl);
- return layer_impl;
-}
-
-#if DCHECK_IS_ON()
-template <typename LayerType>
-static void AssertValidPropertyTreeIndices(LayerType* layer) {
- DCHECK(layer);
- DCHECK_NE(layer->transform_tree_index(), TransformTree::kInvalidNodeId);
- DCHECK_NE(layer->effect_tree_index(), EffectTree::kInvalidNodeId);
- DCHECK_NE(layer->clip_tree_index(), ClipTree::kInvalidNodeId);
- DCHECK_NE(layer->scroll_tree_index(), ScrollTree::kInvalidNodeId);
-}
-
-static bool LayerHasValidPropertyTreeIndices(LayerImpl* layer) {
- DCHECK(layer);
- return layer->transform_tree_index() != TransformTree::kInvalidNodeId &&
- layer->effect_tree_index() != EffectTree::kInvalidNodeId &&
- layer->clip_tree_index() != ClipTree::kInvalidNodeId &&
- layer->scroll_tree_index() != ScrollTree::kInvalidNodeId;
-}
-#endif
-
-template <typename LayerTreeType>
-void PushLayerList(OwnedLayerImplMap* old_layers,
- LayerTreeType* host,
- LayerTreeImpl* tree_impl) {
- tree_impl->ClearLayerList();
- for (auto* layer : *host) {
- std::unique_ptr<LayerImpl> layer_impl(
- ReuseOrCreateLayerImpl(old_layers, layer, tree_impl));
-
-#if DCHECK_IS_ON()
- // Every layer should have valid property tree indices
- AssertValidPropertyTreeIndices(layer);
- // Every layer_impl should either have valid property tree indices already
- // or the corresponding layer should push them onto layer_impl.
- DCHECK(LayerHasValidPropertyTreeIndices(layer_impl.get()) ||
- host->LayerNeedsPushPropertiesForTesting(layer));
-#endif
-
- tree_impl->AddToLayerList(layer_impl.get());
- tree_impl->AddLayer(std::move(layer_impl));
- }
- tree_impl->OnCanDrawStateChangedForTree();
-}
-
-template <typename LayerType>
-static void PushLayerPropertiesInternal(
- std::unordered_set<LayerType*> layers_that_should_push_properties,
- LayerTreeImpl* impl_tree) {
- for (auto layer : layers_that_should_push_properties) {
- LayerImpl* layer_impl = impl_tree->LayerById(layer->id());
- DCHECK(layer_impl);
- layer->PushPropertiesTo(layer_impl);
+template <typename Iterator>
+static void PushLayerPropertiesInternal(Iterator source_layers_begin,
+ Iterator source_layers_end,
+ LayerTreeImpl* target_impl_tree) {
+ for (Iterator it = source_layers_begin; it != source_layers_end; ++it) {
+ auto* source_layer = *it;
+ LayerImpl* target_layer = target_impl_tree->LayerById(source_layer->id());
+ DCHECK(target_layer);
+ source_layer->PushPropertiesTo(target_layer);
}
}
void TreeSynchronizer::PushLayerProperties(LayerTreeImpl* pending_tree,
LayerTreeImpl* active_tree) {
const auto& layers = pending_tree->LayersThatShouldPushProperties();
+ // TODO(crbug.com/303943): Stop always pushing PictureLayerImpl properties.
+ const auto& picture_layers = pending_tree->picture_layers();
TRACE_EVENT1("cc", "TreeSynchronizer::PushLayerPropertiesTo.Impl",
- "layer_count", layers.size());
- PushLayerPropertiesInternal(layers, active_tree);
+ "layer_count", layers.size() + picture_layers.size());
+ PushLayerPropertiesInternal(layers.begin(), layers.end(), active_tree);
+ PushLayerPropertiesInternal(picture_layers.begin(), picture_layers.end(),
+ active_tree);
+ pending_tree->ClearLayersThatShouldPushProperties();
}
void TreeSynchronizer::PushLayerProperties(LayerTreeHost* host_tree,
LayerTreeImpl* impl_tree) {
- const auto& layers = host_tree->LayersThatShouldPushProperties();
+ auto layers = host_tree->LayersThatShouldPushProperties();
TRACE_EVENT1("cc", "TreeSynchronizer::PushLayerPropertiesTo.Main",
"layer_count", layers.size());
- PushLayerPropertiesInternal(layers, impl_tree);
+ PushLayerPropertiesInternal(layers.begin(), layers.end(), impl_tree);
+ host_tree->ClearLayersThatShouldPushProperties();
}
} // namespace cc
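
PushLayerPropertiesInternal is now a generic iterator-range helper so the same loop can walk both the flat_set of layers that explicitly requested a push and the picture_layers vector (which, per the TODO, is still always pushed). Restated outside the diff for clarity, the helper has this shape:

  template <typename Iterator>
  static void PushLayerPropertiesInternal(Iterator source_layers_begin,
                                          Iterator source_layers_end,
                                          LayerTreeImpl* target_impl_tree) {
    for (Iterator it = source_layers_begin; it != source_layers_end; ++it) {
      LayerImpl* target_layer = target_impl_tree->LayerById((*it)->id());
      DCHECK(target_layer);
      (*it)->PushPropertiesTo(target_layer);
    }
  }

After pushing, the caller clears the source tree's set via ClearLayersThatShouldPushProperties() so the same layers are not pushed again on the next commit.
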
diff --git a/chromium/cc/trees/tree_synchronizer_unittest.cc b/chromium/cc/trees/tree_synchronizer_unittest.cc
index 63476307e80..c49b0f0fb1c 100644
--- a/chromium/cc/trees/tree_synchronizer_unittest.cc
+++ b/chromium/cc/trees/tree_synchronizer_unittest.cc
@@ -136,7 +136,9 @@ class TreeSynchronizerTest : public testing::Test {
: animation_host_(AnimationHost::CreateForTesting(ThreadInstance::MAIN)),
host_(FakeLayerTreeHost::Create(&client_,
&task_graph_runner_,
- animation_host_.get())) {}
+ animation_host_.get())) {
+ host_->host_impl()->CreatePendingTree();
+ }
FakeLayerTreeHostClient client_;
StubLayerTreeHostSingleThreadClient single_thread_client_;
@@ -149,8 +151,8 @@ class TreeSynchronizerTest : public testing::Test {
// return a null tree.
TEST_F(TreeSynchronizerTest, SyncNullTree) {
TreeSynchronizer::SynchronizeTrees(static_cast<Layer*>(nullptr),
- host_->active_tree());
- EXPECT_TRUE(!host_->active_tree()->root_layer_for_testing());
+ host_->pending_tree());
+ EXPECT_TRUE(!host_->pending_tree()->root_layer_for_testing());
}
// Constructs a very simple tree and synchronizes it without trying to reuse any
@@ -164,11 +166,66 @@ TEST_F(TreeSynchronizerTest, SyncSimpleTreeFromEmpty) {
host_->BuildPropertyTreesForTesting();
TreeSynchronizer::SynchronizeTrees(layer_tree_root.get(),
- host_->active_tree());
+ host_->pending_tree());
+
+ LayerImpl* root = host_->pending_tree()->root_layer_for_testing();
+ EXPECT_TRUE(base::ContainsKey(
+ host_->pending_tree()->LayersThatShouldPushProperties(), root));
ExpectTreesAreIdentical(layer_tree_root.get(),
- host_->active_tree()->root_layer_for_testing(),
- host_->active_tree());
+ host_->pending_tree()->root_layer_for_testing(),
+ host_->pending_tree());
+}
+
+// Constructs a very simple tree and synchronizes it without trying to reuse
+// any preexisting layers, and tests that the needs-push-properties state is
+// set as part of tree synchronization.
+TEST_F(TreeSynchronizerTest, SyncSimpleTreeAndPushPropertiesFromEmpty) {
+ scoped_refptr<Layer> layer_tree_root = Layer::Create();
+ layer_tree_root->AddChild(Layer::Create());
+ layer_tree_root->AddChild(Layer::Create());
+
+ host_->SetRootLayer(layer_tree_root);
+ host_->BuildPropertyTreesForTesting();
+
+ TreeSynchronizer::SynchronizeTrees(layer_tree_root.get(),
+ host_->pending_tree());
+
+  // The first time the main-thread layers are synced to the pending tree, all
+  // the layers are created on the pending tree and they all need to push
+  // properties to the active tree.
+ LayerImpl* root = host_->pending_tree()->root_layer_for_testing();
+ EXPECT_TRUE(base::ContainsKey(
+ host_->pending_tree()->LayersThatShouldPushProperties(), root));
+
+ ExpectTreesAreIdentical(layer_tree_root.get(),
+ host_->pending_tree()->root_layer_for_testing(),
+ host_->pending_tree());
+
+  // Push properties so that the pending tree has valid property tree indices.
+ TreeSynchronizer::PushLayerProperties(host_.get(), host_->pending_tree());
+
+  // Now sync from the pending tree to the active tree. Pushing properties
+  // clears the pending tree's set of layers that need to push properties.
+ TreeSynchronizer::SynchronizeTrees(host_->pending_tree(),
+ host_->active_tree());
+ TreeSynchronizer::PushLayerProperties(host_->pending_tree(),
+ host_->active_tree());
+ EXPECT_FALSE(base::ContainsKey(
+ host_->pending_tree()->LayersThatShouldPushProperties(), root));
+
+  // Mark the main-thread root layer as needing to push properties.
+ layer_tree_root->SetNeedsPushProperties();
+ EXPECT_TRUE(base::ContainsKey(host_->LayersThatShouldPushProperties(),
+ layer_tree_root.get()));
+
+  // When syncing from the main thread, the needs-push-properties status is
+  // carried over to the pending tree.
+ TreeSynchronizer::SynchronizeTrees(layer_tree_root.get(),
+ host_->pending_tree());
+ TreeSynchronizer::PushLayerProperties(host_.get(), host_->pending_tree());
+ EXPECT_TRUE(base::ContainsKey(
+ host_->pending_tree()->LayersThatShouldPushProperties(), root));
}
// Constructs a very simple tree and synchronizes it attempting to reuse some
@@ -186,15 +243,19 @@ TEST_F(TreeSynchronizerTest, SyncSimpleTreeReusingLayers) {
host_->BuildPropertyTreesForTesting();
TreeSynchronizer::SynchronizeTrees(layer_tree_root.get(),
- host_->active_tree());
+ host_->pending_tree());
LayerImpl* layer_impl_tree_root =
- host_->active_tree()->root_layer_for_testing();
+ host_->pending_tree()->root_layer_for_testing();
+ EXPECT_TRUE(
+ base::ContainsKey(host_->pending_tree()->LayersThatShouldPushProperties(),
+ layer_impl_tree_root));
+
ExpectTreesAreIdentical(layer_tree_root.get(), layer_impl_tree_root,
- host_->active_tree());
+ host_->pending_tree());
// We have to push properties to pick up the destruction list pointer.
TreeSynchronizer::PushLayerProperties(layer_tree_root->layer_tree_host(),
- host_->active_tree());
+ host_->pending_tree());
// Add a new layer to the Layer side
layer_tree_root->children()[0]->AddChild(
@@ -206,16 +267,16 @@ TEST_F(TreeSynchronizerTest, SyncSimpleTreeReusingLayers) {
// should have created and destroyed one LayerImpl.
host_->BuildPropertyTreesForTesting();
TreeSynchronizer::SynchronizeTrees(layer_tree_root.get(),
- host_->active_tree());
- layer_impl_tree_root = host_->active_tree()->root_layer_for_testing();
+ host_->pending_tree());
+ layer_impl_tree_root = host_->pending_tree()->root_layer_for_testing();
ExpectTreesAreIdentical(layer_tree_root.get(), layer_impl_tree_root,
- host_->active_tree());
+ host_->pending_tree());
ASSERT_EQ(1u, layer_impl_destruction_list.size());
EXPECT_EQ(second_layer_impl_id, layer_impl_destruction_list[0]);
- host_->active_tree()->DetachLayers();
+ host_->pending_tree()->DetachLayers();
}
// Constructs a very simple tree and checks that a stacking-order change is
@@ -488,7 +549,6 @@ TEST_F(TreeSynchronizerTest, SynchronizeCurrentlyScrollingNode) {
FakeRenderingStatsInstrumentation stats_instrumentation;
TestTaskGraphRunner task_graph_runner;
FakeLayerTreeHostImpl* host_impl = host_->host_impl();
- host_impl->CreatePendingTree();
scoped_refptr<Layer> layer_tree_root = Layer::Create();
scoped_refptr<Layer> scroll_layer = Layer::Create();
@@ -535,7 +595,6 @@ TEST_F(TreeSynchronizerTest, SynchronizeScrollTreeScrollOffsetMap) {
FakeRenderingStatsInstrumentation stats_instrumentation;
TestTaskGraphRunner task_graph_runner;
FakeLayerTreeHostImpl* host_impl = host_->host_impl();
- host_impl->CreatePendingTree();
scoped_refptr<Layer> layer_tree_root = Layer::Create();
scoped_refptr<Layer> scroll_layer = Layer::Create();
@@ -647,7 +706,6 @@ TEST_F(TreeSynchronizerTest, RefreshPropertyTreesCachedData) {
FakeRenderingStatsInstrumentation stats_instrumentation;
TestTaskGraphRunner task_graph_runner;
FakeLayerTreeHostImpl* host_impl = host_->host_impl();
- host_impl->CreatePendingTree();
scoped_refptr<Layer> layer_tree_root = Layer::Create();
scoped_refptr<Layer> transform_layer = Layer::Create();